diff --git a/backend/agent/prompt.py b/backend/agent/prompt.py index 446d15c4..7e0bbe8f 100644 --- a/backend/agent/prompt.py +++ b/backend/agent/prompt.py @@ -543,6 +543,36 @@ When using the Task List system: 9. **NO MULTIPLE UPDATES:** Never update multiple tasks at once - complete one task, mark it complete, then move to the next 10. **VERIFICATION REQUIRED:** Only mark a task as complete when you have concrete evidence of completion +**🔴 CRITICAL WORKFLOW EXECUTION RULES - NO INTERRUPTIONS 🔴** +**WORKFLOWS MUST RUN TO COMPLETION WITHOUT STOPPING!** + +When executing a workflow (a pre-defined sequence of steps): +1. **CONTINUOUS EXECUTION:** Once a workflow starts, it MUST run all steps to completion +2. **NO CONFIRMATION REQUESTS:** NEVER ask "should I proceed?" or "do you want me to continue?" during workflow execution +3. **NO PERMISSION SEEKING:** Do not seek permission between workflow steps - the user already approved by starting the workflow +4. **AUTOMATIC PROGRESSION:** Move from one step to the next automatically without pause +5. **COMPLETE ALL STEPS:** Execute every step in the workflow sequence until fully complete +6. **ONLY STOP FOR ERRORS:** Only pause if there's an actual error or missing required data +7. **NO INTERMEDIATE ASKS:** Do not use the 'ask' tool between workflow steps unless there's a critical error + +**WORKFLOW VS CLARIFICATION - KNOW THE DIFFERENCE:** +- **During Workflow Execution:** NO stopping, NO asking for permission, CONTINUOUS execution +- **During Initial Planning:** ASK clarifying questions BEFORE starting the workflow +- **When Errors Occur:** ONLY ask if there's a blocking error that prevents continuation +- **After Workflow Completion:** Use 'complete' or 'ask' to signal workflow has finished + +**EXAMPLES OF WHAT NOT TO DO DURING WORKFLOWS:** +❌ "I've completed step 1. Should I proceed to step 2?" +❌ "The first task is done. Do you want me to continue?" +❌ "I'm about to start the next step. Is that okay?" +❌ "Step 2 is complete. Shall I move to step 3?" + +**EXAMPLES OF CORRECT WORKFLOW EXECUTION:** +✅ Execute Step 1 → Mark complete → Execute Step 2 → Mark complete → Continue until all done +✅ Run through all workflow steps automatically without interruption +✅ Only stop if there's an actual error that blocks progress +✅ Complete the entire workflow then signal completion + **TASK CREATION RULES:** 1. Create multiple sections in lifecycle order: Research & Setup → Planning → Implementation → Testing → Verification → Completion 2. Each section contains specific, actionable subtasks based on complexity @@ -574,8 +604,10 @@ When using the Task List system: **CRITICAL: NEVER execute multiple tasks simultaneously or update multiple tasks at once. Always complete one task fully, mark it complete, then move to the next.** **HANDLING AMBIGUOUS RESULTS DURING TASK EXECUTION:** -1. **STOP AND ASK:** When you encounter unclear, ambiguous, or unexpected results during task execution, immediately stop and ask for clarification -2. **DON'T ASSUME:** Never make assumptions about what the user wants when results are unclear +1. **WORKFLOW CONTEXT MATTERS:** + - If executing a workflow: Continue unless it's a blocking error + - If doing exploratory work: Ask for clarification when needed +2. **BLOCKING ERRORS ONLY:** In workflows, only stop for errors that prevent continuation 3. **BE SPECIFIC:** When asking for clarification, be specific about what's unclear and what you need to know 4. 
**PROVIDE CONTEXT:** Explain what you found and why it's unclear or doesn't match expectations 5. **OFFER OPTIONS:** When possible, provide specific options or alternatives for the user to choose from @@ -651,6 +683,17 @@ When executing complex tasks with Task Lists: - **COMPLETE BEFORE MOVING:** Finish each task completely before starting the next - **NO BULK OPERATIONS:** Never do multiple web searches, file operations, or tool calls at once - **NO SKIPPING:** Do not skip tasks or jump ahead in the list +- **NO INTERRUPTION FOR PERMISSION:** Never stop to ask if you should continue - workflows run to completion +- **CONTINUOUS EXECUTION:** In workflows, proceed automatically from task to task without asking for confirmation + +**🔴 WORKFLOW EXECUTION MINDSET 🔴** +When executing a workflow, adopt this mindset: +- "The user has already approved this workflow by initiating it" +- "I must complete all steps without stopping for permission" +- "I only pause for actual errors that block progress" +- "Each step flows automatically into the next" +- "No confirmation is needed between steps" +- "The workflow is my contract - I execute it fully" # 6. CONTENT CREATION @@ -868,23 +911,39 @@ To make conversations feel natural and human-like: * No further exploration or information gathering after completion * No redundant checks or validations after completion +- **WORKFLOW EXECUTION COMPLETION:** + * **NEVER INTERRUPT WORKFLOWS:** Do not use 'ask' between workflow steps + * **RUN TO COMPLETION:** Execute all workflow steps without stopping + * **NO PERMISSION REQUESTS:** Never ask "should I continue?" during workflow execution + * **SIGNAL ONLY AT END:** Use 'complete' or 'ask' ONLY after ALL workflow steps are finished + * **AUTOMATIC PROGRESSION:** Move through workflow steps automatically without pause + - **COMPLETION VERIFICATION:** * Verify task completion only once * If all tasks are complete, immediately use 'complete' or 'ask' * Do not perform additional checks after verification * Do not gather more information after completion + * For workflows: Do NOT verify between steps, only at the very end - **COMPLETION TIMING:** * Use 'complete' or 'ask' immediately after the last task is marked complete * No delay between task completion and tool call * No intermediate steps between completion and tool call * No additional verifications between completion and tool call + * For workflows: Only signal completion after ALL steps are done - **COMPLETION CONSEQUENCES:** * Failure to use 'complete' or 'ask' after task completion is a critical error * The system will continue running in a loop if completion is not signaled * Additional commands after completion are considered errors * Redundant verifications after completion are prohibited + * Interrupting workflows for permission is a critical error + +**WORKFLOW COMPLETION EXAMPLES:** +✅ CORRECT: Execute Step 1 → Step 2 → Step 3 → Step 4 → All done → Signal 'complete' +❌ WRONG: Execute Step 1 → Ask "continue?" → Step 2 → Ask "proceed?" → Step 3 +❌ WRONG: Execute Step 1 → Step 2 → Ask "should I do step 3?" → Step 3 +✅ CORRECT: Run entire workflow → Signal completion at the end only # 🔧 SELF-CONFIGURATION CAPABILITIES @@ -892,16 +951,15 @@ You have the ability to configure and enhance yourself! 
When users ask you to mo ## 🛠️ Available Self-Configuration Tools -### Agent Configuration (`update_agent`) -- Add MCP integrations to connect with external services -- Create and manage workflows for structured processes -- Set up triggers for scheduled automation -- Configure credential profiles for secure service connections +### Agent Configuration (`configure_profile_for_agent` ONLY) +- **CRITICAL RESTRICTION: DO NOT USE `update_agent` FOR ADDING INTEGRATIONS** +- **ONLY USE `configure_profile_for_agent`** to add connected services to your configuration +- The `update_agent` tool is PROHIBITED for integration purposes +- You can only configure credential profiles for secure service connections ### MCP Integration Tools -- `search_mcp_servers`: Find integrations for specific services (Gmail, Slack, GitHub, etc.) -- `get_popular_mcp_servers`: Browse trending integrations -- `get_mcp_server_tools`: Explore integration capabilities +- `search_mcp_servers`: Find integrations for specific services (Gmail, Slack, GitHub, etc.). NOTE: SEARCH ONLY ONE APP AT A TIME +- `discover_user_mcp_servers`: **CRITICAL** - Fetch actual authenticated tools available after user authentication - `configure_profile_for_agent`: Add connected services to your configuration ### Credential Management @@ -910,8 +968,8 @@ You have the ability to configure and enhance yourself! When users ask you to mo - `configure_profile_for_agent`: Add connected services to agent configuration ### Workflow & Automation -- `create_workflow`: Design multi-step automated processes -- `create_scheduled_trigger`: Set up time-based automation +- **RESTRICTED**: Do not use `create_workflow` or `create_scheduled_trigger` through `update_agent` +- Use only existing workflow capabilities without modifying agent configuration - `get_workflows` / `get_scheduled_triggers`: Review existing automation ## 🎯 When Users Request Configuration Changes @@ -925,35 +983,107 @@ Before implementing any configuration changes, ALWAYS ask detailed questions to - Do they have existing accounts/credentials for relevant services? - What should trigger the automation (time, events, manual)? -**MANDATORY MCP TOOL ADDITION FLOW:** +**🔴 MANDATORY AUTHENTICATION PROTOCOL - CRITICAL FOR SYSTEM VALIDITY 🔴** +**THE ENTIRE INTEGRATION IS INVALID WITHOUT PROPER AUTHENTICATION!** + +When setting up ANY new integration or service connection: +1. **ALWAYS SEND AUTHENTICATION LINK FIRST** - This is NON-NEGOTIABLE +2. **EXPLICITLY ASK USER TO AUTHENTICATE** - Tell them: "Please click this link to authenticate" +3. **WAIT FOR CONFIRMATION** - Ask: "Have you completed the authentication?" +4. **NEVER PROCEED WITHOUT AUTHENTICATION** - The integration WILL NOT WORK otherwise +5. **EXPLAIN WHY** - Tell users: "This authentication is required for the integration to function" + +**AUTHENTICATION FAILURE = SYSTEM FAILURE** +- Without proper authentication, ALL subsequent operations will fail +- The integration becomes completely unusable +- User experience will be broken +- The entire workflow becomes invalid + +**MANDATORY MCP TOOL ADDITION FLOW - NO update_agent ALLOWED:** 1. **Search** → Use `search_mcp_servers` to find relevant integrations 2. **Explore** → Use `get_mcp_server_tools` to see available capabilities -3. **AUTOMATICALLY ADD** → Use `configure_mcp_server` to add the MCP server immediately -4. **Create Profile** → Use `create_credential_profile` for authentication (provides connection link) -5. 
**Configure** → Use `configure_profile_for_agent` to add to your capabilities -6. **Verify** → Test the new tools work correctly +3. **⚠️ SKIP configure_mcp_server** → DO NOT use `update_agent` to add MCP servers +4. **🔴 CRITICAL: Create Profile & SEND AUTH LINK 🔴** + - Use `create_credential_profile` to generate authentication link + - **IMMEDIATELY SEND THE LINK TO USER** with message: + "📌 **AUTHENTICATION REQUIRED**: Please click this link to authenticate [service name]: [authentication_link]" + - **EXPLICITLY ASK**: "Please authenticate using the link above and let me know when you've completed it." + - **WAIT FOR USER CONFIRMATION** before proceeding +5. **VERIFY AUTHENTICATION** → Ask user: "Have you successfully authenticated? (yes/no)" + - If NO → Resend link and provide troubleshooting help + - If YES → Continue with configuration +6. **🔴 CRITICAL: Discover Actual Available Tools 🔴** + - **MANDATORY**: Use `discover_user_mcp_servers` to fetch the actual tools available after authentication + - **NEVER MAKE UP TOOL NAMES** - only use tools discovered through this step + - This step reveals the real, authenticated tools available for the user's account +7. **Configure ONLY** → ONLY after discovering actual tools, use `configure_profile_for_agent` to add to your capabilities +8. **Test** → Verify the authenticated connection works correctly with the discovered tools +9. **Confirm Success** → Tell user the integration is now active and working with the specific tools discovered + +**AUTHENTICATION LINK MESSAGING TEMPLATE:** +``` +🔐 **AUTHENTICATION REQUIRED FOR [SERVICE NAME]** + +I've generated an authentication link for you. **This step is MANDATORY** - the integration will not work without it. + +**Please follow these steps:** +1. Click this link: [authentication_link] +2. Log in to your [service] account +3. Authorize the connection +4. Return here and confirm you've completed authentication + +⚠️ **IMPORTANT**: The integration CANNOT function without this authentication. Please complete it before we continue. + +Let me know once you've authenticated successfully! +``` **If a user asks you to:** -- "Add Gmail integration" → Ask: What Gmail tasks? Read/send emails? Manage labels? Then SEARCH → ADD → CONFIGURE -- "Set up daily reports" → Ask: What data? What format? Where to send? Then SEARCH for needed tools → ADD → CREATE workflow -- "Connect to Slack" → Ask: What Slack actions? Send messages? Read channels? Then SEARCH → ADD → CONFIGURE -- "Automate [task]" → Ask: What triggers it? What steps? What outputs? Then SEARCH → ADD → BUILD workflow -- "Add [service] capabilities" → Ask: What specific actions? Then SEARCH → ADD immediately +- "Add Gmail integration" → Ask: What Gmail tasks? Read/send emails? Manage labels? Then SEARCH → CREATE PROFILE → **SEND AUTH LINK** → **WAIT FOR AUTH** → **DISCOVER ACTUAL TOOLS** → CONFIGURE PROFILE ONLY +- "Set up daily reports" → Ask: What data? What format? Where to send? Then SEARCH for needed tools → CREATE PROFILE → **SEND AUTH LINK** → **WAIT FOR AUTH** → **DISCOVER ACTUAL TOOLS** → CONFIGURE PROFILE (no workflow creation) +- "Connect to Slack" → Ask: What Slack actions? Send messages? Read channels? Then SEARCH → CREATE PROFILE → **SEND AUTH LINK** → **WAIT FOR AUTH** → **DISCOVER ACTUAL TOOLS** → CONFIGURE PROFILE ONLY +- "Automate [task]" → Ask: What triggers it? What steps? What outputs? 
Then SEARCH → CREATE PROFILE → **SEND AUTH LINK** → **WAIT FOR AUTH** → **DISCOVER ACTUAL TOOLS** → CONFIGURE PROFILE (no workflow creation) +- "Add [service] capabilities" → Ask: What specific actions? Then SEARCH → CREATE PROFILE → **SEND AUTH LINK** → **WAIT FOR AUTH** → **DISCOVER ACTUAL TOOLS** → CONFIGURE PROFILE ONLY **ABSOLUTE REQUIREMENTS:** -- **NEVER just show MCP servers** - ALWAYS configure and add them immediately after discovery +- **🔴 ALWAYS SEND AUTHENTICATION LINKS - NO EXCEPTIONS 🔴** +- **🔴 ALWAYS WAIT FOR USER AUTHENTICATION CONFIRMATION 🔴** +- **🔴 NEVER PROCEED WITHOUT VERIFIED AUTHENTICATION 🔴** +- **🔴 NEVER USE update_agent TO ADD MCP SERVERS 🔴** +- **🔴 ALWAYS USE discover_user_mcp_servers AFTER AUTHENTICATION 🔴** +- **🔴 NEVER MAKE UP TOOL NAMES - ONLY USE DISCOVERED TOOLS 🔴** +- **NEVER automatically add MCP servers** - only create profiles and configure existing capabilities - **ASK 3-5 SPECIFIC QUESTIONS** before starting any configuration -- **AUTOMATICALLY ADD EVERY RELEVANT MCP SERVER** you discover during search +- **ONLY USE configure_profile_for_agent** for adding integration capabilities +- **MANDATORY**: Use `discover_user_mcp_servers` to fetch real, authenticated tools before configuration +- **EXPLICITLY COMMUNICATE** that authentication is mandatory for the system to work - Guide users through connection processes step-by-step with clear instructions -- Explain what each integration enables and why it's useful -- Test connections before finalizing configurations -- **ALWAYS ADD THE NECESSARY TOOLS** - If you don't have capabilities for something, automatically search for and add the required MCP integrations +- Explain that WITHOUT authentication, the integration is COMPLETELY INVALID +- Test connections ONLY AFTER authentication is confirmed AND actual tools are discovered +- **SEARCH FOR INTEGRATIONS** but do not automatically add them to the agent configuration +- **CREATE CREDENTIAL PROFILES** and configure them for the agent, but do not modify the agent's core configuration +- **WAIT FOR discover_user_mcp_servers RESPONSE** before proceeding with any tool configuration + +**AUTHENTICATION ERROR HANDLING:** +If user reports authentication issues: +1. **Regenerate the authentication link** using `create_credential_profile` again +2. **Provide troubleshooting steps** (clear cookies, try different browser, check account access) +3. **Explain consequences**: "Without authentication, this integration cannot function at all" +4. **Offer alternatives** if authentication continues to fail +5. **Never skip authentication** - it's better to fail setup than have a broken integration ## 🌟 Self-Configuration Philosophy -You are Suna, and you can now evolve and adapt based on user needs. When someone asks you to gain new capabilities or connect to services, proactively use these tools to enhance yourself. You're not just following instructions - you're actively improving your own abilities to better serve your users. +You are Suna, and you can now evolve and adapt based on user needs through credential profile configuration only. When someone asks you to gain new capabilities or connect to services, use ONLY the `configure_profile_for_agent` tool to enhance your connections to external services. **You are PROHIBITED from using `update_agent` to modify your core configuration or add integrations.** -Remember: You maintain all your core Suna capabilities while gaining the power to extend and customize yourself. 
This makes you more helpful and adaptable to each user's specific needs. ALWAYS use the `edit_file` tool to make changes to files. The `edit_file` tool is smart enough to find and replace the specific parts you mention, so you should: +**CRITICAL RESTRICTIONS:** +- **NEVER use `update_agent`** for adding integrations, MCP servers, workflows, or triggers +- **ONLY use `configure_profile_for_agent`** to add authenticated service connections +- You can search for and explore integrations but cannot automatically add them to your configuration +- Focus on credential-based connections rather than core agent modifications +- **MANDATORY**: Always use `discover_user_mcp_servers` after authentication to fetch real, available tools +- **NEVER MAKE UP TOOL NAMES** - only use tools discovered through the authentication process + +Remember: You maintain all your core Suna capabilities while gaining the power to connect to external services through authenticated profiles only. This makes you more helpful while maintaining system stability and security. **Always discover actual tools using `discover_user_mcp_servers` before configuring any integration - never assume or invent tool names.** ALWAYS use the `edit_file` tool to make changes to files. The `edit_file` tool is smart enough to find and replace the specific parts you mention, so you should: 1. **Show only the exact lines that change** 2. **Use `// ... existing code ...` for context when needed** 3. **Never reproduce entire files or large unchanged sections** @@ -962,9 +1092,6 @@ Remember: You maintain all your core Suna capabilities while gaining the power t def get_system_prompt(): - ''' - Returns the system prompt - ''' return SYSTEM_PROMPT.format( current_date=datetime.datetime.now(datetime.timezone.utc).strftime('%Y-%m-%d'), current_time=datetime.datetime.now(datetime.timezone.utc).strftime('%H:%M:%S'), diff --git a/backend/agent/run.py b/backend/agent/run.py index 6fbf3468..833a3766 100644 --- a/backend/agent/run.py +++ b/backend/agent/run.py @@ -202,6 +202,7 @@ class MCPManager: "schema": schema } + logger.info(f"⚡ Registered {len(updated_schemas)} MCP tools (Redis cache enabled)") return mcp_wrapper_instance except Exception as e: logger.error(f"Failed to initialize MCP tools: {e}") diff --git a/backend/agent/tools/agent_builder_tools/agent_config_tool.py b/backend/agent/tools/agent_builder_tools/agent_config_tool.py index 2bb0fcd7..5d87852c 100644 --- a/backend/agent/tools/agent_builder_tools/agent_config_tool.py +++ b/backend/agent/tools/agent_builder_tools/agent_config_tool.py @@ -314,8 +314,16 @@ class AgentConfigTool(AgentBuilderBaseTool): "updated_at": agent_data.get("updated_at"), "current_version": agent_config.get("version_name", "v1") if version_data else "No version data" } - - tools_count = len([t for t, cfg in config_summary["agentpress_tools"].items() if cfg.get("enabled")]) + + enabled_tools = [] + for tool_name, tool_config in config_summary["agentpress_tools"].items(): + if isinstance(tool_config, bool): + if tool_config: + enabled_tools.append(tool_name) + elif isinstance(tool_config, dict): + if tool_config.get("enabled", False): + enabled_tools.append(tool_name) + tools_count = len(enabled_tools) mcps_count = len(config_summary["configured_mcps"]) custom_mcps_count = len(config_summary["custom_mcps"]) diff --git a/backend/agent/tools/agent_builder_tools/credential_profile_tool.py b/backend/agent/tools/agent_builder_tools/credential_profile_tool.py index 4876c66a..4eacfefc 100644 --- 
a/backend/agent/tools/agent_builder_tools/credential_profile_tool.py +++ b/backend/agent/tools/agent_builder_tools/credential_profile_tool.py @@ -140,7 +140,15 @@ class CredentialProfileTool(AgentBuilderBaseTool): if result.connected_account.redirect_url: response_data["connection_link"] = result.connected_account.redirect_url - response_data["instructions"] = f"🔗 **IMPORTANT: Please visit the connection link to authenticate your {result.toolkit.name} account with this profile. After connecting, you'll be able to use {result.toolkit.name} tools in your agent.**" + # Include both the toolkit name and slug in a parseable format + # Format: [toolkit:slug:name] to help frontend identify the service accurately + response_data["instructions"] = f"""🔗 **{result.toolkit.name} Authentication Required** + +Please authenticate your {result.toolkit.name} account by clicking the link below: + +[toolkit:{toolkit_slug}:{result.toolkit.name}] Authentication: {result.connected_account.redirect_url} + +After connecting, you'll be able to use {result.toolkit.name} tools in your agent.""" else: response_data["instructions"] = f"This {result.toolkit.name} profile has been created and is ready to use." @@ -256,12 +264,48 @@ class CredentialProfileTool(AgentBuilderBaseTool): change_description=f"Configured {display_name or profile.display_name} with {len(enabled_tools)} tools" ) + # Dynamically register the MCP tools in the current runtime + try: + from agent.tools.mcp_tool_wrapper import MCPToolWrapper + + mcp_config_for_wrapper = { + 'name': profile.toolkit_name, + 'qualifiedName': f"composio.{profile.toolkit_slug}", + 'config': { + 'profile_id': profile_id, + 'toolkit_slug': profile.toolkit_slug, + 'mcp_qualified_name': profile.mcp_qualified_name + }, + 'enabledTools': enabled_tools, + 'instructions': '', + 'isCustom': True, + 'customType': 'composio' + } + + mcp_wrapper_instance = MCPToolWrapper(mcp_configs=[mcp_config_for_wrapper]) + await mcp_wrapper_instance.initialize_and_register_tools() + updated_schemas = mcp_wrapper_instance.get_schemas() + + for method_name, schema_list in updated_schemas.items(): + for schema in schema_list: + self.thread_manager.tool_registry.tools[method_name] = { + "instance": mcp_wrapper_instance, + "schema": schema + } + logger.info(f"Dynamically registered MCP tool: {method_name}") + + logger.info(f"Successfully registered {len(updated_schemas)} MCP tools dynamically for {profile.toolkit_name}") + + except Exception as e: + logger.warning(f"Could not dynamically register MCP tools in current runtime: {str(e)}. 
Tools will be available on next agent run.") + return self.success_response({ - "message": f"Profile '{profile.profile_name}' updated with {len(enabled_tools)} tools", + "message": f"Profile '{profile.profile_name}' configured with {len(enabled_tools)} tools and registered in current runtime", "enabled_tools": enabled_tools, "total_tools": len(enabled_tools), "version_id": new_version.version_id, - "version_name": new_version.version_name + "version_name": new_version.version_name, + "runtime_registration": "success" }) except Exception as e: diff --git a/backend/agent/tools/agent_builder_tools/workflow_tool.py b/backend/agent/tools/agent_builder_tools/workflow_tool.py index a191dbea..8233fb9d 100644 --- a/backend/agent/tools/agent_builder_tools/workflow_tool.py +++ b/backend/agent/tools/agent_builder_tools/workflow_tool.py @@ -52,8 +52,13 @@ class WorkflowTool(AgentBuilderBaseTool): agentpress_tools = agent_config.get('agentpress_tools', {}) for tool_key, tool_names in tool_mapping.items(): - if agentpress_tools.get(tool_key, {}).get('enabled', False): - available_tools.extend(tool_names) + tool_config = agentpress_tools.get(tool_key, False) + if isinstance(tool_config, bool): + if tool_config: + available_tools.extend(tool_names) + elif isinstance(tool_config, dict): + if tool_config.get('enabled', False): + available_tools.extend(tool_names) configured_mcps = agent_config.get('configured_mcps', []) for mcp in configured_mcps: diff --git a/backend/agent/tools/mcp_tool_wrapper.py b/backend/agent/tools/mcp_tool_wrapper.py index 9af74291..f8d00f04 100644 --- a/backend/agent/tools/mcp_tool_wrapper.py +++ b/backend/agent/tools/mcp_tool_wrapper.py @@ -3,20 +3,120 @@ from agentpress.tool import Tool, ToolResult, ToolSchema, SchemaType from mcp_module import mcp_service from utils.logger import logger import inspect +import asyncio +import time +import hashlib +import json from agent.tools.utils.mcp_connection_manager import MCPConnectionManager from agent.tools.utils.custom_mcp_handler import CustomMCPHandler from agent.tools.utils.dynamic_tool_builder import DynamicToolBuilder from agent.tools.utils.mcp_tool_executor import MCPToolExecutor +from services import redis as redis_service +class MCPSchemaRedisCache: + def __init__(self, ttl_seconds: int = 3600, key_prefix: str = "mcp_schema:"): + self._ttl = ttl_seconds + self._key_prefix = key_prefix + self._redis_client = None + + async def _ensure_redis(self): + if not self._redis_client: + try: + self._redis_client = await redis_service.get_client() + except Exception as e: + logger.warning(f"Redis not available for MCP cache: {e}") + return False + return True + + def _get_cache_key(self, config: Dict[str, Any]) -> str: + config_str = json.dumps(config, sort_keys=True) + config_hash = hashlib.md5(config_str.encode()).hexdigest() + return f"{self._key_prefix}{config_hash}" + + async def get(self, config: Dict[str, Any]) -> Optional[Dict[str, Any]]: + if not await self._ensure_redis(): + return None + + try: + key = self._get_cache_key(config) + cached_data = await self._redis_client.get(key) + + if cached_data: + logger.debug(f"⚡ Redis cache hit for MCP: {config.get('name', config.get('qualifiedName', 'Unknown'))}") + return json.loads(cached_data) + else: + logger.debug(f"Redis cache miss for MCP: {config.get('name', config.get('qualifiedName', 'Unknown'))}") + return None + + except Exception as e: + logger.warning(f"Error reading from Redis cache: {e}") + return None + + async def set(self, config: Dict[str, Any], data: Dict[str, Any]): + if 
not await self._ensure_redis(): + return + + try: + key = self._get_cache_key(config) + serialized_data = json.dumps(data) + + await self._redis_client.setex(key, self._ttl, serialized_data) + logger.debug(f"✅ Cached MCP schema in Redis for {config.get('name', config.get('qualifiedName', 'Unknown'))} (TTL: {self._ttl}s)") + + except Exception as e: + logger.warning(f"Error writing to Redis cache: {e}") + + async def clear_pattern(self, pattern: Optional[str] = None): + if not await self._ensure_redis(): + return + try: + if pattern: + search_pattern = f"{self._key_prefix}{pattern}*" + else: + search_pattern = f"{self._key_prefix}*" + + keys = [] + async for key in self._redis_client.scan_iter(match=search_pattern): + keys.append(key) + + if keys: + await self._redis_client.delete(*keys) + logger.info(f"Cleared {len(keys)} MCP schema cache entries from Redis") + + except Exception as e: + logger.warning(f"Error clearing Redis cache: {e}") + + async def get_stats(self) -> Dict[str, Any]: + if not await self._ensure_redis(): + return {"available": False} + try: + count = 0 + async for _ in self._redis_client.scan_iter(match=f"{self._key_prefix}*"): + count += 1 + + return { + "available": True, + "cached_schemas": count, + "ttl_seconds": self._ttl, + "key_prefix": self._key_prefix + } + except Exception as e: + logger.warning(f"Error getting cache stats: {e}") + return {"available": False, "error": str(e)} + + +_redis_cache = MCPSchemaRedisCache(ttl_seconds=3600) + class MCPToolWrapper(Tool): - def __init__(self, mcp_configs: Optional[List[Dict[str, Any]]] = None): + def __init__(self, mcp_configs: Optional[List[Dict[str, Any]]] = None, use_cache: bool = True): self.mcp_manager = mcp_service self.mcp_configs = mcp_configs or [] self._initialized = False self._schemas: Dict[str, List[ToolSchema]] = {} self._dynamic_tools = {} self._custom_tools = {} + self.use_cache = use_cache self.connection_manager = MCPConnectionManager() self.custom_handler = CustomMCPHandler(self.connection_manager) @@ -32,23 +132,109 @@ class MCPToolWrapper(Tool): self._initialized = True async def _initialize_servers(self): + start_time = time.time() + standard_configs = [cfg for cfg in self.mcp_configs if not cfg.get('isCustom', False)] custom_configs = [cfg for cfg in self.mcp_configs if cfg.get('isCustom', False)] + cached_configs = [] + cached_tools_data = [] + + initialization_tasks = [] + if standard_configs: - await self._initialize_standard_servers(standard_configs) + for config in standard_configs: + if self.use_cache: + cached_data = await _redis_cache.get(config) + if cached_data: + cached_configs.append(config.get('qualifiedName', 'Unknown')) + cached_tools_data.append(cached_data) + continue + + task = self._initialize_single_standard_server(config) + initialization_tasks.append(('standard', config, task)) if custom_configs: - await self.custom_handler.initialize_custom_mcps(custom_configs) + for config in custom_configs: + if self.use_cache: + cached_data = await _redis_cache.get(config) + if cached_data: + cached_configs.append(config.get('name', 'Unknown')) + cached_tools_data.append(cached_data) + continue + + task = self._initialize_single_custom_mcp(config) + initialization_tasks.append(('custom', config, task)) + + if cached_tools_data: + logger.info(f"⚡ Loaded {len(cached_configs)} MCP schemas from Redis cache: {', '.join(cached_configs)}") + for cached_data in cached_tools_data: + try: + if cached_data.get('type') == 'standard': + logger.debug("Standard MCP tools found in cache but require 
connection to restore") + elif cached_data.get('type') == 'custom': + custom_tools = cached_data.get('tools', {}) + if custom_tools: + self.custom_handler.custom_tools.update(custom_tools) + logger.debug(f"Restored {len(custom_tools)} custom tools from cache") + except Exception as e: + logger.warning(f"Failed to restore cached tools: {e}") + + if initialization_tasks: + logger.info(f"🚀 Initializing {len(initialization_tasks)} MCP servers in parallel (cache enabled: {self.use_cache})...") + + tasks = [task for _, _, task in initialization_tasks] + results = await asyncio.gather(*tasks, return_exceptions=True) + + successful = 0 + failed = 0 + + for i, result in enumerate(results): + task_type, config, _ = initialization_tasks[i] + if isinstance(result, Exception): + failed += 1 + config_name = config.get('name', config.get('qualifiedName', 'Unknown')) + logger.error(f"Failed to initialize MCP server '{config_name}': {result}") + else: + successful += 1 + if self.use_cache and result: + await _redis_cache.set(config, result) + + elapsed_time = time.time() - start_time + logger.info(f"⚡ MCP initialization completed in {elapsed_time:.2f}s - {successful} successful, {failed} failed, {len(cached_configs)} from cache") + else: + if cached_configs: + elapsed_time = time.time() - start_time + logger.info(f"⚡ All {len(cached_configs)} MCP schemas loaded from Redis cache in {elapsed_time:.2f}s - instant startup!") + else: + logger.info("No MCP servers to initialize") + + async def _initialize_single_standard_server(self, config: Dict[str, Any]): + try: + logger.debug(f"Connecting to standard MCP server: {config['qualifiedName']}") + await self.mcp_manager.connect_server(config) + logger.debug(f"✓ Connected to MCP server: {config['qualifiedName']}") + + tools_info = self.mcp_manager.get_all_tools_openapi() + return {'tools': tools_info, 'type': 'standard', 'timestamp': time.time()} + except Exception as e: + logger.error(f"✗ Failed to connect to MCP server {config['qualifiedName']}: {e}") + raise e + + async def _initialize_single_custom_mcp(self, config: Dict[str, Any]): + try: + logger.debug(f"Initializing custom MCP: {config.get('name', 'Unknown')}") + await self.custom_handler._initialize_single_custom_mcp(config) + logger.debug(f"✓ Initialized custom MCP: {config.get('name', 'Unknown')}") + + custom_tools = self.custom_handler.get_custom_tools() + return {'tools': custom_tools, 'type': 'custom', 'timestamp': time.time()} + except Exception as e: + logger.error(f"✗ Failed to initialize custom MCP {config.get('name', 'Unknown')}: {e}") + raise e async def _initialize_standard_servers(self, standard_configs: List[Dict[str, Any]]): - for config in standard_configs: - try: - logger.info(f"Attempting to connect to MCP server: {config['qualifiedName']}") - await self.mcp_manager.connect_server(config) - logger.info(f"Successfully connected to MCP server: {config['qualifiedName']}") - except Exception as e: - logger.error(f"Failed to connect to MCP server {config['qualifiedName']}: {e}") + pass async def _create_dynamic_tools(self): try: @@ -77,7 +263,6 @@ class MCPToolWrapper(Tool): logger.info(f"Created {len(self._dynamic_tools)} dynamic MCP tool methods") - # Re-register schemas to pick up the dynamic methods self._register_schemas() logger.info(f"Re-registered schemas after creating dynamic tools - total: {len(self._schemas)}") diff --git a/backend/agent/tools/utils/custom_mcp_handler.py b/backend/agent/tools/utils/custom_mcp_handler.py index e33d4110..6d6a7a6c 100644 --- 
a/backend/agent/tools/utils/custom_mcp_handler.py +++ b/backend/agent/tools/utils/custom_mcp_handler.py @@ -12,18 +12,33 @@ from .mcp_connection_manager import MCPConnectionManager class CustomMCPHandler: def __init__(self, connection_manager: MCPConnectionManager): self.connection_manager = connection_manager - self.custom_tools: Dict[str, Dict[str, Any]] = {} + self.custom_tools = {} async def initialize_custom_mcps(self, custom_configs: List[Dict[str, Any]]) -> Dict[str, Dict[str, Any]]: + initialization_tasks = [] + for config in custom_configs: - try: - await self._initialize_single_custom_mcp(config) - except Exception as e: - logger.error(f"Failed to initialize custom MCP {config.get('name', 'Unknown')}: {e}") - continue + task = self._initialize_single_custom_mcp_safe(config) + initialization_tasks.append(task) + + if initialization_tasks: + logger.info(f"Initializing {len(initialization_tasks)} custom MCPs in parallel...") + results = await asyncio.gather(*initialization_tasks, return_exceptions=True) + + for i, result in enumerate(results): + if isinstance(result, Exception): + config_name = custom_configs[i].get('name', 'Unknown') + logger.error(f"Failed to initialize custom MCP {config_name}: {result}") return self.custom_tools + async def _initialize_single_custom_mcp_safe(self, config: Dict[str, Any]): + try: + await self._initialize_single_custom_mcp(config) + except Exception as e: + logger.error(f"Failed to initialize custom MCP {config.get('name', 'Unknown')}: {e}") + return e + async def _initialize_single_custom_mcp(self, config: Dict[str, Any]): custom_type = config.get('customType', 'sse') server_config = config.get('config', {}) diff --git a/backend/templates/api.py b/backend/templates/api.py index 9a5afd00..5a6678c2 100644 --- a/backend/templates/api.py +++ b/backend/templates/api.py @@ -419,10 +419,7 @@ async def get_my_templates( logger.info(f"Retrieved {len(templates)} templates for user {user_id}") return [ - TemplateResponse( - **format_template_for_response(template), - creator_name=None - ) + TemplateResponse(**format_template_for_response(template)) for template in templates ] @@ -448,10 +445,7 @@ async def get_template( logger.info(f"User {user_id} accessing template {template_id}") - return TemplateResponse( - **format_template_for_response(template), - creator_name=None - ) + return TemplateResponse(**format_template_for_response(template)) except HTTPException: # Re-raise HTTP exceptions from our validation functions diff --git a/backend/templates/template_service.py b/backend/templates/template_service.py index dba10ed2..6010d2dc 100644 --- a/backend/templates/template_service.py +++ b/backend/templates/template_service.py @@ -44,6 +44,7 @@ class AgentTemplate: avatar: Optional[str] = None avatar_color: Optional[str] = None metadata: ConfigType = field(default_factory=dict) + creator_name: Optional[str] = None def with_public_status(self, is_public: bool, published_at: Optional[datetime] = None) -> 'AgentTemplate': return AgentTemplate( @@ -82,10 +83,8 @@ class AgentTemplate: mcp_type = mcp.get('type', 'sse') mcp_name = mcp['name'] - # Use explicitly stored qualified name (should always be present after sanitization) - qualified_name = mcp.get('mcp_qualified_name') or mcp.get('qualifiedName') # fallback for old format + qualified_name = mcp.get('mcp_qualified_name') or mcp.get('qualifiedName') if not qualified_name: - # Fallback for legacy templates (should rarely be needed) if mcp_type == 'pipedream': app_slug = mcp.get('app_slug') or mcp.get('config', 
{}).get('headers', {}).get('x-pd-app-slug') if not app_slug: @@ -98,7 +97,6 @@ class AgentTemplate: safe_name = mcp_name.replace(' ', '_').lower() qualified_name = f"custom_{mcp_type}_{safe_name}" - # Determine required config based on type if mcp_type in ['pipedream', 'composio']: required_config = [] elif mcp_type in ['http', 'sse', 'json']: @@ -192,6 +190,15 @@ class TemplateService: if not result.data: return None + creator_id = result.data['creator_id'] + creator_result = await client.schema('basejump').from_('accounts').select('id, name, slug').eq('id', creator_id).execute() + + creator_name = None + if creator_result.data: + account = creator_result.data[0] + creator_name = account.get('name') or account.get('slug') + + result.data['creator_name'] = creator_name return self._map_to_template(result.data) async def get_user_templates(self, creator_id: str) -> List[AgentTemplate]: @@ -201,7 +208,22 @@ class TemplateService: .order('created_at', desc=True)\ .execute() - return [self._map_to_template(data) for data in result.data] + if not result.data: + return [] + + creator_result = await client.schema('basejump').from_('accounts').select('id, name, slug').eq('id', creator_id).execute() + + creator_name = None + if creator_result.data: + account = creator_result.data[0] + creator_name = account.get('name') or account.get('slug') + + templates = [] + for template_data in result.data: + template_data['creator_name'] = creator_name + templates.append(self._map_to_template(template_data)) + + return templates async def get_public_templates(self) -> List[AgentTemplate]: client = await self._db.client @@ -211,7 +233,24 @@ class TemplateService: .order('marketplace_published_at', desc=True)\ .execute() - return [self._map_to_template(data) for data in result.data] + if not result.data: + return [] + + creator_ids = list(set(template['creator_id'] for template in result.data)) + accounts_result = await client.schema('basejump').from_('accounts').select('id, name, slug').in_('id', creator_ids).execute() + + creator_names = {} + if accounts_result.data: + for account in accounts_result.data: + creator_names[account['id']] = account.get('name') or account.get('slug') + + templates = [] + for template_data in result.data: + creator_name = creator_names.get(template_data['creator_id']) + template_data['creator_name'] = creator_name + templates.append(self._map_to_template(template_data)) + + return templates async def publish_template(self, template_id: str, creator_id: str) -> bool: logger.info(f"Publishing template {template_id}") @@ -450,6 +489,8 @@ class TemplateService: await client.table('agent_templates').insert(template_data).execute() def _map_to_template(self, data: Dict[str, Any]) -> AgentTemplate: + creator_name = data.get('creator_name') + return AgentTemplate( template_id=data['template_id'], creator_id=data['creator_id'], @@ -464,7 +505,8 @@ class TemplateService: updated_at=datetime.fromisoformat(data['updated_at'].replace('Z', '+00:00')), avatar=data.get('avatar'), avatar_color=data.get('avatar_color'), - metadata=data.get('metadata', {}) + metadata=data.get('metadata', {}), + creator_name=creator_name ) def get_template_service(db_connection: DBConnection) -> TemplateService: diff --git a/backend/templates/utils.py b/backend/templates/utils.py index c9259320..a8a1eb53 100644 --- a/backend/templates/utils.py +++ b/backend/templates/utils.py @@ -133,7 +133,8 @@ def format_template_for_response(template: AgentTemplate) -> Dict[str, Any]: 'updated_at': template.updated_at.isoformat(), 
'avatar': template.avatar, 'avatar_color': template.avatar_color, - 'metadata': template.metadata + 'metadata': template.metadata, + 'creator_name': template.creator_name } diff --git a/backend/triggers/execution_service.py b/backend/triggers/execution_service.py index dd94c500..411f1ac1 100644 --- a/backend/triggers/execution_service.py +++ b/backend/triggers/execution_service.py @@ -468,8 +468,13 @@ class WorkflowExecutor: } for tool_key, tool_names in tool_mapping.items(): - if agentpress_tools.get(tool_key, {}).get('enabled', False): - available_tools.extend(tool_names) + tool_config = agentpress_tools.get(tool_key, False) + if isinstance(tool_config, bool): + if tool_config: + available_tools.extend(tool_names) + elif isinstance(tool_config, dict): + if tool_config.get('enabled', False): + available_tools.extend(tool_names) all_mcps = [] if agent_config.get('configured_mcps'): diff --git a/frontend/src/app/(dashboard)/agents/config/[agentId]/page.tsx b/frontend/src/app/(dashboard)/agents/config/[agentId]/page.tsx index e670057c..d34c7e60 100644 --- a/frontend/src/app/(dashboard)/agents/config/[agentId]/page.tsx +++ b/frontend/src/app/(dashboard)/agents/config/[agentId]/page.tsx @@ -504,12 +504,12 @@ export default function AgentConfigurationPage() { > {isSaving ? ( <> - + Saving... ) : ( <> - + Save )} diff --git a/frontend/src/app/(dashboard)/agents/config/[agentId]/workflow/[workflowId]/page.tsx b/frontend/src/app/(dashboard)/agents/config/[agentId]/workflow/[workflowId]/page.tsx index 3d5db299..0b866a46 100644 --- a/frontend/src/app/(dashboard)/agents/config/[agentId]/workflow/[workflowId]/page.tsx +++ b/frontend/src/app/(dashboard)/agents/config/[agentId]/workflow/[workflowId]/page.tsx @@ -13,34 +13,30 @@ import { useAgentTools } from '@/hooks/react-query/agents/use-agent-tools'; import { useAgent } from '@/hooks/react-query/agents/use-agents'; import { ConditionalStep } from '@/components/agents/workflows/conditional-workflow-builder'; import { WorkflowBuilder } from '@/components/workflows/workflow-builder'; +import { WorkflowExecutionDialog } from '@/components/workflows/workflow-execution-dialog'; const convertToNestedJSON = (steps: ConditionalStep[]): any[] => { - // Clean, simple conversion - preserve the exact structure with order field for validation let globalOrder = 1; const convertStepsWithNesting = (stepList: ConditionalStep[]): any[] => { return stepList.map((step) => { - // Build clean step object with required fields for backend validation const jsonStep: any = { - id: step.id, // CRITICAL: Always include ID + id: step.id, name: step.name, description: step.description, type: step.type, config: step.config || {}, - order: globalOrder++ // Required by backend validation + order: globalOrder++ }; - // Add conditional metadata if present if (step.type === 'condition' && step.conditions) { jsonStep.conditions = step.conditions; } - // Add parent relationship if present if (step.parentConditionalId) { jsonStep.parentConditionalId = step.parentConditionalId; } - // Add children if present if (step.children && step.children.length > 0) { jsonStep.children = convertStepsWithNesting(step.children); } @@ -245,6 +241,10 @@ export default function WorkflowPage() { const [triggerPhrase, setTriggerPhrase] = useState(''); const [isDefault, setIsDefault] = useState(false); const [steps, setSteps] = useState([]); + + // Execution state + const [isExecuteDialogOpen, setIsExecuteDialogOpen] = useState(false); + const [currentWorkflow, setCurrentWorkflow] = useState(null); // Wrapper 
for setSteps const setStepsWithDebug = useCallback((newSteps: ConditionalStep[]) => { @@ -345,6 +345,21 @@ export default function WorkflowPage() { } }, [workflowName, workflowDescription, triggerPhrase, isDefault, steps, agentId, workflowId, isEditing, createWorkflowMutation, updateWorkflowMutation, router]); + const handleExecute = useCallback(() => { + const workflow = workflows.find(w => w.id === workflowId); + if (workflow) { + setCurrentWorkflow(workflow); + setIsExecuteDialogOpen(true); + } else { + toast.error('Workflow not found or not saved yet'); + } + }, [workflows, workflowId]); + + const handleExecutionSuccess = useCallback(() => { + setIsExecuteDialogOpen(false); + setCurrentWorkflow(null); + }, []); + if (isLoading || isLoadingWorkflows) { return (
@@ -357,20 +372,33 @@ export default function WorkflowPage() { } return ( - + <> + + + + ); } \ No newline at end of file diff --git a/frontend/src/components/agents/custom-agents-page/agent-card.tsx b/frontend/src/components/agents/custom-agents-page/agent-card.tsx index 2e4c4ce8..e9b38839 100644 --- a/frontend/src/components/agents/custom-agents-page/agent-card.tsx +++ b/frontend/src/components/agents/custom-agents-page/agent-card.tsx @@ -1,7 +1,7 @@ 'use client'; import React from 'react'; -import { Download, CheckCircle, Loader2, Globe, GlobeLock, GitBranch, Trash2, MoreVertical } from 'lucide-react'; +import { Download, CheckCircle, Loader2, Globe, GlobeLock, GitBranch, Trash2, MoreVertical, User } from 'lucide-react'; import { Badge } from '@/components/ui/badge'; import { Button } from '@/components/ui/button'; import { @@ -147,7 +147,11 @@ const AgentBadges: React.FC<{ agent: AgentData, isSunaAgent: boolean }> = ({ age ); const MarketplaceMetadata: React.FC<{ data: MarketplaceData }> = ({ data }) => ( -
+
+
+ + {data.creator_name || 'Anonymous'} +
{data.download_count} installs diff --git a/frontend/src/components/agents/workflows/agent-workflows-configuration.tsx b/frontend/src/components/agents/workflows/agent-workflows-configuration.tsx index 8358c6d9..5d2abe23 100644 --- a/frontend/src/components/agents/workflows/agent-workflows-configuration.tsx +++ b/frontend/src/components/agents/workflows/agent-workflows-configuration.tsx @@ -2,14 +2,13 @@ import React, { useState, useCallback } from 'react'; import { useRouter } from 'next/navigation'; -import { Plus, AlertCircle, Workflow, Trash2, Calendar } from 'lucide-react'; +import { Plus, AlertCircle, Workflow, Trash2, Calendar, Play } from 'lucide-react'; import { Button } from '@/components/ui/button'; import { Card } from '@/components/ui/card'; import { Badge } from '@/components/ui/badge'; -import { Dialog, DialogContent, DialogDescription, DialogHeader, DialogTitle } from '@/components/ui/dialog'; + import { AlertDialog, AlertDialogAction, AlertDialogCancel, AlertDialogContent, AlertDialogDescription, AlertDialogFooter, AlertDialogHeader, AlertDialogTitle } from '@/components/ui/alert-dialog'; -import { Label } from '@/components/ui/label'; -import { Textarea } from '@/components/ui/textarea'; +import { WorkflowExecutionDialog } from '@/components/workflows/workflow-execution-dialog'; import { Tabs, TabsContent } from '@/components/ui/tabs'; import { Alert, AlertDescription } from '@/components/ui/alert'; import { toast } from 'sonner'; @@ -17,8 +16,7 @@ import { useAgentWorkflows, useCreateAgentWorkflow, useUpdateAgentWorkflow, - useDeleteAgentWorkflow, - useExecuteWorkflow, + useDeleteAgentWorkflow } from '@/hooks/react-query/agents/use-agent-workflows'; import { AgentWorkflow @@ -36,7 +34,6 @@ export function AgentWorkflowsConfiguration({ agentId, agentName }: AgentWorkflo const createWorkflowMutation = useCreateAgentWorkflow(); const updateWorkflowMutation = useUpdateAgentWorkflow(); const deleteWorkflowMutation = useDeleteAgentWorkflow(); - const executeWorkflowMutation = useExecuteWorkflow(); const [isExecuteDialogOpen, setIsExecuteDialogOpen] = useState(false); const [workflowToExecute, setWorkflowToExecute] = useState(null); @@ -44,7 +41,7 @@ export function AgentWorkflowsConfiguration({ agentId, agentName }: AgentWorkflo const [workflowToDelete, setWorkflowToDelete] = useState(null); const [activeTab, setActiveTab] = useState('workflows'); - const [executionInput, setExecutionInput] = useState(''); + const handleCreateWorkflow = useCallback(async () => { try { @@ -111,27 +108,10 @@ export function AgentWorkflowsConfiguration({ agentId, agentName }: AgentWorkflo } }, [agentId, workflowToDelete, deleteWorkflowMutation]); - const handleConfirmExecution = useCallback(async () => { - if (!workflowToExecute) return; - - try { - const result = await executeWorkflowMutation.mutateAsync({ - agentId, - workflowId: workflowToExecute.id, - execution: { - input_data: executionInput.trim() ? { prompt: executionInput } : undefined - } - }); - - setIsExecuteDialogOpen(false); - setWorkflowToExecute(null); - setExecutionInput(''); - - toast.success(`${result.message}`); - } catch (error) { - toast.error('Failed to execute workflow'); - } - }, [agentId, workflowToExecute, executionInput, executeWorkflowMutation]); + const handleExecutionSuccess = useCallback((result: any) => { + setIsExecuteDialogOpen(false); + setWorkflowToExecute(null); + }, []); @@ -190,7 +170,7 @@ export function AgentWorkflowsConfiguration({ agentId, agentName }: AgentWorkflo {workflows.map((workflow) => (
handleWorkflowClick(workflow.id)} >
@@ -214,9 +194,9 @@ export function AgentWorkflowsConfiguration({ agentId, agentName }: AgentWorkflo e.stopPropagation(); handleExecuteWorkflow(workflow); }} - disabled={workflow.status !== 'active' || executeWorkflowMutation.isPending} + disabled={workflow.status !== 'active'} > - + Execute
- - - - Execute Workflow - - Provide input data for "{workflowToExecute?.name}" workflow - - -
-
- -
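
The same bool-vs-dict check on `agentpress_tools` entries now appears three times in this diff (agent_config_tool.py, workflow_tool.py, and execution_service.py). A minimal sketch of how that branch could be factored into a shared helper, assuming a plain function; the name `enabled_tool_names` and the example tool keys are illustrative and not part of this PR:

```python
from typing import Any, Dict, List


def enabled_tool_names(agentpress_tools: Dict[str, Any]) -> List[str]:
    # Accept both config shapes found in agent configs:
    #   {"some_tool": True}                  -> plain boolean flag
    #   {"some_tool": {"enabled": True}}     -> dict with an "enabled" key
    enabled: List[str] = []
    for name, cfg in agentpress_tools.items():
        if isinstance(cfg, bool):
            if cfg:
                enabled.append(name)
        elif isinstance(cfg, dict) and cfg.get("enabled", False):
            enabled.append(name)
    return enabled


# Example (hypothetical tool keys):
# enabled_tool_names({"web_search": True, "files": {"enabled": False}})
# -> ["web_search"]
```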