diff --git a/backend/webhooks/api.py b/backend/webhooks/api.py
index 47d50cec..ff6671b1 100644
--- a/backend/webhooks/api.py
+++ b/backend/webhooks/api.py
@@ -196,6 +196,8 @@ async def trigger_workflow_webhook(
"metadata": {
"workflow_id": workflow.id,
"workflow_name": workflow.name,
+ "is_workflow_execution": True,
+ "workflow_run_name": f"Workflow Run: {workflow.name}",
"triggered_by": "WEBHOOK",
"execution_id": execution_id
}
diff --git a/backend/workflows/api.py b/backend/workflows/api.py
index 9de4c6e5..cde134a9 100644
--- a/backend/workflows/api.py
+++ b/backend/workflows/api.py
@@ -66,7 +66,8 @@ async def _create_workflow_thread_for_api(
"metadata": {
"workflow_id": workflow.id,
"workflow_name": workflow.name,
- "is_workflow_execution": True
+ "is_workflow_execution": True,
+ "workflow_run_name": f"Workflow Run: {workflow.name}"
},
"created_at": datetime.now(timezone.utc).isoformat()
}
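Both endpoints above now write the same thread metadata shape, with "workflow_run_name" precomputed for display. A minimal sketch of how a consumer might read it (the helper name and fallback chain are illustrative assumptions; only the metadata keys come from the hunks above):

    # Hypothetical consumer-side helper, not part of this diff.
    def thread_display_name(thread: dict) -> str:
        metadata = thread.get("metadata", {})
        if metadata.get("is_workflow_execution"):
            # Prefer the precomputed "workflow_run_name", e.g. "Workflow Run: Daily Digest",
            # falling back to rebuilding it from "workflow_name".
            return metadata.get("workflow_run_name") or f"Workflow Run: {metadata.get('workflow_name', 'Unknown')}"
        return thread.get("name", "Untitled thread")
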
diff --git a/backend/workflows/converter.py b/backend/workflows/converter.py
index f9b79b54..565e201f 100644
--- a/backend/workflows/converter.py
+++ b/backend/workflows/converter.py
@@ -60,6 +60,42 @@ class WorkflowConverter:
logger.info(f"Final enabled_tools list: {enabled_tools}")
+ # Extract model from input node configuration, default to Claude 3.5 Sonnet
+ selected_model = "anthropic/claude-3-5-sonnet-latest"
+ if input_config:
+ # Look for model in input node data
+ for node in nodes:
+ if node.get('type') == 'inputNode':
+ node_data = node.get('data', {})
+ if node_data.get('model'):
+ selected_model = node_data['model']
+ # Ensure the model ID has the correct format
+ if not selected_model.startswith(('anthropic/', 'openai/', 'google/', 'meta-llama/', 'mistralai/', 'deepseek/', 'openrouter/')):
+ # Map common model names to their full IDs
+ model_mapping = {
+ 'claude-sonnet-4': 'anthropic/claude-sonnet-4-20250514',
+ 'claude-sonnet-3.7': 'anthropic/claude-3-7-sonnet-latest',
+ 'claude-3.5': 'anthropic/claude-3-5-sonnet-latest',
+ 'claude-3-5-sonnet-latest': 'anthropic/claude-3-5-sonnet-latest',
+ 'claude-3-5-sonnet-20241022': 'anthropic/claude-3-5-sonnet-20241022',
+ 'claude-3-5-haiku-latest': 'anthropic/claude-3-5-haiku-latest',
+ 'gpt-4o': 'openai/gpt-4o',
+ 'gpt-4o-mini': 'openai/gpt-4o-mini',
+ 'gpt-4.1': 'openai/gpt-4.1',
+ 'gpt-4.1-mini': 'openai/gpt-4.1-mini',
+ 'deepseek-chat': 'openrouter/deepseek/deepseek-chat',
+ 'deepseek': 'openrouter/deepseek/deepseek-chat',
+ 'deepseek-r1': 'openrouter/deepseek/deepseek-r1',
+ 'gemini-2.0-flash-exp': 'google/gemini-2.0-flash-exp',
+ 'gemini-flash-2.5': 'openrouter/google/gemini-2.5-flash-preview-05-20',
+ 'gemini-2.5-flash:thinking': 'openrouter/google/gemini-2.5-flash-preview-05-20:thinking',
+ 'gemini-2.5-pro-preview': 'openrouter/google/gemini-2.5-pro-preview',
+ 'gemini-2.5-pro': 'openrouter/google/gemini-2.5-pro-preview',
+ 'qwen3': 'openrouter/qwen/qwen3-235b-a22b'
+ }
+ selected_model = model_mapping.get(selected_model, f"anthropic/{selected_model}")
+ break
+
agent_step = WorkflowStep(
id="main_agent_step",
name="Workflow Agent",
@@ -69,7 +105,7 @@ class WorkflowConverter:
"tool_name": "workflow_agent",
"system_prompt": workflow_prompt,
"agent_id": metadata.get("agent_id"),
- "model": "anthropic/claude-3-5-sonnet-latest",
+ "model": selected_model,
"max_iterations": 10,
"input_prompt": input_config.prompt if input_config else "",
"tools": enabled_tools,
@@ -254,21 +290,33 @@ class WorkflowConverter:
"""Generate a comprehensive system prompt that describes the workflow."""
prompt_parts = [
- "You are an AI agent executing a workflow. Follow these instructions carefully:",
+ "You are Suna - Workflows, an autonomous AI Agent created by the Kortix team, specialized in executing visual workflows.",
+ "",
+ "# 1. CORE IDENTITY & CAPABILITIES",
+ "You are a workflow-specialized autonomous agent capable of executing complex visual workflows across domains including information gathering, content creation, software development, data analysis, and problem-solving. You operate within the Suna platform and have access to a comprehensive toolkit for workflow execution.",
+ "",
+ "# 2. EXECUTION ENVIRONMENT",
+ "",
+ "## 2.1 SYSTEM INFORMATION",
+ f"- CURRENT YEAR: {datetime.utcnow().year}",
+ f"- UTC DATE: {datetime.utcnow().strftime('%Y-%m-%d')}",
+ f"- UTC TIME: {datetime.utcnow().strftime('%H:%M:%S')}",
+ "- TIME CONTEXT: When searching for latest news or time-sensitive information, ALWAYS use these current date/time values as reference points. Never use outdated information or assume different dates.",
+ "- PLATFORM: Suna - Workflows",
"",
]
# Add input prompt if available
if input_config and input_config.prompt:
prompt_parts.extend([
- "## Workflow Input Prompt",
+ "# 3. WORKFLOW INPUT PROMPT",
input_config.prompt,
"",
])
prompt_parts.extend([
- "## Workflow Overview",
- "This workflow was created visually and consists of the following components:",
+ "# 4. WORKFLOW OVERVIEW",
+ "This workflow was created visually in Suna and consists of the following components:",
""
])
@@ -303,7 +351,7 @@ class WorkflowConverter:
# Add trigger information
if input_config:
prompt_parts.extend([
- "## Trigger Configuration",
+ "# 5. TRIGGER CONFIGURATION",
f"**Trigger Type**: {input_config.trigger_type}",
])
@@ -327,17 +375,7 @@ class WorkflowConverter:
prompt_parts.append("")
prompt_parts.extend([
- "## Execution Instructions",
- "",
- "Execute this workflow by following these steps:",
- "1. Start with the input or trigger conditions",
- "2. Process each component in the logical order defined by the connections",
- "3. Use the available tools as specified in the workflow",
- "4. Follow the data flow between components",
- "5. Provide clear output at each step",
- "6. Handle errors gracefully and provide meaningful feedback",
- "",
- "## Available Tools",
+ "# 6. AVAILABLE TOOLS",
"You have access to the following tools based on the workflow configuration:"
])
@@ -405,7 +443,7 @@ class WorkflowConverter:
if mcp_tool_descriptions:
prompt_parts.extend([
"",
- "### MCP Server Tools",
+ "## MCP Server Tools",
"The following tools are available from MCP servers:"
])
prompt_parts.extend(mcp_tool_descriptions)
@@ -416,7 +454,7 @@ class WorkflowConverter:
if xml_examples:
prompt_parts.extend([
"",
- "## Tool Usage Examples",
+ "# 7. TOOL USAGE EXAMPLES",
"Use the following XML format to call tools. Each tool call must be wrapped in
+ {allModels.find(m => m.id === currentModel)?.label || currentModel || "Claude Sonnet 4"} +
+@@ -162,6 +188,23 @@ const InputNode = memo(({ data, selected, id }: NodeProps) => { />
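
The model normalization added to WorkflowConverter can be exercised on its own. A rough standalone sketch, with a trimmed-down mapping and an illustrative normalize_model_id name (neither exists in the codebase); the prefix check and the anthropic/ fallback mirror the converter hunk:

    # Illustrative reimplementation of the normalization rule from the converter diff.
    MODEL_MAPPING = {
        "claude-sonnet-4": "anthropic/claude-sonnet-4-20250514",
        "gpt-4o": "openai/gpt-4o",
        "deepseek-chat": "openrouter/deepseek/deepseek-chat",
    }

    KNOWN_PREFIXES = ("anthropic/", "openai/", "google/", "meta-llama/", "mistralai/", "deepseek/", "openrouter/")

    def normalize_model_id(model: str) -> str:
        # Already-qualified IDs pass through; known short names map to full IDs;
        # anything else falls back to an anthropic/ prefix, as in the diff.
        if model.startswith(KNOWN_PREFIXES):
            return model
        return MODEL_MAPPING.get(model, f"anthropic/{model}")

    assert normalize_model_id("gpt-4o") == "openai/gpt-4o"
    assert normalize_model_id("openai/gpt-4o") == "openai/gpt-4o"
    assert normalize_model_id("mystery-model") == "anthropic/mystery-model"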