diff --git a/backend/agent/run.py b/backend/agent/run.py
index bf8d33e1..8722387c 100644
--- a/backend/agent/run.py
+++ b/backend/agent/run.py
@@ -5,7 +5,7 @@ from agent.tools.files_tool import FilesTool
 from agent.tools.terminal_tool import TerminalTool
 from agent.tools.wait_tool import WaitTool
 # from agent.tools.search_tool import CodeSearchTool
-from typing import AsyncGenerator, Optional, Union, Dict, Any
+from typing import Optional
 from agent.test_prompt import get_system_prompt
 from agentpress.response_processor import ProcessorConfig
 from dotenv import load_dotenv
diff --git a/backend/agent/workspace/index.html b/backend/agent/workspace/index.html
deleted file mode 100644
index 92b9931a..00000000
--- a/backend/agent/workspace/index.html
+++ /dev/null
@@ -1,94 +0,0 @@
[94 deleted lines: an HTML portfolio page titled "Adam Cohen Hillel | Creative Developer", with a hero header ("Digital Alchemist & Code Artist", "Turning caffeine into code & dreams into pixels"), an about section ("The Plot Twist"), an experience section ("The Magic Happens Here" with cards for Creative Coding, Digital Innovation, and Performance Art), a contact section ("Let's Create Something Legendary") with a form and social links, and a footer. The HTML markup itself was lost in extraction; only the text content listed here is recoverable.]
\ No newline at end of file
diff --git a/backend/agent/workspace/styles.css b/backend/agent/workspace/styles.css
deleted file mode 100644
index a33c9521..00000000
--- a/backend/agent/workspace/styles.css
+++ /dev/null
@@ -1,272 +0,0 @@
-/* Reset and Base Styles */
-* {
-    margin: 0;
-    padding: 0;
-    box-sizing: border-box;
-}
-
-:root {
-    --primary-color: #ff0099;
-    --secondary-color: #00ff99;
-    --accent-color: #6e00ff;
-    --dark: #121212;
-    --light: #f8f9fa;
-}
-
-body {
-    font-family: 'Poppins', sans-serif;
-    line-height: 1.6;
-    color: var(--dark);
-    background-color: var(--dark);
-    overflow-x: hidden;
-}
-
-.container {
-    max-width: 1200px;
-    margin: 0 auto;
-    padding: 0 20px;
-}
-
-/* Header and Navigation */
-header {
-    background: rgba(18, 18, 18, 0.95);
-    backdrop-filter: blur(10px);
-    box-shadow: 0 4px 30px rgba(255, 0, 153, 0.2);
-    position: fixed;
-    width: 100%;
-    top: 0;
-    z-index: 1000;
-}
-
-nav ul {
-    display: flex;
-    justify-content: center;
-    list-style: none;
-    padding: 20px 0;
-}
-
-nav ul li {
-    margin: 0 20px;
-    position: relative;
-}
-
-nav ul li a {
-    text-decoration: none;
-    color: var(--light);
-    font-weight: 500;
-    transition: all 0.3s ease;
-    padding: 8px 16px;
-    border-radius: 20px;
-    text-transform: uppercase;
-    letter-spacing: 1px;
-}
-
-nav ul li a:hover {
-    color: var(--dark);
-    background: linear-gradient(45deg, var(--primary-color), var(--secondary-color));
-    box-shadow: 0 0 20px rgba(255, 0, 153, 0.4);
-}
-
-/* Hero Section */
-#hero {
-    height: 100vh;
-    background: linear-gradient(135deg, var(--dark) 0%, #1a1a1a 100%);
-    display: flex;
-    align-items: center;
-    justify-content: center;
-    text-align: center;
-    position: relative;
-    overflow: hidden;
-}
-
-#hero::before {
-    content: '';
-    position: absolute;
-    width: 200%;
-    height: 200%;
-    background: radial-gradient(circle, var(--primary-color) 1%, transparent 10%);
-    background-size: 50px 50px;
-    animation: moveBackground 20s linear infinite;
-    opacity: 0.1;
-}
-
-.hero-content {
-    position: relative;
-    z-index: 2;
-}
-
-.hero-content h1 {
-    font-size: 5rem;
-    margin-bottom: 20px;
-    color: var(--light);
-    text-shadow: 0 0 20px var(--primary-color);
-    animation: glitch 5s infinite;
-}
-
-.tagline {
-    font-size: 2rem;
-    color: var(--secondary-color);
-    margin-bottom: 15px;
-    text-transform: uppercase;
-    letter-spacing: 3px;
-}
-
-.sub-tagline {
-    font-size: 1.2rem;
-    color: var(--primary-color);
-    opacity: 0.8;
-}
-
-/* About Section */
-#about {
-    background-color: var(--dark);
-    color: var(--light);
-}
-
-.about-content {
-    background: rgba(255, 255, 255, 0.05);
-    border: 2px solid var(--primary-color);
-    border-radius: 20px;
-    padding: 40px;
-    box-shadow: 0 0 30px rgba(255, 0, 153, 0.2);
-}
-
-.about-text p {
-    font-size: 1.2rem;
-    margin-bottom: 20px;
-    line-height: 1.8;
-}
-
-/* Experience Section */
-.experience-card {
-    background: rgba(255, 255, 255, 0.05);
-    border: 2px solid var(--secondary-color);
-    padding: 40px;
-    border-radius: 20px;
-    transition: all 0.4s cubic-bezier(0.175, 0.885, 0.32, 1.275);
-}
-
-.experience-card:hover {
-    transform: translateY(-15px) scale(1.05);
-    box-shadow: 0 0 30px rgba(0, 255, 153, 0.3);
-}
-
-.experience-card i {
-    font-size: 3.5rem;
-    background: linear-gradient(45deg, var(--primary-color), var(--secondary-color));
-    -webkit-background-clip: text;
-    color: transparent;
-    margin-bottom: 20px;
-}
-
-.experience-card h3 {
-    color: var(--light);
-    margin-bottom: 15px;
-}
-
-.experience-card p {
-    color: rgba(255, 255, 255, 0.7);
-}
-
-/* Contact Section */
-.contact-content {
-    background: rgba(255, 255, 255, 0.05);
-    border: 2px solid var(--accent-color);
-    padding: 40px;
-    border-radius: 20px;
-}
-
-.social-link {
-    font-size: 2.5rem;
-    color: var(--light);
-    margin: 0 20px;
-    transition: all 0.3s ease;
-}
-
-.social-link:hover {
-    color: var(--primary-color);
-    transform: translateY(-5px) scale(1.2);
-}
-
-.contact-form input,
-.contact-form textarea {
-    background: rgba(255, 255, 255, 0.05);
-    border: 2px solid var(--primary-color);
-    color: var(--light);
-    padding: 15px;
-    border-radius: 10px;
-    margin-bottom: 20px;
-    transition: all 0.3s ease;
-}
-
-.contact-form input:focus,
-.contact-form textarea:focus {
-    border-color: var(--secondary-color);
-    box-shadow: 0 0 20px rgba(0, 255, 153, 0.2);
-}
-
-.contact-form button {
-    background: linear-gradient(45deg, var(--primary-color), var(--secondary-color));
-    color: var(--dark);
-    font-weight: bold;
-    padding: 15px 40px;
-    border: none;
-    border-radius: 25px;
-    cursor: pointer;
-    font-size: 1.1rem;
-    transition: all 0.3s ease;
-    text-transform: uppercase;
-    letter-spacing: 2px;
-}
-
-.contact-form button:hover {
-    transform: translateY(-5px);
-    box-shadow: 0 0 30px rgba(255, 0, 153, 0.4);
-}
-
-/* Footer */
-footer {
-    background: var(--dark);
-    color: var(--light);
-    text-align: center;
-    padding: 30px 0;
-    border-top: 4px solid var(--primary-color);
-}
-
-/* Animations */
-@keyframes glitch {
-    0% { text-shadow: 0 0 20px var(--primary-color); }
-    25% { text-shadow: -2px 0 var(--primary-color), 2px 0 var(--secondary-color); }
-    50% { text-shadow: 2px 0 var(--secondary-color), -2px 0 var(--primary-color); }
-    75% { text-shadow: 0 0 20px var(--secondary-color); }
-    100% { text-shadow: 0 0 20px var(--primary-color); }
-}
-
-@keyframes moveBackground {
-    0% { transform: translate(0, 0); }
-    100% { transform: translate(-50%, -50%); }
-}
-
-/* Responsive Design */
-@media (max-width: 768px) {
-    .hero-content h1 {
-        font-size: 3rem;
-    }
-
-    .tagline {
-        font-size: 1.5rem;
-    }
-
-    nav ul {
-        flex-direction: column;
-        align-items: center;
-    }
-
-    nav ul li {
-        margin: 10px 0;
-    }
-
-    .experience-grid {
-        grid-template-columns: 1fr;
-    }
-}
\ No newline at end of file
diff --git a/backend/agentpress/response_processor.py b/backend/agentpress/response_processor.py
index ad4afd07..a8ab17d9 100644
--- a/backend/agentpress/response_processor.py
+++ b/backend/agentpress/response_processor.py
@@ -9,7 +9,6 @@ This module handles processing of LLM responses including:
 """
 
 import json
-import logging
 import asyncio
 import re
 import uuid
@@ -28,7 +27,8 @@ ToolExecutionStrategy = Literal["sequential", "parallel"]
 
 @dataclass
 class ProcessorConfig:
-    """Configuration for response processing and tool execution.
+    """
+    Configuration for response processing and tool execution.
     
     This class controls how the LLM's responses are processed, including
     how tool calls are detected, executed, and their results handled.
@@ -41,11 +41,10 @@ class ProcessorConfig:
         tool_execution_strategy: How to execute multiple tools ("sequential" or "parallel")
         xml_adding_strategy: How to add XML tool results to the conversation
     """
-    # Tool detection
+    
     xml_tool_calling: bool = True
    native_tool_calling: bool = False
-    
-    # Tool execution
+    
     execute_tools: bool = True
     execute_on_stream: bool = False
     tool_execution_strategy: ToolExecutionStrategy = "sequential"
@@ -98,6 +97,9 @@ class ResponseProcessor:
         # For tracking tool results during streaming to add later
         tool_results_buffer = []
         
+        # For tracking pending tool executions
+        pending_tool_executions = []
+        
         logger.info(f"Starting to process streaming response for thread {thread_id}")
         logger.info(f"Config: XML={config.xml_tool_calling}, Native={config.native_tool_calling}, "
                    f"Execute on stream={config.execute_on_stream}, Execution strategy={config.tool_execution_strategy}")
@@ -131,19 +133,18 @@ class ResponseProcessor:
                         # Parse and extract the tool call
                         tool_call = self._parse_xml_tool_call(xml_chunk)
                         if tool_call:
-                            # Execute tool if needed, but store results for later
+                            # Execute tool if needed, but in background
                             if config.execute_tools and config.execute_on_stream:
-                                result = await self._execute_tool(tool_call)
+                                # Start tool execution as a background task
+                                execution_task = asyncio.create_task(self._execute_tool(tool_call))
                                 
-                                # Store result to add after assistant message
-                                tool_results_buffer.append((tool_call, result))
+                                # Store the task for later retrieval
+                                pending_tool_executions.append({
+                                    "task": execution_task,
+                                    "tool_call": tool_call
+                                })
                                 
-                                # Yield tool execution result for client display
-                                yield {
-                                    "type": "tool_result",
-                                    "name": tool_call["name"],
-                                    "result": str(result)
-                                }
+                                # Immediately continue processing more chunks
 
                 # Process native tool calls
                 if config.native_tool_calling and delta and hasattr(delta, 'tool_calls') and delta.tool_calls:
@@ -190,18 +191,74 @@ class ResponseProcessor:
                                     "arguments": json.loads(current_tool['function']['arguments']),
                                     "id": current_tool['id']
                                 }
-                                result = await self._execute_tool(tool_call_data)
                                 
-                                # Store result to add after assistant message
-                                tool_results_buffer.append((tool_call_data, result))
+                                # Start tool execution as a background task
+                                execution_task = asyncio.create_task(self._execute_tool(tool_call_data))
                                 
-                                # Yield tool execution result for client display
-                                yield {
-                                    "type": "tool_result",
-                                    "name": tool_call_data["name"],
-                                    "result": str(result)
-                                }
+                                # Store the task for later retrieval
+                                pending_tool_executions.append({
+                                    "task": execution_task,
+                                    "tool_call": tool_call_data
+                                })
+                                
+                                # Immediately continue processing more chunks
+                
+                # Check for completed tool executions
+                completed_executions = []
+                for i, execution in enumerate(pending_tool_executions):
+                    if execution["task"].done():
+                        try:
+                            # Get the result
+                            result = execution["task"].result()
+                            tool_call = execution["tool_call"]
+                            
+                            # Store result for later database updates
+                            tool_results_buffer.append((tool_call, result))
+                            
+                            # Yield tool execution result for client display
+                            yield {
+                                "type": "tool_result",
+                                "name": tool_call["name"],
+                                "result": str(result)
+                            }
+                            
+                            # Mark for removal
+                            completed_executions.append(i)
+                            
+                        except Exception as e:
+                            logger.error(f"Error getting tool execution result: {str(e)}")
+                
+                # Remove completed executions from pending list (in reverse to maintain indices)
+                for i in sorted(completed_executions, reverse=True):
+                    pending_tool_executions.pop(i)
+            
+            # After streaming completes, wait for any remaining tool executions
+            if pending_tool_executions:
+                logger.info(f"Waiting for {len(pending_tool_executions)} pending tool executions to complete")
+                
+                # Wait for all pending tasks to complete
+                pending_tasks = [execution["task"] for execution in pending_tool_executions]
+                done, _ = await asyncio.wait(pending_tasks)
+                
+                # Process results
+                for execution in pending_tool_executions:
+                    try:
+                        if execution["task"].done():
+                            result = execution["task"].result()
+                            tool_call = execution["tool_call"]
+                            
+                            # Store result for later
+                            tool_results_buffer.append((tool_call, result))
+                            
+                            # Yield tool execution result
+                            yield {
+                                "type": "tool_result",
+                                "name": tool_call["name"],
+                                "result": str(result)
+                            }
+                    except Exception as e:
+                        logger.error(f"Error processing remaining tool execution: {str(e)}")
+            
             # After streaming completes, process any remaining content and tool calls
             if accumulated_content:
                 # Extract final complete tool calls for native format
diff --git a/backend/agentpress/thread_manager.py b/backend/agentpress/thread_manager.py
index 55639182..6440699d 100644
--- a/backend/agentpress/thread_manager.py
+++ b/backend/agentpress/thread_manager.py
@@ -10,19 +10,14 @@ This module provides comprehensive conversation management, including:
 """
 
 import json
-import logging
-import asyncio
 import uuid
-import re
 from typing import List, Dict, Any, Optional, Type, Union, AsyncGenerator, Tuple, Callable, Literal
 from services.llm import make_llm_api_call
 from agentpress.tool import Tool, ToolResult
 from agentpress.tool_registry import ToolRegistry
 from agentpress.response_processor import (
     ResponseProcessor,
-    ProcessorConfig,
-    XmlAddingStrategy,
-    ToolExecutionStrategy
+    ProcessorConfig
 )
 from services.supabase import DBConnection
 from utils.logger import logger
@@ -318,6 +313,15 @@ class ThreadManager:
         logger.debug(f"Processor config: XML={processor_config.xml_tool_calling}, Native={processor_config.native_tool_calling}, "
                     f"Execute tools={processor_config.execute_tools}, Strategy={processor_config.tool_execution_strategy}")
         
+        # Check if native_tool_calling is enabled and throw an error if it is
+        if processor_config.native_tool_calling:
+            error_message = "Native tool calling is not supported in this version"
+            logger.error(error_message)
+            return {
+                "status": "error",
+                "message": error_message
+            }
+        
         # 4. Prepare tools for LLM call
         openapi_tool_schemas = None
         if processor_config.native_tool_calling:
diff --git a/backend/backend/tests/__init__.py b/backend/backend/tests/__init__.py
deleted file mode 100644
index e69de29b..00000000
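
The substantive change in response_processor.py above is that tool calls detected mid-stream are no longer awaited inline: each is launched with asyncio.create_task, completed tasks are drained between chunks, and any still-pending tasks are awaited once the stream ends. Below is a minimal, self-contained sketch of that pattern; fake_tool, stream_chunks, and the every-third-chunk trigger are illustrative stand-ins, not code from this repository.

import asyncio

async def fake_tool(name: str, delay: float) -> str:
    # Stand-in for ResponseProcessor._execute_tool (illustrative only).
    await asyncio.sleep(delay)
    return f"{name} finished"

async def stream_chunks(n: int):
    # Stand-in for the streaming LLM response.
    for i in range(1, n + 1):
        await asyncio.sleep(0.05)
        yield f"chunk {i}"

async def process_stream() -> list:
    pending = []   # mirrors pending_tool_executions
    results = []   # mirrors tool_results_buffer

    chunk_no = 0
    async for _chunk in stream_chunks(10):
        chunk_no += 1

        # Pretend every third chunk carries a tool call: start it in the
        # background instead of awaiting it, so chunk handling is never blocked.
        if chunk_no % 3 == 0:
            task = asyncio.create_task(fake_tool(f"tool_{chunk_no}", delay=0.2))
            pending.append({"task": task, "tool_call": f"tool_{chunk_no}"})

        # Drain executions that finished while we were handling chunks.
        still_running = []
        for execution in pending:
            if execution["task"].done():
                results.append((execution["tool_call"], execution["task"].result()))
            else:
                still_running.append(execution)
        pending = still_running

    # After the stream ends, wait for whatever is still running.
    if pending:
        await asyncio.wait([execution["task"] for execution in pending])
        for execution in pending:
            results.append((execution["tool_call"], execution["task"].result()))

    return results

if __name__ == "__main__":
    print(asyncio.run(process_stream()))

Running tools concurrently this way keeps chunk delivery responsive; the trade-off is that tool results can arrive out of order relative to the text that triggered them, which is why the diff buffers them and reconciles the conversation after streaming completes.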