This commit is contained in:
marko-kraemer 2025-07-07 00:32:25 +02:00
parent c1c3f6cce1
commit 587dfccd78
1 changed file with 114 additions and 85 deletions

View File

@ -319,6 +319,7 @@ async def run_agent(
data = latest_user_message.data[0]['content']
if isinstance(data, str):
data = json.loads(data)
if trace:
trace.update(input=data['content'])
while continue_execution and iteration_count < max_iterations:
@ -329,6 +330,7 @@ async def run_agent(
can_run, message, subscription = await check_billing_status(client, account_id)
if not can_run:
error_msg = f"Billing limit reached: {message}"
if trace:
trace.event(name="billing_limit_reached", level="ERROR", status_message=(f"{error_msg}"))
# Yield a special message to indicate billing limit reached
yield {
@ -343,6 +345,7 @@ async def run_agent(
message_type = latest_message.data[0].get('type')
if message_type == 'assistant':
logger.info(f"Last message was from assistant, stopping execution")
if trace:
trace.event(name="last_message_from_assistant", level="DEFAULT", status_message=(f"Last message was from assistant, stopping execution"))
continue_execution = False
break
@ -383,6 +386,7 @@ async def run_agent(
"format": "image/jpeg"
}
})
if trace:
trace.event(name="screenshot_url_added_to_temporary_message", level="DEFAULT", status_message=(f"Screenshot URL added to temporary message."))
elif screenshot_base64:
# Fallback to base64 if URL not available
@ -392,16 +396,20 @@ async def run_agent(
"url": f"data:image/jpeg;base64,{screenshot_base64}",
}
})
if trace:
trace.event(name="screenshot_base64_added_to_temporary_message", level="WARNING", status_message=(f"Screenshot base64 added to temporary message. Prefer screenshot_url if available."))
else:
logger.warning("Browser state found but no screenshot data.")
if trace:
trace.event(name="browser_state_found_but_no_screenshot_data", level="WARNING", status_message=(f"Browser state found but no screenshot data."))
else:
logger.warning("Model is Gemini, Anthropic, or OpenAI, so not adding screenshot to temporary message.")
if trace:
trace.event(name="model_is_gemini_anthropic_or_openai", level="WARNING", status_message=(f"Model is Gemini, Anthropic, or OpenAI, so not adding screenshot to temporary message."))
except Exception as e:
logger.error(f"Error parsing browser state: {e}")
if trace:
trace.event(name="error_parsing_browser_state", level="ERROR", status_message=(f"{e}"))
# Get the latest image_context message (NEW)
@ -430,6 +438,7 @@ async def run_agent(
await client.table('messages').delete().eq('message_id', latest_image_context_msg.data[0]["message_id"]).execute()
except Exception as e:
logger.error(f"Error parsing image context: {e}")
if trace:
trace.event(name="error_parsing_image_context", level="ERROR", status_message=(f"{e}"))
# If we have any content, construct the temporary_message
@ -449,7 +458,7 @@ async def run_agent(
# Gemini 2.5 Pro has 64k max output tokens
max_tokens = 64000
generation = trace.generation(name="thread_manager.run_thread")
generation = trace.generation(name="thread_manager.run_thread") if trace else None
try:
# Make the LLM call and process the response
response = await thread_manager.run_thread(
@ -480,6 +489,7 @@ async def run_agent(
if isinstance(response, dict) and "status" in response and response["status"] == "error":
logger.error(f"Error response from run_thread: {response.get('message', 'Unknown error')}")
if trace:
trace.event(name="error_response_from_run_thread", level="ERROR", status_message=(f"{response.get('message', 'Unknown error')}"))
yield response
break
@ -490,12 +500,15 @@ async def run_agent(
# Process the response
error_detected = False
try:
full_response = ""
try:
# Check if response is iterable (async generator) or a dict (error case)
if hasattr(response, '__aiter__') and not isinstance(response, dict):
async for chunk in response:
# If we receive an error chunk, we should stop after this iteration
if isinstance(chunk, dict) and chunk.get('type') == 'status' and chunk.get('status') == 'error':
logger.error(f"Error chunk detected: {chunk.get('message', 'Unknown error')}")
if trace:
trace.event(name="error_chunk_detected", level="ERROR", status_message=(f"{chunk.get('message', 'Unknown error')}"))
error_detected = True
yield chunk # Forward the error chunk
@ -512,6 +525,7 @@ async def run_agent(
if metadata.get('agent_should_terminate'):
agent_should_terminate = True
logger.info("Agent termination signal detected in status message")
if trace:
trace.event(name="agent_termination_signal_detected", level="DEFAULT", status_message="Agent termination signal detected in status message")
# Extract the tool name from the status content if available
@ -551,27 +565,38 @@ async def run_agent(
last_tool_call = xml_tool
logger.info(f"Agent used XML tool: {xml_tool}")
if trace:
trace.event(name="agent_used_xml_tool", level="DEFAULT", status_message=(f"Agent used XML tool: {xml_tool}"))
except json.JSONDecodeError:
# Handle cases where content might not be valid JSON
logger.warning(f"Warning: Could not parse assistant content JSON: {chunk.get('content')}")
if trace:
trace.event(name="warning_could_not_parse_assistant_content_json", level="WARNING", status_message=(f"Warning: Could not parse assistant content JSON: {chunk.get('content')}"))
except Exception as e:
logger.error(f"Error processing assistant chunk: {e}")
if trace:
trace.event(name="error_processing_assistant_chunk", level="ERROR", status_message=(f"Error processing assistant chunk: {e}"))
yield chunk
else:
# Response is not iterable, likely an error dict
logger.error(f"Response is not iterable: {response}")
error_detected = True
# Check if we should stop based on the last tool call or error
if error_detected:
logger.info(f"Stopping due to error detected in response")
if trace:
trace.event(name="stopping_due_to_error_detected_in_response", level="DEFAULT", status_message=(f"Stopping due to error detected in response"))
if generation:
generation.end(output=full_response, status_message="error_detected", level="ERROR")
break
if agent_should_terminate or last_tool_call in ['ask', 'complete', 'web-browser-takeover']:
logger.info(f"Agent decided to stop with tool: {last_tool_call}")
if trace:
trace.event(name="agent_decided_to_stop_with_tool", level="DEFAULT", status_message=(f"Agent decided to stop with tool: {last_tool_call}"))
if generation:
generation.end(output=full_response, status_message="agent_stopped")
continue_execution = False
@ -579,7 +604,9 @@ async def run_agent(
# Just log the error and re-raise to stop all iterations
error_msg = f"Error during response streaming: {str(e)}"
logger.error(f"Error: {error_msg}")
if trace:
trace.event(name="error_during_response_streaming", level="ERROR", status_message=(f"Error during response streaming: {str(e)}"))
if generation:
generation.end(output=full_response, status_message=error_msg, level="ERROR")
yield {
"type": "status",
@ -593,6 +620,7 @@ async def run_agent(
# Just log the error and re-raise to stop all iterations
error_msg = f"Error running thread: {str(e)}"
logger.error(f"Error: {error_msg}")
if trace:
trace.event(name="error_running_thread", level="ERROR", status_message=(f"Error running thread: {str(e)}"))
yield {
"type": "status",
@ -601,6 +629,7 @@ async def run_agent(
}
# Stop execution immediately on any error
break
if generation:
generation.end(output=full_response)
langfuse.flush()