marko-kraemer 2025-04-08 00:38:18 +01:00
parent f29455d44e
commit 58ec53e4a5
8 changed files with 699 additions and 240 deletions

View File

@@ -58,7 +58,7 @@ Current development environment workspace state:
llm_model=model_name,
llm_temperature=0.1,
llm_max_tokens=8000,
tool_choice="any",
tool_choice="auto",
processor_config=ProcessorConfig(
xml_tool_calling=False,
native_tool_calling=True,
@@ -101,9 +101,7 @@ async def test_agent():
print(f"Error creating thread: {str(e)}")
return
print("\n" + "="*50)
print(f"🤖 Agent Thread Created: {thread_id}")
print("="*50 + "\n")
print(f"\n🤖 Agent Thread Created: {thread_id}\n")
# Interactive message input loop
while True:
@@ -123,10 +121,7 @@ async def test_agent():
is_llm_message=True
)
# Run the agent and print results
print("\n" + "="*50)
print("🔄 Running agent...")
print("="*50 + "\n")
print("\n🔄 Running agent...\n")
chunk_counter = 0
current_response = ""
@@ -135,65 +130,75 @@ async def test_agent():
async for chunk in run_agent(thread_id=thread_id, stream=True, thread_manager=thread_manager, native_max_auto_continues=25):
chunk_counter += 1
if chunk.get('type') == 'content':
current_response += chunk['content']
if chunk.get('type') == 'content' and 'content' in chunk:
current_response += chunk.get('content', '')
# Print the response as it comes in
print(chunk['content'], end='', flush=True)
print(chunk.get('content', ''), end='', flush=True)
elif chunk.get('type') == 'tool_result':
print("\n\n" + "="*50)
print(f"🛠️ Tool Result: {chunk.get('function_name', 'Unknown Tool')}")
print(f"📝 {chunk.get('result', chunk)}")
print("="*50 + "\n")
elif chunk.get('type') == 'tool_call_chunk':
# Format the tool result concisely
tool_name = chunk.get('function_name', 'Tool')
result = chunk.get('result', '')
print(f"\n\n🛠️ TOOL RESULT [{tool_name}] → {result}")
elif chunk.get('type') == 'tool_call':
# Display native tool call chunks as they arrive
tool_call = chunk.get('tool_call', {})
# Check if it's a meaningful part of the tool call to display
if tool_call.get('function', {}).get('arguments'):
args = tool_call.get('function', {}).get('arguments', '')
args = tool_call.get('function', {}).get('arguments', '')
# Only show when we have substantial arguments or a function name
should_display = (
len(args) > 3 or # More than just '{}'
tool_call.get('function', {}).get('name') # Or we have a name
)
if should_display:
tool_call_counter += 1
tool_name = tool_call.get('function', {}).get('name', 'Building...')
# Only show when we have substantial arguments or a function name
should_display = (
len(args) > 3 or # More than just '{}'
tool_call.get('function', {}).get('name') # Or we have a name
)
# Print tool call header with counter and tool name
print(f"\n🔧 TOOL CALL #{tool_call_counter} [{tool_name}]")
if should_display:
tool_call_counter += 1
print("\n" + "-"*50)
print(f"🔧 Tool Call #{tool_call_counter}: {tool_call.get('function', {}).get('name', 'Building...')}")
# Try to parse and pretty print the arguments if they're JSON
try:
# Check if it's complete JSON or just a fragment
if args.strip().startswith('{') and args.strip().endswith('}'):
args_obj = json.loads(args)
print(f"📋 Arguments: {json.dumps(args_obj, indent=2)}")
else:
print(f"📋 Arguments (partial): {args}")
except json.JSONDecodeError:
print(f"📋 Arguments (building): {args}")
print("-"*50)
# Return to the current content display
if current_response:
print("\nContinuing response:", flush=True)
print(current_response, end='', flush=True)
# Try to parse and pretty print the arguments if they're JSON
try:
# Check if it's complete JSON or just a fragment
if args.strip().startswith('{') and args.strip().endswith('}'):
args_obj = json.loads(args)
# Only print non-empty args to reduce clutter
if args_obj and args_obj != {}:
# Format JSON with indentation for readability
print(f" ARGS: {json.dumps(args_obj, indent=2)}")
else:
# Only print if there's actual content to show
if args.strip():
print(f" ARGS: {args}")
except json.JSONDecodeError:
if args.strip():
print(f" ARGS: {args}")
# Add a separator for visual clarity
print(" " + "-" * 40)
# Return to the current content display
if current_response:
print("\nContinuing response:", flush=True)
print(current_response, end='', flush=True)
elif chunk.get('type') == 'tool_status':
# Log tool status changes
status = chunk.get('status', '')
function_name = chunk.get('function_name', '')
if status and function_name:
status_emoji = "✅" if status == "completed" else "🔄" if status == "started" else "⏳"
print(f"\n{status_emoji} TOOL {status.upper()}: {function_name}")
elif chunk.get('type') == 'finish':
# Just log finish reason to console but don't show to user
finish_reason = chunk.get('finish_reason', 'unknown')
print(f"\n[Debug] Received finish_reason: {finish_reason}")
finish_reason = chunk.get('finish_reason', '')
if finish_reason:
print(f"\n📌 Finished: {finish_reason}")
print("\n" + "="*50)
print(f"✅ Agent completed. Processed {chunk_counter} chunks.")
if tool_call_counter > 0:
print(f"🔧 Found {tool_call_counter} native tool calls.")
print("="*50 + "\n")
print(f"\n\n✅ Agent run completed with {tool_call_counter} tool calls")
print("\n" + "="*50)
print("👋 Test completed. Goodbye!")
print("="*50 + "\n")
print("\n👋 Test completed. Goodbye!")
if __name__ == "__main__":
import asyncio
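A minimal sketch, assuming the chunk shapes handled above (a 'tool_call' chunk whose 'tool_call' dict carries an 'index' and a 'function' holding a 'name' plus streamed 'arguments' fragments), of how those partial argument strings could be assembled into complete tool calls:

import json

def accumulate_tool_calls(chunks):
    """Merge streamed tool-call argument fragments, keyed by tool call index."""
    calls = {}
    for chunk in chunks:
        if chunk.get('type') != 'tool_call':
            continue
        tool_call = chunk.get('tool_call', {})
        entry = calls.setdefault(tool_call.get('index', 0), {'name': None, 'arguments': ''})
        function = tool_call.get('function', {})
        if function.get('name'):
            entry['name'] = function['name']
        entry['arguments'] += function.get('arguments', '')
    # Parse the arguments once a complete JSON object has been assembled
    for entry in calls.values():
        try:
            entry['arguments'] = json.loads(entry['arguments'])
        except json.JSONDecodeError:
            pass  # keep partial fragments as raw strings
    return calls

Feeding the chunks printed by the loop above through this helper would yield one fully assembled argument object per native tool call.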

View File

@@ -3,17 +1,18 @@
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Modern Website</title>
<title>Creative Design Studio</title>
<link rel="stylesheet" href="styles.css">
</head>
<body>
<header>
<nav>
<div class="logo">MyWebsite</div>
<ul class="nav-links">
<div class="logo">Creative Studio</div>
<ul>
<li><a href="#home">Home</a></li>
<li><a href="#about">About</a></li>
<li><a href="#services">Services</a></li>
<li><a href="#about">About</a></li>
<li><a href="#portfolio">Portfolio</a></li>
<li><a href="#contact">Contact</a></li>
</ul>
</nav>
@@ -21,49 +22,104 @@
<main>
<section id="home" class="hero">
<h1>Welcome to Our Website</h1>
<p>Discover amazing content and services</p>
<button class="cta-button">Get Started</button>
<h1>Welcome to Creative Design Studio</h1>
<p class="tagline">We bring your digital dreams to life</p>
<a href="#contact" class="cta-button">Get Started</a>
</section>
<section id="about" class="about">
<h2>About Us</h2>
<p>We are a passionate team dedicated to creating amazing web experiences.</p>
</section>
<section id="services" class="services">
<section id="services">
<h2>Our Services</h2>
<div class="service-cards">
<div class="card">
<div class="services-grid">
<div class="service-card">
<h3>Web Design</h3>
<p>Beautiful and responsive websites</p>
<p>Beautiful, responsive websites that engage your audience</p>
</div>
<div class="card">
<h3>Development</h3>
<p>Custom web applications</p>
<div class="service-card">
<h3>Branding</h3>
<p>Unique brand identity that sets you apart</p>
</div>
<div class="card">
<h3>SEO</h3>
<p>Search engine optimization</p>
<div class="service-card">
<h3>Digital Marketing</h3>
<p>Strategic campaigns that drive results</p>
</div>
</div>
</section>
<section id="contact" class="contact">
<section id="about">
<h2>About Us</h2>
<div class="about-content">
<p>We are a passionate team of designers and developers dedicated to creating exceptional digital experiences. With years of experience in the industry, we understand what it takes to make your brand stand out.</p>
<div class="stats">
<div class="stat-item">
<span class="number">100+</span>
<span class="label">Projects Completed</span>
</div>
<div class="stat-item">
<span class="number">50+</span>
<span class="label">Happy Clients</span>
</div>
<div class="stat-item">
<span class="number">5+</span>
<span class="label">Years Experience</span>
</div>
</div>
</div>
</section>
<section id="portfolio">
<h2>Our Work</h2>
<div class="portfolio-grid">
<div class="portfolio-item">Project 1</div>
<div class="portfolio-item">Project 2</div>
<div class="portfolio-item">Project 3</div>
<div class="portfolio-item">Project 4</div>
</div>
</section>
<section id="contact">
<h2>Contact Us</h2>
<form id="contact-form">
<input type="text" placeholder="Name" required>
<input type="email" placeholder="Email" required>
<textarea placeholder="Your message" required></textarea>
<button type="submit">Send Message</button>
</form>
<div class="contact-container">
<form class="contact-form">
<input type="text" placeholder="Your Name" required>
<input type="email" placeholder="Your Email" required>
<textarea placeholder="Your Message" required></textarea>
<button type="submit" class="submit-button">Send Message</button>
</form>
<div class="contact-info">
<p>Email: info@creativestudio.com</p>
<p>Phone: (555) 123-4567</p>
<p>Address: 123 Creative Street, Design City</p>
</div>
</div>
</section>
</main>
<footer>
<p>&copy; 2024 MyWebsite. All rights reserved.</p>
<div class="footer-content">
<div class="footer-section">
<h4>Creative Studio</h4>
<p>Creating digital excellence since 2019</p>
</div>
<div class="footer-section">
<h4>Quick Links</h4>
<ul>
<li><a href="#home">Home</a></li>
<li><a href="#services">Services</a></li>
<li><a href="#portfolio">Portfolio</a></li>
</ul>
</div>
<div class="footer-section">
<h4>Connect With Us</h4>
<div class="social-links">
<a href="#">Facebook</a>
<a href="#">Twitter</a>
<a href="#">Instagram</a>
</div>
</div>
</div>
<div class="footer-bottom">
<p>&copy; 2024 Creative Design Studio. All rights reserved.</p>
</div>
</footer>
<script src="script.js"></script>
</body>
</html>

View File

@@ -1,73 +0,0 @@
// Smooth scrolling for navigation links
document.querySelectorAll('a[href^="#"]').forEach(anchor => {
anchor.addEventListener('click', function (e) {
e.preventDefault();
document.querySelector(this.getAttribute('href')).scrollIntoView({
behavior: 'smooth'
});
});
});
// Form submission handling
const contactForm = document.getElementById('contact-form');
if (contactForm) {
contactForm.addEventListener('submit', function(e) {
e.preventDefault();
// Get form data
const formData = new FormData(this);
const formObject = {};
formData.forEach((value, key) => formObject[key] = value);
// Here you would typically send the form data to a server
console.log('Form submitted:', formObject);
// Show success message
alert('Thank you for your message! We will get back to you soon.');
this.reset();
});
}
// Add animation on scroll for cards
const cards = document.querySelectorAll('.card');
const animateOnScroll = (entries) => {
entries.forEach(entry => {
if (entry.isIntersecting) {
entry.target.style.opacity = 1;
entry.target.style.transform = 'translateY(0)';
}
});
}
const observer = new IntersectionObserver(animateOnScroll, {
threshold: 0.1
});
cards.forEach(card => {
card.style.opacity = 0;
card.style.transform = 'translateY(20px)';
card.style.transition = 'all 0.5s ease-out';
observer.observe(card);
});
// Add active state to navigation links based on scroll position
window.addEventListener('scroll', () => {
const sections = document.querySelectorAll('section');
const navLinks = document.querySelectorAll('.nav-links a');
let current = '';
sections.forEach(section => {
const sectionTop = section.offsetTop;
if (window.pageYOffset >= sectionTop - 60) {
current = section.getAttribute('id');
}
});
navLinks.forEach(link => {
link.classList.remove('active');
if (link.getAttribute('href') === `#${current}`) {
link.classList.add('active');
}
});
});

View File

@@ -1,4 +1,4 @@
/* Reset and base styles */
/* Reset and Base Styles */
* {
margin: 0;
padding: 0;
@@ -13,7 +13,7 @@ body {
/* Navigation */
header {
background-color: #fff;
background: #fff;
box-shadow: 0 2px 5px rgba(0,0,0,0.1);
position: fixed;
width: 100%;
@@ -33,45 +33,37 @@ nav {
.logo {
font-size: 1.5rem;
font-weight: bold;
color: #333;
color: #2c3e50;
}
.nav-links {
nav ul {
display: flex;
list-style: none;
gap: 2rem;
}
.nav-links li {
margin-left: 2rem;
}
.nav-links a {
nav a {
text-decoration: none;
color: #333;
transition: color 0.3s;
color: #2c3e50;
font-weight: 500;
transition: color 0.3s ease;
}
.nav-links a:hover {
color: #007bff;
nav a:hover {
color: #3498db;
}
/* Main content */
main {
margin-top: 60px;
}
section {
padding: 4rem 5%;
max-width: 1200px;
margin: 0 auto;
}
/* Hero section */
/* Hero Section */
.hero {
height: 100vh;
display: flex;
flex-direction: column;
justify-content: center;
align-items: center;
text-align: center;
padding: 6rem 5%;
background: linear-gradient(135deg, #6366f1, #a855f7);
background: linear-gradient(135deg, #6c5ce7, #a363d9);
color: white;
padding: 0 1rem;
}
.hero h1 {
@@ -79,109 +71,231 @@ section {
margin-bottom: 1rem;
}
.hero p {
font-size: 1.2rem;
.tagline {
font-size: 1.5rem;
margin-bottom: 2rem;
}
.cta-button {
padding: 1rem 2rem;
font-size: 1.1rem;
background-color: white;
color: #333;
border: none;
border-radius: 5px;
cursor: pointer;
transition: transform 0.3s;
background: #fff;
color: #6c5ce7;
text-decoration: none;
border-radius: 30px;
font-weight: bold;
transition: transform 0.3s ease;
}
.cta-button:hover {
transform: translateY(-2px);
transform: translateY(-3px);
}
/* Services section */
.service-cards {
/* Services Section */
#services {
padding: 5rem 1rem;
background: #f9f9f9;
}
.services-grid {
display: grid;
grid-template-columns: repeat(auto-fit, minmax(250px, 1fr));
gap: 2rem;
margin-top: 2rem;
max-width: 1200px;
margin: 2rem auto;
}
.card {
.service-card {
background: white;
padding: 2rem;
background-color: white;
border-radius: 10px;
box-shadow: 0 2px 10px rgba(0,0,0,0.1);
transition: transform 0.3s;
box-shadow: 0 3px 10px rgba(0,0,0,0.1);
transition: transform 0.3s ease;
}
.card:hover {
.service-card:hover {
transform: translateY(-5px);
}
/* Contact form */
.contact {
background-color: #f8f9fa;
/* About Section */
#about {
padding: 5rem 1rem;
max-width: 1200px;
margin: 0 auto;
}
#contact-form {
.about-content {
display: grid;
grid-template-columns: 1fr 1fr;
gap: 3rem;
align-items: center;
}
.stats {
display: grid;
grid-template-columns: repeat(3, 1fr);
gap: 2rem;
text-align: center;
}
.stat-item .number {
font-size: 2.5rem;
font-weight: bold;
color: #6c5ce7;
display: block;
}
/* Portfolio Section */
#portfolio {
padding: 5rem 1rem;
background: #f9f9f9;
}
.portfolio-grid {
display: grid;
grid-template-columns: repeat(auto-fit, minmax(250px, 1fr));
gap: 2rem;
max-width: 1200px;
margin: 2rem auto;
}
.portfolio-item {
height: 250px;
background: #ddd;
border-radius: 10px;
display: flex;
align-items: center;
justify-content: center;
font-weight: bold;
cursor: pointer;
transition: transform 0.3s ease;
}
.portfolio-item:hover {
transform: scale(1.05);
}
/* Contact Section */
#contact {
padding: 5rem 1rem;
max-width: 1200px;
margin: 0 auto;
}
.contact-container {
display: grid;
grid-template-columns: 2fr 1fr;
gap: 3rem;
}
.contact-form {
display: flex;
flex-direction: column;
max-width: 600px;
margin: 2rem auto;
gap: 1rem;
}
#contact-form input,
#contact-form textarea {
padding: 0.8rem;
.contact-form input,
.contact-form textarea {
padding: 1rem;
border: 1px solid #ddd;
border-radius: 5px;
font-size: 1rem;
}
#contact-form textarea {
.contact-form textarea {
height: 150px;
resize: vertical;
}
#contact-form button {
.submit-button {
padding: 1rem;
background-color: #007bff;
background: #6c5ce7;
color: white;
border: none;
border-radius: 5px;
cursor: pointer;
transition: background-color 0.3s;
font-size: 1rem;
transition: background 0.3s ease;
}
#contact-form button:hover {
background-color: #0056b3;
.submit-button:hover {
background: #5849c2;
}
/* Footer */
footer {
text-align: center;
padding: 2rem;
background-color: #333;
background: #2c3e50;
color: white;
padding: 3rem 1rem 1rem;
}
/* Responsive design */
.footer-content {
display: grid;
grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
gap: 2rem;
max-width: 1200px;
margin: 0 auto;
}
.footer-section h4 {
margin-bottom: 1rem;
}
.footer-section ul {
list-style: none;
}
.footer-section a {
color: white;
text-decoration: none;
margin-bottom: 0.5rem;
display: inline-block;
}
.social-links {
display: flex;
gap: 1rem;
}
.footer-bottom {
text-align: center;
margin-top: 2rem;
padding-top: 1rem;
border-top: 1px solid rgba(255,255,255,0.1);
}
/* Common Section Styles */
section {
padding: 4rem 1rem;
}
h2 {
text-align: center;
margin-bottom: 2rem;
color: #2c3e50;
}
/* Responsive Design */
@media (max-width: 768px) {
.nav-links {
display: none;
nav {
flex-direction: column;
gap: 1rem;
}
nav ul {
flex-direction: column;
text-align: center;
gap: 1rem;
}
.hero h1 {
font-size: 2rem;
}
.hero p {
font-size: 1rem;
.about-content,
.contact-container {
grid-template-columns: 1fr;
}
section {
padding: 2rem 5%;
.stats {
grid-template-columns: 1fr;
}
}

View File

@@ -208,7 +208,7 @@ class ResponseProcessor:
# Yield the chunk data
yield {
"type": "tool_call_chunk",
"type": "content",
"tool_call": tool_call_data
}
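A consumer-side sketch, assuming the chunk shape yielded here ('type' of 'content' with a nested 'tool_call' payload) alongside plain content chunks that carry a 'content' string; the handler callbacks on_text and on_tool_call are hypothetical stand-ins for real UI updates:

def route_chunk(chunk, on_text, on_tool_call):
    """Dispatch one streamed chunk to the matching handler."""
    if chunk.get('type') != 'content':
        return
    if chunk.get('tool_call'):
        # Tool-call fragments reuse the 'content' type but carry a 'tool_call' payload
        on_tool_call(chunk['tool_call'])
    elif chunk.get('content'):
        on_text(chunk['content'])

The frontend change later in this commit applies the same check (jsonData?.type === 'content' && jsonData?.tool_call) before accumulating arguments.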

View File

@@ -0,0 +1,193 @@
"""
Raw streaming test to analyze tool call streaming behavior.
This script specifically tests how raw streaming chunks are delivered from the Anthropic API
with tool calls containing large JSON payloads.
"""
import asyncio
import json
import sys
import os
from typing import Dict, Any
from anthropic import AsyncAnthropic
from utils.logger import logger
# Example tool schema for Anthropic format
CREATE_FILE_TOOL = {
"name": "create_file",
"description": "Create a new file with the provided contents at a given path in the workspace",
"input_schema": {
"type": "object",
"properties": {
"file_path": {
"type": "string",
"description": "Path to the file to be created"
},
"file_contents": {
"type": "string",
"description": "The content to write to the file"
}
},
"required": ["file_path", "file_contents"]
}
}
async def test_raw_streaming():
"""Test tool calling with streaming to observe raw chunk behavior using Anthropic SDK directly."""
# Setup conversation with a prompt likely to generate large file payloads
messages = [
{"role": "user", "content": "Create a CSS file with a comprehensive set of styles for a modern responsive website."}
]
print("\n=== Testing Raw Streaming Tool Call Behavior ===\n")
try:
# Get API key from environment
api_key = os.environ.get("ANTHROPIC_API_KEY")
if not api_key:
logger.error("ANTHROPIC_API_KEY environment variable not set")
return
# Initialize Anthropic client
client = AsyncAnthropic(api_key=api_key)
# Make API call with tool in streaming mode
print("Sending streaming request...")
stream = await client.messages.create(
model="claude-3-5-sonnet-latest",
max_tokens=4096,
temperature=0.0,
system="You are a helpful assistant with access to file management tools.",
messages=messages,
tools=[CREATE_FILE_TOOL],
tool_choice={"type": "tool", "name": "create_file"},
stream=True
)
# Process streaming response
print("\nResponse stream started. Processing raw chunks:\n")
# Stream statistics
chunk_count = 0
tool_call_chunks = 0
accumulated_tool_input = ""
current_tool_name = None
accumulated_content = ""
# Process each chunk with ZERO buffering
print("\n--- BEGINNING STREAM OUTPUT ---\n", flush=True)
sys.stdout.flush()
# Process each event in the stream
async for event in stream:
chunk_count += 1
# Immediate debug output for every chunk
print(f"\n[CHUNK {chunk_count}] Type: {event.type}", end="", flush=True)
sys.stdout.flush()
# Process based on event type
if event.type == "message_start":
print(f" Message ID: {event.message.id}", end="", flush=True)
elif event.type == "content_block_start":
print(f" Content block start: {event.content_block.type}", end="", flush=True)
elif event.type == "content_block_delta":
if hasattr(event.delta, "text") and event.delta.text:
text = event.delta.text
accumulated_content += text
print(f" Content: {repr(text)}", end="", flush=True)
elif event.type == "tool_use":
current_tool_name = event.tool_use.name
print(f" Tool use: {current_tool_name}", end="", flush=True)
# If input is available immediately
if hasattr(event.tool_use, "input") and event.tool_use.input:
tool_call_chunks += 1
input_json = json.dumps(event.tool_use.input)
input_len = len(input_json)
print(f" Input[{input_len}]: {input_json[:50]}...", end="", flush=True)
accumulated_tool_input = input_json
elif event.type == "tool_use_delta":
if hasattr(event.delta, "input") and event.delta.input:
tool_call_chunks += 1
# For streaming tool inputs, we get partial updates
# The delta.input is a dictionary with partial updates to specific fields
input_json = json.dumps(event.delta.input)
input_len = len(input_json)
print(f" Input delta[{input_len}]: {input_json[:50]}...", end="", flush=True)
# Try to merge the deltas
try:
if accumulated_tool_input:
# Parse existing accumulated JSON
existing_input = json.loads(accumulated_tool_input)
# Update with new delta
existing_input.update(event.delta.input)
accumulated_tool_input = json.dumps(existing_input)
else:
accumulated_tool_input = input_json
except json.JSONDecodeError:
# If we can't parse JSON yet, just append the raw delta
accumulated_tool_input += input_json
elif event.type == "message_delta":
if hasattr(event.delta, "stop_reason") and event.delta.stop_reason:
print(f"\n--- FINISH REASON: {event.delta.stop_reason} ---", flush=True)
elif event.type == "message_stop":
# Access stop_reason directly from the event
if hasattr(event, "stop_reason"):
print(f"\n--- MESSAGE STOP: {event.stop_reason} ---", flush=True)
else:
print("\n--- MESSAGE STOP ---", flush=True)
# Force flush after every chunk
sys.stdout.flush()
print("\n\n--- END STREAM OUTPUT ---\n", flush=True)
sys.stdout.flush()
# Summary after all chunks processed
print("\n=== Streaming Summary ===")
print(f"Total chunks: {chunk_count}")
print(f"Tool call chunks: {tool_call_chunks}")
if current_tool_name:
print(f"\nTool name: {current_tool_name}")
if accumulated_content:
print(f"\nAccumulated content:")
print(accumulated_content)
# Try to parse accumulated arguments as JSON
try:
if accumulated_tool_input:
print(f"\nTotal accumulated tool input length: {len(accumulated_tool_input)}")
input_obj = json.loads(accumulated_tool_input)
print(f"\nSuccessfully parsed accumulated tool input as JSON")
if 'file_path' in input_obj:
print(f"file_path: {input_obj['file_path']}")
if 'file_contents' in input_obj:
contents = input_obj['file_contents']
print(f"file_contents length: {len(contents)}")
print(f"file_contents preview: {contents[:100]}...")
except json.JSONDecodeError as e:
print(f"\nError parsing accumulated tool input: {e}")
print(f"Tool input start: {accumulated_tool_input[:100]}...")
print(f"Tool input end: {accumulated_tool_input[-100:]}")
except Exception as e:
logger.error(f"Error in streaming test: {str(e)}", exc_info=True)
async def main():
"""Run the raw streaming test."""
await test_raw_streaming()
if __name__ == "__main__":
asyncio.run(main())

View File

@@ -19,6 +19,15 @@ interface ApiMessage {
type?: 'content' | 'tool_call';
name?: string;
arguments?: string;
tool_call?: {
id: string;
function: {
name: string;
arguments: string;
};
type: string;
index: number;
};
}
interface ApiAgentRun {
@@ -47,6 +56,7 @@ export default function ThreadPage({ params }: { params: Promise<ThreadParams> }
const [agentStatus, setAgentStatus] = useState<'idle' | 'running' | 'paused'>('idle');
const [isStreaming, setIsStreaming] = useState(false);
const [streamContent, setStreamContent] = useState('');
const [toolCallData, setToolCallData] = useState<{id?: string, name?: string, arguments?: string, index?: number} | null>(null);
const streamCleanupRef = useRef<(() => void) | null>(null);
const textareaRef = useRef<HTMLTextAreaElement | null>(null);
@@ -89,6 +99,15 @@ export default function ThreadPage({ params }: { params: Promise<ThreadParams> }
message?: string;
name?: string;
arguments?: string;
tool_call?: {
id: string;
function: {
name: string;
arguments: string;
};
type: string;
index: number;
};
} | null = null;
// Handle data: prefix format (SSE standard format)
@@ -108,6 +127,9 @@ export default function ThreadPage({ params }: { params: Promise<ThreadParams> }
// This will execute regardless of the normal flow
console.log(`[PAGE] 🚨 FORCE VERIFYING completion status for run: ${runId || 'unknown'}`);
// Reset tool call data on completion
setToolCallData(null);
// Immediately mark as not streaming to update UI
setIsStreaming(false);
setAgentStatus('idle');
@@ -204,8 +226,31 @@ export default function ThreadPage({ params }: { params: Promise<ThreadParams> }
return;
}
// Handle tool call chunks with data: prefix
if (jsonData?.type === 'content' && jsonData?.tool_call) {
console.log('[PAGE] Processing prefixed tool call chunk:', jsonData.tool_call);
const { id, function: toolFunction, type, index } = jsonData.tool_call;
// Update tool call data - accumulate arguments
setToolCallData(prev => ({
id,
name: toolFunction?.name,
arguments: prev && prev.id === id ?
(prev.arguments || '') + (toolFunction?.arguments || '') :
toolFunction?.arguments,
index
}));
// Don't update streamContent directly for tool calls
return;
}
// Handle regular content with data: prefix
if (jsonData?.type === 'content' && jsonData?.content) {
// Reset tool call data when switching to content
setToolCallData(null);
// For regular content, just append to the existing content
setStreamContent(prev => prev + jsonData?.content);
console.log('[PAGE] Added content from prefixed data:', jsonData?.content.substring(0, 30) + '...');
@@ -246,6 +291,9 @@ export default function ThreadPage({ params }: { params: Promise<ThreadParams> }
// Direct access verification - forcing immediate state update
console.log(`[PAGE] 🚨 FORCE VERIFYING completion status for standard message: ${runId || 'unknown'}`);
// Reset tool call data
setToolCallData(null);
// Immediately mark as not streaming to update UI
setIsStreaming(false);
setAgentStatus('idle');
@@ -282,10 +330,27 @@ export default function ThreadPage({ params }: { params: Promise<ThreadParams> }
}
// Skip empty messages
if (!jsonData?.content && !jsonData?.arguments) return;
if (!jsonData?.content && !jsonData?.arguments && !jsonData?.tool_call) return;
// Handle different message types
if (jsonData?.type === 'tool_call') {
if (jsonData?.type === 'content' && jsonData?.tool_call) {
console.log('[PAGE] Processing tool call chunk:', jsonData.tool_call);
const { id, function: toolFunction, type, index } = jsonData.tool_call;
// Update tool call data - accumulate arguments
setToolCallData(prev => ({
id,
name: toolFunction?.name,
arguments: prev && prev.id === id ?
(prev.arguments || '') + (toolFunction?.arguments || '') :
toolFunction?.arguments,
index
}));
// Don't update streamContent directly for tool calls
return;
} else if (jsonData?.type === 'tool_call') {
const toolContent = jsonData.name
? `Tool: ${jsonData.name}\n${jsonData.arguments || ''}`
: jsonData.arguments || '';
@@ -294,6 +359,9 @@ export default function ThreadPage({ params }: { params: Promise<ThreadParams> }
setStreamContent(prev => prev + (prev ? '\n' : '') + toolContent);
console.log('[PAGE] Added tool call content:', toolContent.substring(0, 30) + '...');
} else if (jsonData?.type === 'content' && jsonData?.content) {
// Reset tool call data when switching to content
setToolCallData(null);
// For regular content, just append to the existing content
setStreamContent(prev => prev + jsonData?.content);
console.log('[PAGE] Added content:', jsonData?.content.substring(0, 30) + '...');
@@ -327,6 +395,9 @@ export default function ThreadPage({ params }: { params: Promise<ThreadParams> }
setAgentStatus('idle');
setIsStreaming(false);
// Reset tool call data
setToolCallData(null);
try {
console.log(`[PAGE] Checking final status for agent run ${runId}`);
const status = await getAgentStatus(runId);
@@ -847,8 +918,27 @@ export default function ThreadPage({ params }: { params: Promise<ThreadParams> }
<div className="whitespace-pre-wrap break-words">
{message.type === 'tool_call' ? (
<div className="font-mono text-xs">
<div className="text-muted-foreground">Tool: {message.name}</div>
<div className="mt-1">{message.arguments}</div>
<div className="flex items-center gap-2 mb-1 text-muted-foreground">
<div className="flex h-4 w-4 items-center justify-center rounded-full bg-primary/10">
<div className="h-2 w-2 rounded-full bg-primary"></div>
</div>
<span>Tool: {message.name}</span>
</div>
<div className="mt-1 p-3 bg-secondary/20 rounded-md overflow-x-auto">
{message.arguments}
</div>
</div>
) : message.role === 'tool' ? (
<div className="font-mono text-xs">
<div className="flex items-center gap-2 mb-1 text-muted-foreground">
<div className="flex h-4 w-4 items-center justify-center rounded-full bg-success/10">
<div className="h-2 w-2 rounded-full bg-success"></div>
</div>
<span>Tool Result: {message.name}</span>
</div>
<div className="mt-1 p-3 bg-success/5 rounded-md">
{message.content}
</div>
</div>
) : (
message.content
@@ -865,7 +955,21 @@ export default function ThreadPage({ params }: { params: Promise<ThreadParams> }
>
<div className="max-w-[85%] rounded-lg bg-muted px-4 py-3 text-sm">
<div className="whitespace-pre-wrap break-words">
{streamContent}
{toolCallData ? (
<div className="font-mono text-xs">
<div className="flex items-center gap-2 mb-1 text-muted-foreground">
<div className="flex h-4 w-4 items-center justify-center rounded-full bg-primary/10">
<div className="h-2 w-2 rounded-full bg-primary animate-pulse"></div>
</div>
<span>Tool: {toolCallData.name}</span>
</div>
<div className="mt-1 p-3 bg-secondary/20 rounded-md overflow-x-auto">
{toolCallData.arguments || ''}
</div>
</div>
) : (
streamContent
)}
{isStreaming && (
<span className="inline-flex items-center ml-0.5">
<span

View File

@@ -0,0 +1,60 @@
2025-04-08 00:12:44,337 - agentpress - DEBUG - llm.py:43 - API key set for provider: OPENAI
2025-04-08 00:12:44,337 - agentpress - DEBUG - llm.py:43 - API key set for provider: ANTHROPIC
2025-04-08 00:12:44,337 - agentpress - DEBUG - llm.py:43 - API key set for provider: GROQ
2025-04-08 00:12:44,337 - agentpress - WARNING - llm.py:45 - No API key found for provider: OPENROUTER
2025-04-08 00:12:44,337 - agentpress - WARNING - llm.py:64 - Missing AWS credentials for Bedrock integration - access_key: False, secret_key: True, region: None
2025-04-08 00:12:44,337 - agentpress - INFO - llm.py:194 - Making LLM API call to model: anthropic/claude-3-5-sonnet-latest
2025-04-08 00:12:44,337 - agentpress - DEBUG - llm.py:121 - Added 1 tools to API parameters
2025-04-08 00:12:44,337 - agentpress - DEBUG - llm.py:213 - Attempt 1/3
2025-04-08 00:12:45,623 - agentpress - INFO - llm.py:217 - Successfully received API response from anthropic/claude-3-5-sonnet-latest
2025-04-08 00:12:45,623 - agentpress - DEBUG - llm.py:218 - Response: <litellm.litellm_core_utils.streaming_handler.CustomStreamWrapper object at 0x1055aafa0>
2025-04-08 00:16:23,059 - agentpress - DEBUG - llm.py:43 - API key set for provider: OPENAI
2025-04-08 00:16:23,060 - agentpress - DEBUG - llm.py:43 - API key set for provider: ANTHROPIC
2025-04-08 00:16:23,060 - agentpress - DEBUG - llm.py:43 - API key set for provider: GROQ
2025-04-08 00:16:23,060 - agentpress - WARNING - llm.py:45 - No API key found for provider: OPENROUTER
2025-04-08 00:16:23,060 - agentpress - WARNING - llm.py:64 - Missing AWS credentials for Bedrock integration - access_key: False, secret_key: True, region: None
2025-04-08 00:16:23,060 - agentpress - INFO - llm.py:194 - Making LLM API call to model: anthropic/claude-3-5-sonnet-latest
2025-04-08 00:16:23,060 - agentpress - DEBUG - llm.py:121 - Added 1 tools to API parameters
2025-04-08 00:16:23,060 - agentpress - DEBUG - llm.py:213 - Attempt 1/3
2025-04-08 00:16:24,620 - agentpress - INFO - llm.py:217 - Successfully received API response from anthropic/claude-3-5-sonnet-latest
2025-04-08 00:16:24,620 - agentpress - DEBUG - llm.py:218 - Response: <litellm.litellm_core_utils.streaming_handler.CustomStreamWrapper object at 0x10749ffd0>
2025-04-08 00:16:59,313 - agentpress - DEBUG - llm.py:43 - API key set for provider: OPENAI
2025-04-08 00:16:59,313 - agentpress - DEBUG - llm.py:43 - API key set for provider: ANTHROPIC
2025-04-08 00:16:59,313 - agentpress - DEBUG - llm.py:43 - API key set for provider: GROQ
2025-04-08 00:16:59,313 - agentpress - WARNING - llm.py:45 - No API key found for provider: OPENROUTER
2025-04-08 00:16:59,313 - agentpress - WARNING - llm.py:64 - Missing AWS credentials for Bedrock integration - access_key: False, secret_key: True, region: None
2025-04-08 00:16:59,313 - agentpress - INFO - llm.py:194 - Making LLM API call to model: anthropic/claude-3-5-sonnet-latest
2025-04-08 00:16:59,313 - agentpress - DEBUG - llm.py:121 - Added 1 tools to API parameters
2025-04-08 00:16:59,313 - agentpress - DEBUG - llm.py:213 - Attempt 1/3
2025-04-08 00:17:00,512 - agentpress - INFO - llm.py:217 - Successfully received API response from anthropic/claude-3-5-sonnet-latest
2025-04-08 00:17:00,512 - agentpress - DEBUG - llm.py:218 - Response: <litellm.litellm_core_utils.streaming_handler.CustomStreamWrapper object at 0x1073eddc0>
2025-04-08 00:17:55,152 - agentpress - DEBUG - llm.py:43 - API key set for provider: OPENAI
2025-04-08 00:17:55,152 - agentpress - DEBUG - llm.py:43 - API key set for provider: ANTHROPIC
2025-04-08 00:17:55,152 - agentpress - DEBUG - llm.py:43 - API key set for provider: GROQ
2025-04-08 00:17:55,152 - agentpress - WARNING - llm.py:45 - No API key found for provider: OPENROUTER
2025-04-08 00:17:55,152 - agentpress - WARNING - llm.py:64 - Missing AWS credentials for Bedrock integration - access_key: False, secret_key: True, region: None
2025-04-08 00:17:55,153 - agentpress - INFO - llm.py:194 - Making LLM API call to model: anthropic/claude-3-5-sonnet-latest
2025-04-08 00:17:55,153 - agentpress - DEBUG - llm.py:121 - Added 1 tools to API parameters
2025-04-08 00:17:55,153 - agentpress - DEBUG - llm.py:213 - Attempt 1/3
2025-04-08 00:17:56,322 - agentpress - INFO - llm.py:217 - Successfully received API response from anthropic/claude-3-5-sonnet-latest
2025-04-08 00:17:56,323 - agentpress - DEBUG - llm.py:218 - Response: <litellm.litellm_core_utils.streaming_handler.CustomStreamWrapper object at 0x107710220>
2025-04-08 00:18:23,004 - agentpress - DEBUG - llm.py:43 - API key set for provider: OPENAI
2025-04-08 00:18:23,004 - agentpress - DEBUG - llm.py:43 - API key set for provider: ANTHROPIC
2025-04-08 00:18:23,004 - agentpress - DEBUG - llm.py:43 - API key set for provider: GROQ
2025-04-08 00:18:23,004 - agentpress - WARNING - llm.py:45 - No API key found for provider: OPENROUTER
2025-04-08 00:18:23,004 - agentpress - WARNING - llm.py:64 - Missing AWS credentials for Bedrock integration - access_key: False, secret_key: True, region: None
2025-04-08 00:18:23,004 - agentpress - INFO - llm.py:194 - Making LLM API call to model: anthropic/claude-3-5-sonnet-latest
2025-04-08 00:18:23,004 - agentpress - DEBUG - llm.py:121 - Added 1 tools to API parameters
2025-04-08 00:18:23,004 - agentpress - DEBUG - llm.py:213 - Attempt 1/3
2025-04-08 00:18:23,944 - agentpress - INFO - llm.py:217 - Successfully received API response from anthropic/claude-3-5-sonnet-latest
2025-04-08 00:18:23,944 - agentpress - DEBUG - llm.py:218 - Response: <litellm.litellm_core_utils.streaming_handler.CustomStreamWrapper object at 0x106f1df70>
2025-04-08 00:18:54,251 - agentpress - DEBUG - llm.py:43 - API key set for provider: OPENAI
2025-04-08 00:18:54,251 - agentpress - DEBUG - llm.py:43 - API key set for provider: ANTHROPIC
2025-04-08 00:18:54,251 - agentpress - DEBUG - llm.py:43 - API key set for provider: GROQ
2025-04-08 00:18:54,251 - agentpress - WARNING - llm.py:45 - No API key found for provider: OPENROUTER
2025-04-08 00:18:54,251 - agentpress - WARNING - llm.py:64 - Missing AWS credentials for Bedrock integration - access_key: False, secret_key: True, region: None
2025-04-08 00:18:54,252 - agentpress - INFO - llm.py:194 - Making LLM API call to model: openai/gpt-4o
2025-04-08 00:18:54,252 - agentpress - DEBUG - llm.py:121 - Added 1 tools to API parameters
2025-04-08 00:18:54,252 - agentpress - DEBUG - llm.py:213 - Attempt 1/3
2025-04-08 00:18:55,196 - agentpress - INFO - llm.py:217 - Successfully received API response from openai/gpt-4o
2025-04-08 00:18:55,197 - agentpress - DEBUG - llm.py:218 - Response: <litellm.litellm_core_utils.streaming_handler.CustomStreamWrapper object at 0x10832fe80>