instant stream

This commit is contained in:
marko-kraemer 2024-11-13 00:57:38 +01:00
parent b5e79d264f
commit f8fc799726
4 changed files with 129 additions and 42 deletions

View File

@@ -0,0 +1,75 @@
<!DOCTYPE html>
<!-- Static marketing landing page: sticky-less header nav, full-height hero
     with CTA, three-card feature grid, and a footer with nav/social links.
     Styling comes from reset.css (base reset) and styles.css (theme);
     class names here are load-bearing for those stylesheets. -->
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Modern Landing Page</title>
<link rel="stylesheet" href="reset.css">
<link rel="stylesheet" href="styles.css">
</head>
<body>
<!-- Top bar: brand mark + in-page anchor navigation (#home/#features/#contact). -->
<header class="header container">
<div class="header-logo">Brand</div>
<nav>
<ul class="nav-links">
<li><a href="#home">Home</a></li>
<li><a href="#features">Features</a></li>
<li><a href="#contact">Contact</a></li>
</ul>
</nav>
</header>
<main>
<!-- Hero: headline, subtitle, and a CTA that scrolls to the features section. -->
<section class="hero">
<div class="container hero-content">
<h1 class="hero-title">Transform Your Digital Experience</h1>
<p class="hero-subtitle">Discover innovative solutions that drive your business forward</p>
<a href="#features" class="cta-button">Get Started</a>
</div>
</section>
<!-- Features: 3-column grid (collapses to 1 column on small screens via CSS). -->
<section id="features" class="features">
<div class="container">
<div class="feature-grid">
<div class="feature">
<div class="feature-icon">🚀</div>
<h3>Fast Performance</h3>
<p>Optimized solutions for maximum efficiency</p>
</div>
<div class="feature">
<div class="feature-icon">🔒</div>
<h3>Secure Platform</h3>
<p>Advanced security measures to protect your data</p>
</div>
<div class="feature">
<div class="feature-icon">📊</div>
<h3>Smart Analytics</h3>
<p>Comprehensive insights for strategic decisions</p>
</div>
</div>
</div>
</section>
</main>
<!-- Footer: repeats brand + nav anchors, social links, and copyright.
     Social links are placeholders (href="#") — no external URLs wired up yet. -->
<footer class="footer">
<div class="container">
<div class="footer-content">
<div class="footer-logo">Brand</div>
<div class="footer-links">
<a href="#home">Home</a>
<a href="#features">Features</a>
<a href="#contact">Contact</a>
</div>
<div class="footer-social">
<a href="#">Twitter</a>
<a href="#">LinkedIn</a>
<a href="#">GitHub</a>
</div>
</div>
<div class="footer-copyright">
&copy; 2023 Brand. All rights reserved.
</div>
</div>
</footer>
</body>
</html>

View File

@@ -0,0 +1 @@
/* reset.css — minimal base reset shared by all pages. */
*,
*::before,
*::after {
  box-sizing: border-box;
  margin: 0;
  padding: 0;
}

html {
  scroll-behavior: smooth; /* smooth in-page anchor scrolling for the nav links */
}

body {
  line-height: 1.6;
  /* FIX: -apple-system and BlinkMacSystemFont are keywords/vendor identifiers
     and must be UNQUOTED; quoted, they are literal family-name strings that
     never match, so the whole stack fell through toward generic sans-serif.
     Quotes are kept only where the family name contains spaces. */
  font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Oxygen,
    Ubuntu, Cantarell, "Open Sans", "Helvetica Neue", sans-serif;
}

img {
  max-width: 100%;
  height: auto; /* keep aspect ratio when width is clamped */
}

a {
  text-decoration: none;
  color: inherit;
}

View File

@@ -0,0 +1 @@
/* styles.css — landing-page theme: layout containers, header/nav, hero,
   feature grid, footer, and the <=768px mobile layout.
   Fixes vs. the previous version:
   - the two consecutive `body` rules are merged into one;
   - the duplicate `.footer` rule is merged: its first occurrence
     (background #333, padding 20px 0) was fully overridden by the later
     one except `text-align: center`, which is preserved below so the
     rendered result is unchanged. */
:root {
  --primary-color: #3498db;
  --secondary-color: #2ecc71;
  --text-color: #333;
  --background-color: #f4f4f4;
  --accent-color: #e74c3c;
}

body {
  scroll-behavior: smooth;
  /* NOTE(review): `font-smooth` is non-standard; kept for parity with the
     vendor-prefixed properties below. */
  font-smooth: always;
  -webkit-font-smoothing: antialiased;
  -moz-osx-font-smoothing: grayscale;
  background-color: var(--background-color);
  color: var(--text-color);
}

.container {
  max-width: 1200px;
  margin: 0 auto;
  padding: 0 15px;
}

/* Header / navigation */
.header {
  display: flex;
  justify-content: space-between;
  align-items: center;
  padding: 20px 0;
  background-color: white;
  box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
}

.header-logo {
  font-size: 24px;
  font-weight: bold;
  color: var(--primary-color);
}

.nav-links {
  display: flex;
  list-style: none;
  gap: 20px;
}

.nav-links a {
  color: var(--text-color);
  transition: color 0.3s ease;
}

.nav-links a:hover {
  color: var(--primary-color);
}

/* Hero banner */
.hero {
  display: flex;
  align-items: center;
  min-height: 70vh;
  background: linear-gradient(135deg, var(--primary-color), var(--secondary-color));
  color: white;
  text-align: center;
}

.hero-content {
  max-width: 800px;
  margin: 0 auto;
}

.hero-title {
  font-size: 48px;
  margin-bottom: 20px;
  font-weight: bold;
}

.hero-subtitle {
  font-size: 24px;
  margin-bottom: 30px;
}

.cta-button {
  display: inline-block;
  background-color: white;
  color: var(--primary-color);
  padding: 12px 30px;
  border-radius: 5px;
  font-weight: bold;
  transition: transform 0.3s ease;
}

.cta-button:hover {
  transform: scale(1.05);
}

/* Feature grid */
.features {
  padding: 80px 0;
  background-color: white;
}

.feature-grid {
  display: grid;
  grid-template-columns: repeat(3, 1fr);
  gap: 30px;
}

.feature {
  text-align: center;
  padding: 30px;
  border-radius: 10px;
  transition: all 0.3s ease;
  box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
  background-color: white;
}

.feature:hover {
  transform: translateY(-10px);
  box-shadow: 0 8px 15px rgba(0, 0, 0, 0.15);
  background-color: var(--background-color);
}

.feature-icon {
  font-size: 48px;
  margin-bottom: 20px;
}

.feature h3 {
  margin-bottom: 15px;
  font-size: 20px;
}

.feature p {
  color: var(--text-color);
}

/* Footer — single merged rule (see header comment). */
.footer {
  background-color: #1a1a1a;
  color: white;
  text-align: center;
  padding: 40px 0;
}

.footer-content {
  display: flex;
  justify-content: space-between;
  align-items: center;
  margin-bottom: 20px;
}

.footer-logo {
  font-size: 24px;
  font-weight: bold;
}

.footer-links {
  display: flex;
  gap: 20px;
}

.footer-links a {
  color: white;
  transition: color 0.3s ease;
}

.footer-links a:hover {
  color: var(--primary-color);
}

.footer-social {
  display: flex;
  gap: 15px;
}

.footer-social a {
  color: white;
  transition: color 0.3s ease;
}

.footer-social a:hover {
  color: var(--secondary-color);
}

.footer-copyright {
  text-align: center;
  padding-top: 20px;
  border-top: 1px solid rgba(255, 255, 255, 0.1);
}

/* Mobile: stack the header, nav, feature grid, and footer vertically. */
@media (max-width: 768px) {
  .header {
    flex-direction: column;
    text-align: center;
  }

  .header-logo {
    margin-bottom: 15px;
  }

  .nav-links {
    flex-direction: column;
    align-items: center;
  }

  .hero {
    text-align: center;
  }

  .hero-title {
    font-size: 36px;
  }

  .hero-subtitle {
    font-size: 18px;
  }

  .feature-grid {
    grid-template-columns: 1fr;
  }

  .features {
    padding: 40px 0;
  }

  .footer-content {
    flex-direction: column;
    text-align: center;
  }

  .footer-links,
  .footer-social {
    margin-top: 20px;
    flex-direction: column;
    align-items: center;
  }
}

View File

@@ -1,7 +1,8 @@
import logging
from typing import Dict, Any, AsyncGenerator, Callable
from agentpress.tool_executor import ToolExecutor
from agentpress.tool_parser import ToolParser
from agentpress.tool_executor import ToolExecutor
import asyncio
class LLMResponseProcessor:
"""
@@ -23,6 +24,9 @@ class LLMResponseProcessor:
tool_calls_buffer (Dict): Buffer for storing incomplete tool calls during streaming
processed_tool_calls (set): Set of already processed tool call IDs
current_message (Dict): Current message being processed in streaming mode
content_buffer (str): Buffer for accumulating content during streaming
tool_calls_accumulated (list): List of tool calls accumulated during streaming
message_added (bool): Flag to indicate if a message has been added to the thread
"""
def __init__(
@@ -44,7 +48,9 @@ class LLMResponseProcessor:
# State tracking for streaming responses
self.tool_calls_buffer = {}
self.processed_tool_calls = set()
self.current_message = None
self.content_buffer = ""
self.tool_calls_accumulated = []
self.message_added = False
async def process_stream(
self,
@@ -55,40 +61,43 @@
"""
Process streaming LLM response and handle tool execution.
Handles streaming responses chunk by chunk, managing tool execution timing
based on configuration. Tools can be executed immediately as they are
identified in the stream, or collected and executed after the complete
response is received.
Args:
response_stream: Stream of response chunks from the LLM
execute_tools: Whether to execute tool calls at all
immediate_execution: Whether to execute tools as they appear in the stream
or wait for complete response
Yields:
Processed response chunks
Yields chunks immediately as they arrive, while handling tool execution
and message management in the background.
"""
pending_tool_calls = []
async def process_chunk(chunk):
async def handle_message_management(chunk):
# Accumulate content
if hasattr(chunk.choices[0].delta, 'content') and chunk.choices[0].delta.content:
self.content_buffer += chunk.choices[0].delta.content
# Parse and accumulate tool calls
parsed_message, is_complete = await self.tool_parser.parse_stream(
chunk,
self.tool_calls_buffer
)
if parsed_message and 'tool_calls' in parsed_message:
# Update or create message
if not self.current_message:
self.current_message = parsed_message
await self.add_message(self.thread_id, self.current_message)
else:
self.current_message['tool_calls'] = parsed_message['tool_calls']
await self.update_message(self.thread_id, self.current_message)
self.tool_calls_accumulated = parsed_message['tool_calls']
if execute_tools:
# Handle message management and tool execution
if chunk.choices[0].finish_reason or (self.content_buffer and self.tool_calls_accumulated):
message = {
"role": "assistant",
"content": self.content_buffer
}
if self.tool_calls_accumulated:
message["tool_calls"] = self.tool_calls_accumulated
if not self.message_added:
await self.add_message(self.thread_id, message)
self.message_added = True
else:
await self.update_message(self.thread_id, message)
# Handle tool execution
if execute_tools and self.tool_calls_accumulated:
new_tool_calls = [
tool_call for tool_call in parsed_message['tool_calls']
tool_call for tool_call in self.tool_calls_accumulated
if tool_call['id'] not in self.processed_tool_calls
]
@@ -106,24 +115,25 @@
else:
pending_tool_calls.extend(new_tool_calls)
# Process any pending tools at end of response
if chunk.choices[0].finish_reason and not immediate_execution and pending_tool_calls:
results = await self.tool_executor.execute_tool_calls(
tool_calls=pending_tool_calls,
available_functions=self.available_functions,
thread_id=self.thread_id,
executed_tool_calls=self.processed_tool_calls
)
for result in results:
await self.add_message(self.thread_id, result)
self.processed_tool_calls.add(result['tool_call_id'])
pending_tool_calls.clear()
return chunk
# Handle end of stream
if chunk.choices[0].finish_reason:
if not immediate_execution and pending_tool_calls:
results = await self.tool_executor.execute_tool_calls(
tool_calls=pending_tool_calls,
available_functions=self.available_functions,
thread_id=self.thread_id,
executed_tool_calls=self.processed_tool_calls
)
for result in results:
await self.add_message(self.thread_id, result)
self.processed_tool_calls.add(result['tool_call_id'])
pending_tool_calls.clear()
async for chunk in response_stream:
processed_chunk = await process_chunk(chunk)
yield processed_chunk
# Start background task for message management and tool execution
asyncio.create_task(handle_message_management(chunk))
# Immediately yield the chunk
yield chunk
async def process_response(
self,