marko-kraemer 2025-09-18 09:21:31 +02:00
parent d2cfa0c6f2
commit bf919ddf9f
5 changed files with 11 additions and 13 deletions

View File

@@ -72,7 +72,6 @@ class ErrorProcessor:
         """Process LLM-related errors using LiteLLM's exception types."""
         error_message = ErrorProcessor.safe_error_to_string(error)
-
         # Use LiteLLM's exception types for precise error categorization
         if isinstance(error, ContextWindowExceededError):
             return ProcessedError(
                 error_type="context_window_exceeded",
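For reference, a minimal sketch of the categorization pattern this hunk sits in, assuming ProcessedError is a small dataclass; only error_type appears in the diff, so the message field and the process_llm_error wrapper are illustrative. ContextWindowExceededError and RateLimitError are real LiteLLM exception types.

```python
from dataclasses import dataclass

from litellm.exceptions import ContextWindowExceededError, RateLimitError


@dataclass
class ProcessedError:
    # Only error_type is visible in the diff; message is an assumed field.
    error_type: str
    message: str


def process_llm_error(error: Exception) -> ProcessedError:
    # Stand-in for ErrorProcessor.safe_error_to_string in the real code.
    message = str(error)
    if isinstance(error, ContextWindowExceededError):
        return ProcessedError(error_type="context_window_exceeded", message=message)
    if isinstance(error, RateLimitError):
        return ProcessedError(error_type="rate_limit", message=message)
    return ProcessedError(error_type="unknown", message=message)
```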

View File

@@ -56,7 +56,7 @@ class AgentConfig:
     enable_context_manager: bool = True
     agent_config: Optional[dict] = None
     trace: Optional[StatefulTraceClient] = None
-    enable_prompt_caching: bool = False  # Temporarily disabled for debugging
+    enable_prompt_caching: bool = True


 class ToolManager:
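This flips prompt caching back on by default. The diff does not show how the flag is consumed downstream; below is a hedged sketch of one common wiring, the Anthropic-style ephemeral cache_control breakpoint on the system prompt. All names here are illustrative, not this codebase's API.

```python
def apply_prompt_caching(messages: list[dict], enabled: bool) -> list[dict]:
    """Hypothetical helper: mark the system prompt as a cacheable prefix."""
    if not enabled:
        return messages
    for msg in messages:
        content = msg.get("content")
        # Anthropic-style prompt caching: tag the last system content block
        # with an ephemeral cache_control breakpoint.
        if msg.get("role") == "system" and isinstance(content, list) and content:
            content[-1]["cache_control"] = {"type": "ephemeral"}
    return messages
```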

View File

@@ -298,15 +298,15 @@ async def make_llm_api_call(

     logger.debug(f"Making LLM API call to model: {model_name} with {len(messages)} messages")

     # Check token count for context window issues
-    try:
-        from litellm import token_counter
-        total_tokens = token_counter(model=model_name, messages=messages)
-        logger.debug(f"Estimated input tokens: {total_tokens}")
+    # try:
+    #     from litellm import token_counter
+    #     total_tokens = token_counter(model=model_name, messages=messages)
+    #     logger.debug(f"Estimated input tokens: {total_tokens}")
-        if total_tokens > 200000:
-            logger.warning(f"High token count detected: {total_tokens}")
-    except Exception:
-        pass  # Token counting is optional
+    #     if total_tokens > 200000:
+    #         logger.warning(f"High token count detected: {total_tokens}")
+    # except Exception:
+    #     pass  # Token counting is optional

     # Prepare parameters
     params = prepare_params(
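The pre-call token check is commented out rather than deleted, so a later revert is easy. If it comes back, a guarded helper keeps the call site tidy; litellm's token_counter and the 200,000 threshold come from the original code, while the helper name is illustrative.

```python
def estimate_input_tokens(model_name: str, messages: list[dict]) -> int | None:
    """Best-effort token estimate; returns None when counting fails."""
    try:
        from litellm import token_counter
        return token_counter(model=model_name, messages=messages)
    except Exception:
        return None  # token counting is optional


# Possible call site inside make_llm_api_call:
# total_tokens = estimate_input_tokens(model_name, messages)
# if total_tokens is not None and total_tokens > 200000:
#     logger.warning(f"High token count detected: {total_tokens}")
```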

View File

@@ -248,7 +248,6 @@ export function ShareModal({ isOpen, onClose, threadId, projectId }: ShareModalP
       ) : (
         <div className="text-center space-y-4">
           <div className="space-y-2">
-            <h3 className="text-xl font-semibold">Share this chat</h3>
             <p className="text-sm text-muted-foreground">
               Create a shareable link that allows others to view this conversation publicly.
             </p>

View File

@@ -64,14 +64,14 @@ export const useModelSelection = () => {
     if (!modelsData?.models || isLoadingModels) {
       models = [
         {
-          id: 'moonshotai/kimi-k2',
+          id: 'Kimi K2',
           label: 'Kimi K2',
           requiresSubscription: false,
           priority: 100,
           recommended: true
         },
         {
-          id: 'claude-sonnet-4',
+          id: 'Claude Sonnet 4',
           label: 'Claude Sonnet 4',
           requiresSubscription: true,
           priority: 100,
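The fallback ids now match the display labels rather than provider slugs like moonshotai/kimi-k2, so any code that looks a model up by id must use the same strings. A TypeScript sketch of the fallback pattern; the ModelOption shape is inferred from the fields visible in this diff and the names are assumptions.

```typescript
// Shape inferred from the diff; field names beyond those shown are assumptions.
interface ModelOption {
  id: string;
  label: string;
  requiresSubscription: boolean;
  priority: number;
  recommended?: boolean;
}

// Hard-coded defaults shown while the models query is still loading.
const FALLBACK_MODELS: ModelOption[] = [
  { id: 'Kimi K2', label: 'Kimi K2', requiresSubscription: false, priority: 100, recommended: true },
  { id: 'Claude Sonnet 4', label: 'Claude Sonnet 4', requiresSubscription: true, priority: 100 },
];

// Lookups keyed on id must now use the display-name ids.
const isRecommended = (id: string): boolean =>
  FALLBACK_MODELS.some((m) => m.id === id && m.recommended === true);
```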