From 7a6d09ffe787e3d7a4ddc2877e956624f4d89b60 Mon Sep 17 00:00:00 2001
From: marko-kraemer
Date: Sun, 21 Sep 2025 15:40:29 +0200
Subject: [PATCH] temp disable

---
 backend/core/agent_runs.py                 | 2 +-
 backend/core/agentpress/thread_manager.py  | 6 +++---
 backend/core/api_models/threads.py         | 2 +-
 backend/core/triggers/execution_service.py | 4 ++--
 4 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/backend/core/agent_runs.py b/backend/core/agent_runs.py
index e82e8276..f848bc2e 100644
--- a/backend/core/agent_runs.py
+++ b/backend/core/agent_runs.py
@@ -635,7 +635,7 @@ async def initiate_agent_with_files(
     reasoning_effort: Optional[str] = Form("low"),
     stream: Optional[bool] = Form(True),
     enable_context_manager: Optional[bool] = Form(False),
-    enable_prompt_caching: Optional[bool] = Form(True),
+    enable_prompt_caching: Optional[bool] = Form(False),
     agent_id: Optional[str] = Form(None),  # Add agent_id parameter
     files: List[UploadFile] = File(default=[]),
     user_id: str = Depends(verify_and_get_user_id_from_jwt)
diff --git a/backend/core/agentpress/thread_manager.py b/backend/core/agentpress/thread_manager.py
index 919c8c40..3f2a973b 100644
--- a/backend/core/agentpress/thread_manager.py
+++ b/backend/core/agentpress/thread_manager.py
@@ -215,7 +215,7 @@ class ThreadManager:
         enable_thinking: Optional[bool] = False,
         reasoning_effort: Optional[str] = 'low',
         generation: Optional[StatefulGenerationClient] = None,
-        enable_prompt_caching: bool = True,
+        enable_prompt_caching: bool = False,
         enable_context_manager: Optional[bool] = None,
     ) -> Union[Dict[str, Any], AsyncGenerator]:
         """Run a conversation thread with LLM integration and tool execution."""
@@ -271,7 +271,7 @@ class ThreadManager:
         config: ProcessorConfig, stream: bool, enable_thinking: Optional[bool],
         reasoning_effort: Optional[str], generation: Optional[StatefulGenerationClient],
         auto_continue_state: Dict[str, Any], temporary_message: Optional[Dict[str, Any]] = None,
-        enable_prompt_caching: bool = True, use_context_manager: bool = True
+        enable_prompt_caching: bool = False, use_context_manager: bool = True
     ) -> Union[Dict[str, Any], AsyncGenerator]:
         """Execute a single LLM run."""
         logger.debug(f"_execute_run called with config type: {type(config)}")
@@ -379,7 +379,7 @@ class ThreadManager:
         config: ProcessorConfig, stream: bool, enable_thinking: Optional[bool],
         reasoning_effort: Optional[str], generation: Optional[StatefulGenerationClient],
         auto_continue_state: Dict[str, Any], temporary_message: Optional[Dict[str, Any]],
-        native_max_auto_continues: int, enable_prompt_caching: bool = True,
+        native_max_auto_continues: int, enable_prompt_caching: bool = False,
         use_context_manager: bool = True
     ) -> AsyncGenerator:
         """Generator that handles auto-continue logic."""
diff --git a/backend/core/api_models/threads.py b/backend/core/api_models/threads.py
index 861d6d43..6f824614 100644
--- a/backend/core/api_models/threads.py
+++ b/backend/core/api_models/threads.py
@@ -11,7 +11,7 @@ class AgentStartRequest(BaseModel):
     reasoning_effort: Optional[str] = 'low'
     stream: Optional[bool] = True
     enable_context_manager: Optional[bool] = False
-    enable_prompt_caching: Optional[bool] = True
+    enable_prompt_caching: Optional[bool] = False
     agent_id: Optional[str] = None  # Custom agent to use


diff --git a/backend/core/triggers/execution_service.py b/backend/core/triggers/execution_service.py
index 4a48a8ef..811b3095 100644
--- a/backend/core/triggers/execution_service.py
+++ b/backend/core/triggers/execution_service.py
@@ -420,7 +420,7 @@ class AgentExecutor:
             reasoning_effort="low",
             stream=False,
             enable_context_manager=True,
-            enable_prompt_caching=True,
+            enable_prompt_caching=False,
             agent_config=agent_config,
             request_id=structlog.contextvars.get_contextvars().get('request_id'),
         )
@@ -729,7 +729,7 @@ class WorkflowExecutor:
             reasoning_effort='medium',
             stream=False,
             enable_context_manager=True,
-            enable_prompt_caching=True,
+            enable_prompt_caching=False,
             agent_config=agent_config,
             request_id=None,
         )
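
The patch only flips default values, so callers can still opt back in per
request by passing the flag explicitly. A minimal sketch, assuming only the
AgentStartRequest fields visible in the hunk above (the real model in
backend/core/api_models/threads.py carries additional fields not reproduced
here):

    from typing import Optional
    from pydantic import BaseModel

    class AgentStartRequest(BaseModel):
        # Trimmed copy of the fields shown in the hunk above,
        # for illustration only.
        reasoning_effort: Optional[str] = 'low'
        stream: Optional[bool] = True
        enable_context_manager: Optional[bool] = False
        enable_prompt_caching: Optional[bool] = False  # flipped default
        agent_id: Optional[str] = None  # Custom agent to use

    # Explicit opt-in still works; only the default changed:
    req = AgentStartRequest(enable_prompt_caching=True)
    assert req.enable_prompt_caching is True

    # Omitting the field now leaves prompt caching disabled:
    assert AgentStartRequest().enable_prompt_caching is False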