mirror of https://github.com/kortix-ai/suna.git
disable prompt caching temp
commit 626251aec7
parent c205276752

@@ -251,6 +251,7 @@ async def start_agent(
         model_name=model_name,  # Already resolved above
         enable_thinking=body.enable_thinking, reasoning_effort=body.reasoning_effort,
         stream=body.stream, enable_context_manager=body.enable_context_manager,
+        enable_prompt_caching=body.enable_prompt_caching,
         agent_config=agent_config,  # Pass agent configuration
         request_id=request_id,
     )

@@ -634,6 +635,7 @@ async def initiate_agent_with_files(
     reasoning_effort: Optional[str] = Form("low"),
     stream: Optional[bool] = Form(True),
     enable_context_manager: Optional[bool] = Form(False),
+    enable_prompt_caching: Optional[bool] = Form(True),
     agent_id: Optional[str] = Form(None),  # Add agent_id parameter
     files: List[UploadFile] = File(default=[]),
     user_id: str = Depends(verify_and_get_user_id_from_jwt)

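For context on the new multipart parameter: a minimal sketch of a FastAPI route (hypothetical app and path, not the actual suna router) showing how Form(True) behaves. FastAPI coerces form values such as "true"/"false" to bool and applies the default when the field is absent from the request.

    # Hedged sketch: assumes FastAPI; app and route here are illustrative only.
    from typing import Optional
    from fastapi import FastAPI, Form

    app = FastAPI()

    @app.post("/agent/initiate")
    async def initiate(enable_prompt_caching: Optional[bool] = Form(True)):
        # Field absent -> True (the default); "false" in the form data -> False.
        return {"enable_prompt_caching": enable_prompt_caching}
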
@@ -985,6 +987,7 @@ async def initiate_agent_with_files(
         model_name=model_name,  # Already resolved above
         enable_thinking=enable_thinking, reasoning_effort=reasoning_effort,
         stream=stream, enable_context_manager=enable_context_manager,
+        enable_prompt_caching=enable_prompt_caching,
         agent_config=agent_config,  # Pass agent configuration
         request_id=request_id,
     )

@@ -11,6 +11,7 @@ class AgentStartRequest(BaseModel):
     reasoning_effort: Optional[str] = 'low'
     stream: Optional[bool] = True
     enable_context_manager: Optional[bool] = False
+    enable_prompt_caching: Optional[bool] = True
     agent_id: Optional[str] = None  # Custom agent to use

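The request-model side of the same flag: a minimal sketch (assuming Pydantic v2, trimmed to the fields shown in the hunk) demonstrating that omitting enable_prompt_caching falls back to True, so existing API clients keep caching on unless they opt out explicitly.

    from typing import Optional
    from pydantic import BaseModel

    class AgentStartRequest(BaseModel):  # trimmed to the fields in this hunk
        reasoning_effort: Optional[str] = 'low'
        stream: Optional[bool] = True
        enable_context_manager: Optional[bool] = False
        enable_prompt_caching: Optional[bool] = True  # new field
        agent_id: Optional[str] = None

    print(AgentStartRequest().enable_prompt_caching)                              # True
    print(AgentStartRequest(enable_prompt_caching=False).enable_prompt_caching)   # False
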
@@ -687,7 +687,8 @@ class AgentRunner:
             enable_thinking=self.config.enable_thinking,
             reasoning_effort=self.config.reasoning_effort,
             generation=generation,
-            enable_prompt_caching=self.config.enable_prompt_caching
+            enable_prompt_caching=self.config.enable_prompt_caching,
+            enable_context_manager=self.config.enable_context_manager
         )

         last_tool_call = None

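Why this hunk touches two lines instead of one: the old enable_prompt_caching line was the final argument, so it legally carried no trailing comma; appending enable_context_manager without adding that comma would be a SyntaxError. A quick self-contained check:

    import ast

    ast.parse("f(a=1,\n  b=2)")      # with the comma: parses
    try:
        ast.parse("f(a=1\n  b=2)")   # without it: invalid syntax
    except SyntaxError as e:
        print("missing comma ->", e.msg)
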
@@ -814,6 +815,7 @@ async def run_agent(
     enable_thinking: Optional[bool] = False,
     reasoning_effort: Optional[str] = 'low',
     enable_context_manager: bool = True,
+    enable_prompt_caching: bool = True,
     agent_config: Optional[dict] = None,
     trace: Optional[StatefulTraceClient] = None
 ):

@@ -839,6 +841,7 @@ async def run_agent(
         enable_thinking=enable_thinking,
         reasoning_effort=reasoning_effort,
         enable_context_manager=enable_context_manager,
+        enable_prompt_caching=enable_prompt_caching,
         agent_config=agent_config,
         trace=trace
     )

@@ -420,6 +420,7 @@ class AgentExecutor:
             reasoning_effort="low",
             stream=False,
             enable_context_manager=True,
+            enable_prompt_caching=True,
             agent_config=agent_config,
             request_id=structlog.contextvars.get_contextvars().get('request_id'),
         )

@@ -728,6 +729,7 @@ class WorkflowExecutor:
             reasoning_effort='medium',
             stream=False,
             enable_context_manager=True,
+            enable_prompt_caching=True,
             agent_config=agent_config,
             request_id=None,
         )

@@ -63,6 +63,7 @@ async def run_agent_background(
     reasoning_effort: Optional[str] = 'low',
     stream: bool = True,
     enable_context_manager: bool = False,
+    enable_prompt_caching: bool = False,
     agent_config: Optional[dict] = None,
     request_id: Optional[str] = None
 ):

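Note the asymmetry in defaults, presumably the "disable ... temp" of the commit title: the HTTP layer defaults the flag to True (Form(True) and the model default), while this background entrypoint defaults it to False, so any code path that does not forward the flag explicitly now runs with prompt caching off. A small sketch of that default behavior (simplified signature, not the real worker):

    import asyncio
    from typing import Optional

    async def run_agent_background(
        enable_prompt_caching: bool = False,  # worker-side kill switch
        request_id: Optional[str] = None,
    ):
        return enable_prompt_caching

    print(asyncio.run(run_agent_background()))                            # False
    print(asyncio.run(run_agent_background(enable_prompt_caching=True)))  # True
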
@@ -193,6 +194,7 @@ async def run_agent_background(
         model_name=effective_model,
         enable_thinking=enable_thinking, reasoning_effort=reasoning_effort,
         enable_context_manager=enable_context_manager,
+        enable_prompt_caching=enable_prompt_caching,
         agent_config=agent_config,
         trace=trace,
     )

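Putting the hunks together, the flag now travels request model -> start_agent / initiate_agent_with_files -> run_agent_background -> run_agent. A condensed, hedged sketch of that chain, with function bodies reduced to the forwarding shown in the diff:

    import asyncio

    async def run_agent(enable_prompt_caching: bool = True):
        return f"prompt caching {'on' if enable_prompt_caching else 'off'}"

    async def run_agent_background(enable_prompt_caching: bool = False):
        # Forwards whatever it received; its own default is False.
        return await run_agent(enable_prompt_caching=enable_prompt_caching)

    async def start_agent(body_flag: bool = True):  # AgentStartRequest default
        return await run_agent_background(enable_prompt_caching=body_flag)

    print(asyncio.run(start_agent()))           # on: API default True
    print(asyncio.run(run_agent_background()))  # off: worker default False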