mirror of https://github.com/kortix-ai/suna.git
temp disable: default enable_prompt_caching to False across the backend (agent initiation endpoint, ThreadManager, AgentStartRequest, AgentExecutor, WorkflowExecutor)
parent ea701e3620
commit 7a6d09ffe7
@@ -635,7 +635,7 @@ async def initiate_agent_with_files(
     reasoning_effort: Optional[str] = Form("low"),
     stream: Optional[bool] = Form(True),
     enable_context_manager: Optional[bool] = Form(False),
-    enable_prompt_caching: Optional[bool] = Form(True),
+    enable_prompt_caching: Optional[bool] = Form(False),
     agent_id: Optional[str] = Form(None),  # Add agent_id parameter
     files: List[UploadFile] = File(default=[]),
     user_id: str = Depends(verify_and_get_user_id_from_jwt)
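Reviewer note: with the endpoint default flipped to Form(False), clients that
want caching must now send the field explicitly. A minimal sketch of an opt-in
request, assuming an httpx multipart POST; the host, route path, and file
payload are illustrative assumptions, and only the form field names come from
this hunk:

    import httpx

    response = httpx.post(
        "http://localhost:8000/agent/initiate",  # hypothetical host and route
        headers={"Authorization": "Bearer <jwt>"},  # verify_and_get_user_id_from_jwt expects a JWT
        data={
            "reasoning_effort": "low",
            "stream": "true",
            "enable_prompt_caching": "true",  # opt back in; the default is now False
        },
        files=[("files", ("notes.txt", b"example", "text/plain"))],
    )
    response.raise_for_status()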
@@ -215,7 +215,7 @@ class ThreadManager:
         enable_thinking: Optional[bool] = False,
         reasoning_effort: Optional[str] = 'low',
         generation: Optional[StatefulGenerationClient] = None,
-        enable_prompt_caching: bool = True,
+        enable_prompt_caching: bool = False,
         enable_context_manager: Optional[bool] = None,
     ) -> Union[Dict[str, Any], AsyncGenerator]:
         """Run a conversation thread with LLM integration and tool execution."""
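Reviewer note: run_thread now defaults the flag off, so callers that relied on
the old default silently lose prompt caching. A sketch of pinning the value
explicitly; thread_id and system_prompt are assumed parameters not shown in
this hunk:

    manager = ThreadManager()  # constructor arguments, if any, not shown in this diff
    result = await manager.run_thread(
        thread_id=thread_id,          # assumed parameter
        system_prompt=system_prompt,  # assumed parameter
        enable_thinking=False,
        reasoning_effort='low',
        enable_prompt_caching=True,   # explicit opt-in; this was the default before the commit
    )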
@@ -271,7 +271,7 @@ class ThreadManager:
         config: ProcessorConfig, stream: bool, enable_thinking: Optional[bool],
         reasoning_effort: Optional[str], generation: Optional[StatefulGenerationClient],
         auto_continue_state: Dict[str, Any], temporary_message: Optional[Dict[str, Any]] = None,
-        enable_prompt_caching: bool = True, use_context_manager: bool = True
+        enable_prompt_caching: bool = False, use_context_manager: bool = True
     ) -> Union[Dict[str, Any], AsyncGenerator]:
         """Execute a single LLM run."""
         logger.debug(f"_execute_run called with config type: {type(config)}")
@@ -379,7 +379,7 @@ class ThreadManager:
         config: ProcessorConfig, stream: bool, enable_thinking: Optional[bool],
         reasoning_effort: Optional[str], generation: Optional[StatefulGenerationClient],
         auto_continue_state: Dict[str, Any], temporary_message: Optional[Dict[str, Any]],
-        native_max_auto_continues: int, enable_prompt_caching: bool = True,
+        native_max_auto_continues: int, enable_prompt_caching: bool = False,
         use_context_manager: bool = True
     ) -> AsyncGenerator:
         """Generator that handles auto-continue logic."""
@@ -11,7 +11,7 @@ class AgentStartRequest(BaseModel):
     reasoning_effort: Optional[str] = 'low'
     stream: Optional[bool] = True
     enable_context_manager: Optional[bool] = False
-    enable_prompt_caching: Optional[bool] = True
+    enable_prompt_caching: Optional[bool] = False
     agent_id: Optional[str] = None  # Custom agent to use
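Reviewer note: on the request model, an omitted field now deserializes to
False. A quick check, assuming pydantic v2's model_validate and that the
model's remaining fields (not shown in this hunk) also carry defaults:

    body = AgentStartRequest.model_validate({})
    assert body.enable_prompt_caching is False  # new default when the field is omitted

    body = AgentStartRequest.model_validate({"enable_prompt_caching": True})
    assert body.enable_prompt_caching is True   # explicit opt-in still works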
@@ -420,7 +420,7 @@ class AgentExecutor:
             reasoning_effort="low",
             stream=False,
             enable_context_manager=True,
-            enable_prompt_caching=True,
+            enable_prompt_caching=False,
             agent_config=agent_config,
             request_id=structlog.contextvars.get_contextvars().get('request_id'),
         )
@@ -729,7 +729,7 @@ class WorkflowExecutor:
             reasoning_effort='medium',
             stream=False,
             enable_context_manager=True,
-            enable_prompt_caching=True,
+            enable_prompt_caching=False,
             agent_config=agent_config,
             request_id=None,
         )
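Reviewer note: unlike the signature changes above, these two executor call
sites pass enable_prompt_caching=False as an explicit literal, so reverting
this temporary disable later requires restoring both the defaults and these
two arguments.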