mirror of https://github.com/kortix-ai/suna.git
Merge branch 'kortix-ai:main' into sheets-agent
Commit 2281ac7678
backend/pyproject.toml:

@@ -12,7 +12,7 @@ classifiers = [
 requires-python = ">=3.11"
 dependencies = [
     "python-dotenv==1.0.1",
-    "litellm==1.72.2",
+    "litellm==1.75.2",
     "click==8.1.7",
     "questionary==2.0.1",
     "requests==2.32.3",
@@ -74,3 +74,8 @@ repository = "https://github.com/kortix-ai/suna"
 
+[tool.uv]
+package = false
+
+[dependency-groups]
+dev = [
+    "orjson>=3.11.1",
+]
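For context on the new sections: "package = false" under [tool.uv] tells uv to treat the project as unpackaged, so "uv sync" manages the declared dependencies without building or installing the project itself; [dependency-groups] is the standardized (PEP 735) place for groups like dev, which uv includes by default when syncing.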
Backend LLM service (Python):

@@ -21,7 +21,9 @@ from utils.logger import logger
 from utils.config import config
 
 # litellm.set_verbose=True
-litellm.modify_params=True
+# Let LiteLLM auto-adjust params and drop unsupported ones (e.g., GPT-5 temperature!=1)
+litellm.modify_params = True
+litellm.drop_params = True
 
 # Constants
 MAX_RETRIES = 2
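For context on the two flags: with drop_params enabled, LiteLLM strips request parameters the target model rejects instead of surfacing a provider error. A minimal sketch of the effect (not part of the diff; assumes a configured OpenAI API key):

    import litellm

    litellm.modify_params = True  # let LiteLLM adjust params per provider
    litellm.drop_params = True    # silently drop params the model does not support

    # If the target model only accepts the default temperature of 1,
    # drop_params removes temperature=0.2 before the request is sent
    # instead of raising an unsupported-parameter error.
    response = litellm.completion(
        model="openai/gpt-5",
        messages=[{"role": "user", "content": "ping"}],
        temperature=0.2,
    )
    print(response.choices[0].message.content)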
@@ -144,7 +146,9 @@ def prepare_params(
         logger.debug(f"Skipping max_tokens for Claude 3.7 model: {model_name}")
         # Do not add any max_tokens parameter for Claude 3.7
     else:
-        param_name = "max_completion_tokens" if 'o1' in model_name else "max_tokens"
+        is_openai_o_series = 'o1' in model_name
+        is_openai_gpt5 = 'gpt-5' in model_name
+        param_name = "max_completion_tokens" if (is_openai_o_series or is_openai_gpt5) else "max_tokens"
         params[param_name] = max_tokens
 
     # Add tools if provided
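The rename matters because OpenAI's o-series and GPT-5 reasoning models expect max_completion_tokens and reject the older max_tokens field. The same selection logic as a standalone sketch (token_param_name is a hypothetical helper, not in the diff):

    def token_param_name(model_name: str) -> str:
        """Return the token-limit parameter name the model expects."""
        # OpenAI o-series ('o1') and GPT-5 models require max_completion_tokens.
        if 'o1' in model_name or 'gpt-5' in model_name:
            return "max_completion_tokens"
        return "max_tokens"

    assert token_param_name("openai/gpt-5-mini") == "max_completion_tokens"
    assert token_param_name("anthropic/claude-3-5-sonnet") == "max_tokens"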
@@ -199,6 +203,10 @@ def prepare_params(
     # Apply Anthropic prompt caching (minimal implementation)
     # Check model name *after* potential modifications (like adding bedrock/ prefix)
     effective_model_name = params.get("model", model_name)  # Use model from params if set, else original
+
+    # OpenAI GPT-5: drop unsupported temperature param (only default 1 allowed)
+    if "gpt-5" in effective_model_name and "temperature" in params and params["temperature"] != 1:
+        params.pop("temperature", None)
     if "claude" in effective_model_name.lower() or "anthropic" in effective_model_name.lower():
         messages = params["messages"]  # Direct reference, modification affects params
 
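A quick check of what the new guard does to a request (hypothetical params dict, mirroring the branch above):

    params = {"model": "openai/gpt-5", "temperature": 0.7, "messages": []}
    if "gpt-5" in params["model"] and params.get("temperature") != 1:
        params.pop("temperature", None)
    assert "temperature" not in params  # stripped before the API call goes out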
Backend model registry (Python):

@@ -84,6 +84,22 @@ MODELS = {
         },
         "tier_availability": ["paid"]
     },
+    "openai/gpt-5": {
+        "aliases": ["gpt-5"],
+        "pricing": {
+            "input_cost_per_million_tokens": 1.25,
+            "output_cost_per_million_tokens": 10.00
+        },
+        "tier_availability": ["paid"]
+    },
+    "openai/gpt-5-mini": {
+        "aliases": ["gpt-5-mini"],
+        "pricing": {
+            "input_cost_per_million_tokens": 0.25,
+            "output_cost_per_million_tokens": 2.00
+        },
+        "tier_availability": ["paid"]
+    },
     "openai/gpt-4.1-mini": {
         "aliases": ["gpt-4.1-mini"],
         "pricing": {
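For context, the pricing fields are USD per million tokens, so a request's cost works out as below (a sketch; request_cost is a hypothetical helper, not part of the registry):

    def request_cost(pricing: dict, input_tokens: int, output_tokens: int) -> float:
        # Rates in the registry are USD per million tokens.
        return (input_tokens * pricing["input_cost_per_million_tokens"]
                + output_tokens * pricing["output_cost_per_million_tokens"]) / 1_000_000

    gpt5_pricing = {"input_cost_per_million_tokens": 1.25,
                    "output_cost_per_million_tokens": 10.00}
    print(request_cost(gpt5_pricing, 10_000, 2_000))  # 0.0325 (USD)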
backend/uv.lock: 2796 changed lines (file diff suppressed because it is too large).
Frontend model list (TypeScript):

@@ -78,7 +78,7 @@ export const MODELS = {
   },
   'gpt-4.1': {
     tier: 'premium',
-    priority: 96,
+    priority: 92,
     recommended: false,
     lowQuality: false
   },
@@ -94,6 +94,18 @@ export const MODELS = {
     recommended: false,
     lowQuality: false
   },
+  'gpt-5': {
+    tier: 'premium',
+    priority: 99,
+    recommended: false,
+    lowQuality: false
+  },
+  'gpt-5-mini': {
+    tier: 'premium',
+    priority: 88,
+    recommended: false,
+    lowQuality: false
+  },
   'gemini-2.5-flash:thinking': {
     tier: 'premium',
     priority: 84,
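A note on the numbers: priority appears to drive sort order in the model picker, so gpt-5 at 99 lists above gpt-4.1 (demoted here from 96 to 92), while gpt-5-mini at 88 slots in just below it.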