mirror of https://github.com/kortix-ai/suna.git
chore(api, billing, llm): update worker count, refine billing tier names, and adjust logging in LLM API calls
This commit is contained in:
parent 46f42cd7ce
commit e98ab052f9
@@ -200,7 +200,7 @@ if __name__ == "__main__":
     if sys.platform == "win32":
         asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
 
-    workers = 1
+    workers = 4
 
     logger.info(f"Starting server on 0.0.0.0:8000 with {workers} workers")
     uvicorn.run(
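Note on the worker bump: with uvicorn, workers > 1 only takes effect when the application is passed as an import string, since each worker process re-imports it. A minimal sketch under that assumption (the "api:app" import string is a stand-in, not taken from this repo's entrypoint; host and port come from the log line above):

import sys
import asyncio

import uvicorn

if __name__ == "__main__":
    if sys.platform == "win32":
        # The proactor event loop is needed for subprocess support on Windows.
        asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())

    workers = 4
    # With workers > 1, pass an import string ("module:attribute"), not an app object.
    uvicorn.run("api:app", host="0.0.0.0", port=8000, workers=workers)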
@@ -80,13 +80,13 @@ SUBSCRIPTION_TIERS = {
     config.STRIPE_TIER_125_800_ID: {'name': 'tier_125_800', 'minutes': 7500, 'cost': 800}, # 125 hours
     config.STRIPE_TIER_200_1000_ID: {'name': 'tier_200_1000', 'minutes': 12000, 'cost': 1000}, # 200 hours
     # Yearly tiers (same usage limits, different billing period)
-    config.STRIPE_TIER_2_20_YEARLY_ID: {'name': 'tier_2_20_yearly', 'minutes': 120, 'cost': 20}, # 2 hours/month, $204/year
-    config.STRIPE_TIER_6_50_YEARLY_ID: {'name': 'tier_6_50_yearly', 'minutes': 360, 'cost': 50}, # 6 hours/month, $510/year
-    config.STRIPE_TIER_12_100_YEARLY_ID: {'name': 'tier_12_100_yearly', 'minutes': 720, 'cost': 100}, # 12 hours/month, $1020/year
-    config.STRIPE_TIER_25_200_YEARLY_ID: {'name': 'tier_25_200_yearly', 'minutes': 1500, 'cost': 200}, # 25 hours/month, $2040/year
-    config.STRIPE_TIER_50_400_YEARLY_ID: {'name': 'tier_50_400_yearly', 'minutes': 3000, 'cost': 400}, # 50 hours/month, $4080/year
-    config.STRIPE_TIER_125_800_YEARLY_ID: {'name': 'tier_125_800_yearly', 'minutes': 7500, 'cost': 800}, # 125 hours/month, $8160/year
-    config.STRIPE_TIER_200_1000_YEARLY_ID: {'name': 'tier_200_1000_yearly', 'minutes': 12000, 'cost': 1000}, # 200 hours/month, $10200/year
+    config.STRIPE_TIER_2_20_YEARLY_ID: {'name': 'tier_2_20', 'minutes': 120, 'cost': 20}, # 2 hours/month, $204/year
+    config.STRIPE_TIER_6_50_YEARLY_ID: {'name': 'tier_6_50', 'minutes': 360, 'cost': 50}, # 6 hours/month, $510/year
+    config.STRIPE_TIER_12_100_YEARLY_ID: {'name': 'tier_12_100', 'minutes': 720, 'cost': 100}, # 12 hours/month, $1020/year
+    config.STRIPE_TIER_25_200_YEARLY_ID: {'name': 'tier_25_200', 'minutes': 1500, 'cost': 200}, # 25 hours/month, $2040/year
+    config.STRIPE_TIER_50_400_YEARLY_ID: {'name': 'tier_50_400', 'minutes': 3000, 'cost': 400}, # 50 hours/month, $4080/year
+    config.STRIPE_TIER_125_800_YEARLY_ID: {'name': 'tier_125_800', 'minutes': 7500, 'cost': 800}, # 125 hours/month, $8160/year
+    config.STRIPE_TIER_200_1000_YEARLY_ID: {'name': 'tier_200_1000', 'minutes': 12000, 'cost': 1000}, # 200 hours/month, $10200/year
 }
 
 # Pydantic models for request/response validation
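The rename drops the _yearly suffix from the yearly tiers' 'name' field, so a monthly and a yearly Stripe price ID for the same plan now resolve to the same logical tier name. A hypothetical sketch of why that matters for name-based checks (the price IDs and the tier_for_price helper below are illustrative, not from billing.py):

SUBSCRIPTION_TIERS = {
    "price_monthly_2_20": {"name": "tier_2_20", "minutes": 120, "cost": 20},  # monthly billing
    "price_yearly_2_20": {"name": "tier_2_20", "minutes": 120, "cost": 20},   # yearly billing, same limits
}

def tier_for_price(price_id: str) -> dict:
    # Unknown price IDs fall back to a zero-minute tier.
    return SUBSCRIPTION_TIERS.get(price_id, {"name": "none", "minutes": 0, "cost": 0})

# Monthly and yearly subscribers now compare equal on tier name.
assert tier_for_price("price_monthly_2_20")["name"] == tier_for_price("price_yearly_2_20")["name"]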
@@ -133,6 +133,7 @@ def prepare_params(
             "model": "openrouter/anthropic/claude-sonnet-4",
             "messages": messages,
         }]
+        # params["mock_testing_fallback"] = True
         logger.debug("Added Claude-specific headers")
 
     # Add OpenRouter-specific parameters
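For context, the surrounding block appears to attach a fallback model spec to the request params, with the new mock_testing_fallback line left commented out as a debugging switch. A rough sketch of that shape (the "fallbacks" key and the if condition are assumptions; only the model string, messages, and the commented flag appear in the diff):

def prepare_params(messages: list, model_name: str, **kwargs) -> dict:
    params = {"model": model_name, "messages": messages, **kwargs}

    if model_name.startswith("openrouter/anthropic/"):
        # Assumed wiring: route failed calls to a known-good OpenRouter model.
        params["fallbacks"] = [{
            "model": "openrouter/anthropic/claude-sonnet-4",
            "messages": messages,
        }]
        # params["mock_testing_fallback"] = True  # kept disabled, as in the commit

    return params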
@@ -312,7 +313,7 @@ async def make_llm_api_call(
 
             response = await litellm.acompletion(**params)
             logger.debug(f"Successfully received API response from {model_name}")
-            logger.debug(f"Response: {response}")
+            # logger.debug(f"Response: {response}")
             return response
 
         except (litellm.exceptions.RateLimitError, OpenAIError, json.JSONDecodeError) as e:
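The call site sits inside error handling that treats rate limits, OpenAI client errors, and malformed JSON alike; the full-response debug line is now commented out to keep logs lean. A minimal sketch of that pattern (the retry count and backoff are assumed, not read from llm.py):

import asyncio
import json

import litellm
from openai import OpenAIError

MAX_RETRIES = 3  # assumed value

async def make_llm_api_call(params: dict, model_name: str):
    for attempt in range(MAX_RETRIES):
        try:
            response = await litellm.acompletion(**params)
            # The verbose "Response: ..." debug line is disabled in this commit;
            # re-enable it locally when inspecting raw completions.
            return response
        except (litellm.exceptions.RateLimitError, OpenAIError, json.JSONDecodeError):
            if attempt == MAX_RETRIES - 1:
                raise
            await asyncio.sleep(2 ** attempt)  # simple exponential backoff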