diff --git a/backend/api.py b/backend/api.py
index a9254136..e9599b8c 100644
--- a/backend/api.py
+++ b/backend/api.py
@@ -200,7 +200,7 @@ if __name__ == "__main__":
     if sys.platform == "win32":
         asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())

-    workers = 1
+    workers = 4

     logger.info(f"Starting server on 0.0.0.0:8000 with {workers} workers")
     uvicorn.run(
diff --git a/backend/services/billing.py b/backend/services/billing.py
index 1fd10908..db1c7dba 100644
--- a/backend/services/billing.py
+++ b/backend/services/billing.py
@@ -80,13 +80,13 @@ SUBSCRIPTION_TIERS = {
     config.STRIPE_TIER_125_800_ID: {'name': 'tier_125_800', 'minutes': 7500, 'cost': 800}, # 125 hours
     config.STRIPE_TIER_200_1000_ID: {'name': 'tier_200_1000', 'minutes': 12000, 'cost': 1000}, # 200 hours
     # Yearly tiers (same usage limits, different billing period)
-    config.STRIPE_TIER_2_20_YEARLY_ID: {'name': 'tier_2_20_yearly', 'minutes': 120, 'cost': 20}, # 2 hours/month, $204/year
-    config.STRIPE_TIER_6_50_YEARLY_ID: {'name': 'tier_6_50_yearly', 'minutes': 360, 'cost': 50}, # 6 hours/month, $510/year
-    config.STRIPE_TIER_12_100_YEARLY_ID: {'name': 'tier_12_100_yearly', 'minutes': 720, 'cost': 100}, # 12 hours/month, $1020/year
-    config.STRIPE_TIER_25_200_YEARLY_ID: {'name': 'tier_25_200_yearly', 'minutes': 1500, 'cost': 200}, # 25 hours/month, $2040/year
-    config.STRIPE_TIER_50_400_YEARLY_ID: {'name': 'tier_50_400_yearly', 'minutes': 3000, 'cost': 400}, # 50 hours/month, $4080/year
-    config.STRIPE_TIER_125_800_YEARLY_ID: {'name': 'tier_125_800_yearly', 'minutes': 7500, 'cost': 800}, # 125 hours/month, $8160/year
-    config.STRIPE_TIER_200_1000_YEARLY_ID: {'name': 'tier_200_1000_yearly', 'minutes': 12000, 'cost': 1000}, # 200 hours/month, $10200/year
+    config.STRIPE_TIER_2_20_YEARLY_ID: {'name': 'tier_2_20', 'minutes': 120, 'cost': 20}, # 2 hours/month, $204/year
+    config.STRIPE_TIER_6_50_YEARLY_ID: {'name': 'tier_6_50', 'minutes': 360, 'cost': 50}, # 6 hours/month, $510/year
+    config.STRIPE_TIER_12_100_YEARLY_ID: {'name': 'tier_12_100', 'minutes': 720, 'cost': 100}, # 12 hours/month, $1020/year
+    config.STRIPE_TIER_25_200_YEARLY_ID: {'name': 'tier_25_200', 'minutes': 1500, 'cost': 200}, # 25 hours/month, $2040/year
+    config.STRIPE_TIER_50_400_YEARLY_ID: {'name': 'tier_50_400', 'minutes': 3000, 'cost': 400}, # 50 hours/month, $4080/year
+    config.STRIPE_TIER_125_800_YEARLY_ID: {'name': 'tier_125_800', 'minutes': 7500, 'cost': 800}, # 125 hours/month, $8160/year
+    config.STRIPE_TIER_200_1000_YEARLY_ID: {'name': 'tier_200_1000', 'minutes': 12000, 'cost': 1000}, # 200 hours/month, $10200/year
 }

 # Pydantic models for request/response validation
diff --git a/backend/services/llm.py b/backend/services/llm.py
index c7491704..d610a1a1 100644
--- a/backend/services/llm.py
+++ b/backend/services/llm.py
@@ -133,6 +133,7 @@ def prepare_params(
                 "model": "openrouter/anthropic/claude-sonnet-4",
                 "messages": messages,
             }]
+            # params["mock_testing_fallback"] = True
             logger.debug("Added Claude-specific headers")

     # Add OpenRouter-specific parameters
@@ -312,7 +313,7 @@ async def make_llm_api_call(

             response = await litellm.acompletion(**params)
             logger.debug(f"Successfully received API response from {model_name}")
-            logger.debug(f"Response: {response}")
+            # logger.debug(f"Response: {response}")
             return response
         except (litellm.exceptions.RateLimitError, OpenAIError, json.JSONDecodeError) as e: