From ecb077e12f0219299b8289c25355b129cd7817ff Mon Sep 17 00:00:00 2001 From: Krishav Raj Singh Date: Sat, 12 Jul 2025 13:52:33 +0530 Subject: [PATCH 01/18] get/save llm keys --- backend/api.py | 3 ++ backend/local_llm/api.py | 30 +++++++++++++++++++ backend/services/llm.py | 4 +-- backend/utils/constants.py | 2 ++ backend/utils/local_api_keys.py | 52 +++++++++++++++++++++++++++++++++ 5 files changed, 89 insertions(+), 2 deletions(-) create mode 100644 backend/local_llm/api.py create mode 100644 backend/utils/local_api_keys.py diff --git a/backend/api.py b/backend/api.py index b452658f..f39ab14d 100644 --- a/backend/api.py +++ b/backend/api.py @@ -183,6 +183,9 @@ api_router.include_router(triggers_api.router) from pipedream import api as pipedream_api api_router.include_router(pipedream_api.router) +from local_llm import api as local_llm_api +api_router.include_router(local_llm_api.router) + @api_router.get("/health") async def health_check(): logger.info("Health check endpoint called") diff --git a/backend/local_llm/api.py b/backend/local_llm/api.py new file mode 100644 index 00000000..4f89baa0 --- /dev/null +++ b/backend/local_llm/api.py @@ -0,0 +1,30 @@ +from fastapi import APIRouter +from utils.local_api_keys import save_local_api_keys, get_local_api_keys +from utils.config import config, EnvMode +from fastapi import HTTPException +from typing import Dict +from utils.constants import PROVIDERS + +router = APIRouter(tags=["local-llm-keys"]) + +@router.get("/local-llm-keys") +def get_llm_keys() -> Dict[str, str]: + + if config.ENV_MODE != EnvMode.LOCAL: + raise HTTPException(status_code=403, detail="API key management only available in local mode") + + providers = [f"{provider}_API_KEY" for provider in PROVIDERS] + llm_keys = get_local_api_keys(providers) + return llm_keys + +@router.post("/local-llm-keys") +def save_local_llm_keys(request: Dict[str, str]) -> Dict[str, str]: + if config.ENV_MODE != EnvMode.LOCAL: + raise HTTPException(status_code=403, detail="API key management only available in local mode") + + print(f"Saving local LLM keys: {request}") + key_saved = save_local_api_keys(request) + if key_saved: + return {"message": "API keys saved successfully"} + else: + raise HTTPException(status_code=500, detail="Failed to save API keys") \ No newline at end of file diff --git a/backend/services/llm.py b/backend/services/llm.py index 6187e247..24c270b3 100644 --- a/backend/services/llm.py +++ b/backend/services/llm.py @@ -19,6 +19,7 @@ import litellm from litellm.files.main import ModelResponse from utils.logger import logger from utils.config import config +from utils.constants import PROVIDERS # litellm.set_verbose=True litellm.modify_params=True @@ -38,8 +39,7 @@ class LLMRetryError(LLMError): def setup_api_keys() -> None: """Set up API keys from environment variables.""" - providers = ['OPENAI', 'ANTHROPIC', 'GROQ', 'OPENROUTER', 'XAI'] - for provider in providers: + for provider in PROVIDERS: key = getattr(config, f'{provider}_API_KEY') if key: logger.debug(f"API key set for provider: {provider}") diff --git a/backend/utils/constants.py b/backend/utils/constants.py index 9d85684f..45c90295 100644 --- a/backend/utils/constants.py +++ b/backend/utils/constants.py @@ -163,3 +163,5 @@ MODEL_ACCESS_TIERS = { "tier_125_800": PAID_TIER_MODELS, "tier_200_1000": PAID_TIER_MODELS, } + +PROVIDERS = ['OPENAI', 'ANTHROPIC', 'GROQ', 'OPENROUTER', 'XAI'] \ No newline at end of file diff --git a/backend/utils/local_api_keys.py b/backend/utils/local_api_keys.py new file mode 100644 index 
00000000..0379390d --- /dev/null +++ b/backend/utils/local_api_keys.py @@ -0,0 +1,52 @@ +""" +Local API key management for LLMs. + +This module provides functionality to manage API keys in local mode +by reading and writing to the .env file. +""" + +import os +from typing import Dict, Optional, List +from utils.logger import logger +from utils.config import config, EnvMode +from dotenv import load_dotenv, set_key, find_dotenv +from utils.constants import PROVIDERS + +def get_local_api_keys(providers: List[str]) -> Dict[str, str]: + """Get API keys from .env file in local mode.""" + if config.ENV_MODE != EnvMode.LOCAL: + return {} + + try: + # Load current env vars + load_dotenv() + + return {provider: os.getenv(provider) or "" for provider in providers} + + except Exception as e: + logger.error(f"Failed to get local API keys: {e}") + return {} + +def save_local_api_keys(api_keys: Dict[str, str]) -> bool: + """Save API keys to .env file in local mode.""" + if config.ENV_MODE != EnvMode.LOCAL: + return False + + try: + # Find .env file + env_path = find_dotenv() + if not env_path: + logger.error("Could not find .env file") + return False + + # Update each API key + for key, value in api_keys.items(): + if value: # Only set if value is not empty + set_key(env_path, key, value) + logger.info(f"Updated {key} in .env file") + + return True + + except Exception as e: + logger.error(f"Failed to save local API keys: {e}") + return False \ No newline at end of file From 44906ee3d006eeca8114fa10fc43d6b88b7b6607 Mon Sep 17 00:00:00 2001 From: Krishav Raj Singh Date: Sun, 13 Jul 2025 11:50:37 +0530 Subject: [PATCH 02/18] frontend for local api key management --- backend/local_llm/api.py | 1 - backend/utils/local_api_keys.py | 13 +- .../(personalAccount)/settings/layout.tsx | 1 + .../settings/llm-api-keys/page.tsx | 9 ++ .../src/components/api-keys/llm-api-keys.tsx | 133 ++++++++++++++++++ .../sidebar/nav-user-with-teams.tsx | 8 ++ .../thread/chat-input/model-selector.tsx | 20 ++- 7 files changed, 173 insertions(+), 12 deletions(-) create mode 100644 frontend/src/app/(dashboard)/(personalAccount)/settings/llm-api-keys/page.tsx create mode 100644 frontend/src/components/api-keys/llm-api-keys.tsx diff --git a/backend/local_llm/api.py b/backend/local_llm/api.py index 4f89baa0..507adff6 100644 --- a/backend/local_llm/api.py +++ b/backend/local_llm/api.py @@ -22,7 +22,6 @@ def save_local_llm_keys(request: Dict[str, str]) -> Dict[str, str]: if config.ENV_MODE != EnvMode.LOCAL: raise HTTPException(status_code=403, detail="API key management only available in local mode") - print(f"Saving local LLM keys: {request}") key_saved = save_local_api_keys(request) if key_saved: return {"message": "API keys saved successfully"} diff --git a/backend/utils/local_api_keys.py b/backend/utils/local_api_keys.py index 0379390d..d722393d 100644 --- a/backend/utils/local_api_keys.py +++ b/backend/utils/local_api_keys.py @@ -14,12 +14,9 @@ from utils.constants import PROVIDERS def get_local_api_keys(providers: List[str]) -> Dict[str, str]: """Get API keys from .env file in local mode.""" - if config.ENV_MODE != EnvMode.LOCAL: - return {} - try: # Load current env vars - load_dotenv() + load_dotenv(override=True) return {provider: os.getenv(provider) or "" for provider in providers} @@ -29,9 +26,6 @@ def get_local_api_keys(providers: List[str]) -> Dict[str, str]: def save_local_api_keys(api_keys: Dict[str, str]) -> bool: """Save API keys to .env file in local mode.""" - if config.ENV_MODE != EnvMode.LOCAL: - return False - try: # 
Find .env file env_path = find_dotenv() @@ -41,9 +35,8 @@ def save_local_api_keys(api_keys: Dict[str, str]) -> bool: # Update each API key for key, value in api_keys.items(): - if value: # Only set if value is not empty - set_key(env_path, key, value) - logger.info(f"Updated {key} in .env file") + set_key(env_path, key, value) + logger.info(f"Updated {key} in .env file") return True diff --git a/frontend/src/app/(dashboard)/(personalAccount)/settings/layout.tsx b/frontend/src/app/(dashboard)/(personalAccount)/settings/layout.tsx index 5f4b3d73..196f5869 100644 --- a/frontend/src/app/(dashboard)/(personalAccount)/settings/layout.tsx +++ b/frontend/src/app/(dashboard)/(personalAccount)/settings/layout.tsx @@ -15,6 +15,7 @@ export default function PersonalAccountSettingsPage({ // { name: "Teams", href: "/settings/teams" }, { name: 'Billing', href: '/settings/billing' }, { name: 'Usage Logs', href: '/settings/usage-logs' }, + { name: 'LLM API Keys', href: '/settings/llm-api-keys' }, ]; return ( <> diff --git a/frontend/src/app/(dashboard)/(personalAccount)/settings/llm-api-keys/page.tsx b/frontend/src/app/(dashboard)/(personalAccount)/settings/llm-api-keys/page.tsx new file mode 100644 index 00000000..93a53b9a --- /dev/null +++ b/frontend/src/app/(dashboard)/(personalAccount)/settings/llm-api-keys/page.tsx @@ -0,0 +1,9 @@ +import { isLocalMode } from "@/lib/config"; +import { Alert, AlertDescription } from "@/components/ui/alert"; +import { Shield } from "lucide-react"; +import { LLMApiKeys } from "@/components/api-keys/llm-api-keys"; + +export default function LLMKeysPage() { + + return +} \ No newline at end of file diff --git a/frontend/src/components/api-keys/llm-api-keys.tsx b/frontend/src/components/api-keys/llm-api-keys.tsx new file mode 100644 index 00000000..31b6eec2 --- /dev/null +++ b/frontend/src/components/api-keys/llm-api-keys.tsx @@ -0,0 +1,133 @@ +"use client"; + +import { Eye, EyeOff } from "lucide-react"; +import { Button } from "../ui/button"; +import { Card, CardContent, CardDescription, CardHeader, CardTitle } from "../ui/card"; +import { isLocalMode } from "@/lib/config"; +import { Input } from "../ui/input"; +import { Label } from "../ui/label"; +import { useEffect, useState } from "react"; +import { useMutation, useQuery, useQueryClient } from "@tanstack/react-query"; +import { backendApi } from "@/lib/api-client"; +import { toast } from "sonner"; +import { useForm } from "react-hook-form"; + +interface APIKeyForm { + [key: string]: string; +} + +export function LLMApiKeys() { + const queryClient = useQueryClient(); + const [visibleKeys, setVisibleKeys] = useState>({}); + const {data: apiKeys, isLoading} = useQuery({ + queryKey: ['api-keys'], + queryFn: async() => { + const response = await backendApi.get('/local-llm-keys'); + return response.data; + }, + }); + + const { register, handleSubmit, formState: { errors, isDirty }, reset } = useForm({ + defaultValues: apiKeys || {} + }); + + const handleSave = async (data: APIKeyForm) => { + updateApiKeys.mutate(data); + } + const updateApiKeys = useMutation({ + mutationFn: async (data: APIKeyForm) => { + const response = await backendApi.post('/local-llm-keys', data); + await queryClient.invalidateQueries({ queryKey: ['api-keys'] }); + return response.data; + }, + onSuccess: (data) => { + toast.success(data.message); + }, + onError: () => { + toast.error('Failed to update API keys'); + } + }); + + const keysArray = apiKeys ? 
Object.entries(apiKeys).map(([key, value]) => ({ + id: key, + name: key.replace(/_/g, " ").replace("KEY", "Key"), + value: value + })) : []; + + useEffect(() => { + if (apiKeys) { + reset(apiKeys); + } + }, [apiKeys, reset]); + + const toggleKeyVisibility = (keyId: string) => { + setVisibleKeys(prev => ({ + ...prev, + [keyId]: !prev[keyId] + })); + } + + if (isLoading) { + return + + API Keys + Loading... + + ; + } + + return + + API Keys + + {isLocalMode() ? ( + <> + Manage your API keys for various Language Model providers. + + ) : ( + <> + API key management is only available in local mode. + + )} + + + + {isLocalMode() && ( + +
+ {keysArray && keysArray?.map((key: any) => ( + +
+ +
+ + +
+ {errors[key.id] &&

{errors[key.id]?.message}

} +
+ + ))} + +
+ +
+
+
+ )} +
+} \ No newline at end of file diff --git a/frontend/src/components/sidebar/nav-user-with-teams.tsx b/frontend/src/components/sidebar/nav-user-with-teams.tsx index ff02ff58..9b48983b 100644 --- a/frontend/src/components/sidebar/nav-user-with-teams.tsx +++ b/frontend/src/components/sidebar/nav-user-with-teams.tsx @@ -17,6 +17,7 @@ import { AudioWaveform, Sun, Moon, + Key, } from 'lucide-react'; import { useAccounts } from '@/hooks/use-accounts'; import NewTeamForm from '@/components/basejump/new-team-form'; @@ -48,6 +49,7 @@ import { } from '@/components/ui/dialog'; import { createClient } from '@/lib/supabase/client'; import { useTheme } from 'next-themes'; +import { isLocalMode } from '@/lib/config'; export function NavUserWithTeams({ user, @@ -286,6 +288,12 @@ export function NavUserWithTeams({ Billing + {isLocalMode() && + + + LLM API Keys + + } {/* diff --git a/frontend/src/components/thread/chat-input/model-selector.tsx b/frontend/src/components/thread/chat-input/model-selector.tsx index 1ad2b0c0..67b3181b 100644 --- a/frontend/src/components/thread/chat-input/model-selector.tsx +++ b/frontend/src/components/thread/chat-input/model-selector.tsx @@ -14,7 +14,7 @@ import { TooltipTrigger, } from '@/components/ui/tooltip'; import { Button } from '@/components/ui/button'; -import { Check, ChevronDown, Search, AlertTriangle, Crown, ArrowUpRight, Brain, Plus, Edit, Trash, Cpu } from 'lucide-react'; +import { Check, ChevronDown, Search, AlertTriangle, Crown, ArrowUpRight, Brain, Plus, Edit, Trash, Cpu, Key } from 'lucide-react'; import { ModelOption, SubscriptionStatus, @@ -32,6 +32,7 @@ import { cn } from '@/lib/utils'; import { useRouter } from 'next/navigation'; import { isLocalMode } from '@/lib/config'; import { CustomModelDialog, CustomModelFormData } from './custom-model-dialog'; +import Link from 'next/link'; interface CustomModel { id: string; @@ -674,6 +675,22 @@ export const ModelSelector: React.FC = ({
All Models {isLocalMode() && ( +
+ + + + + + + + + Manage API Keys + + + @@ -694,6 +711,7 @@ export const ModelSelector: React.FC = ({ +
)}
{uniqueModels From 7e5bb3f88099b38a9ba654d13dac263471be7454 Mon Sep 17 00:00:00 2001 From: Krishav Raj Singh Date: Sun, 13 Jul 2025 13:04:19 +0530 Subject: [PATCH 03/18] make .env editable in docker --- docker-compose.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-compose.yaml b/docker-compose.yaml index 094db576..5c539be5 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -37,7 +37,7 @@ services: ports: - "8000:8000" volumes: - - ./backend/.env:/app/.env:ro + - ./backend/.env:/app/.env env_file: - ./backend/.env environment: From e31dc7f8e82e79d15990c88de3d5641c90663449 Mon Sep 17 00:00:00 2001 From: Krishav Raj Singh Date: Wed, 16 Jul 2025 21:34:34 +0530 Subject: [PATCH 04/18] Local .Env Manager --- backend/api.py | 4 +- backend/local_env_manager/api.py | 44 ++++++++++++++++++ backend/local_llm/api.py | 29 ------------ backend/utils/local_api_keys.py | 45 ------------------- .../settings/env-manager/page.tsx | 9 ++++ .../(personalAccount)/settings/layout.tsx | 3 +- .../settings/llm-api-keys/page.tsx | 9 ---- .../local-env-manager.tsx} | 19 ++++---- .../sidebar/nav-user-with-teams.tsx | 8 ++-- .../thread/chat-input/model-selector.tsx | 8 ++-- 10 files changed, 75 insertions(+), 103 deletions(-) create mode 100644 backend/local_env_manager/api.py delete mode 100644 backend/local_llm/api.py delete mode 100644 backend/utils/local_api_keys.py create mode 100644 frontend/src/app/(dashboard)/(personalAccount)/settings/env-manager/page.tsx delete mode 100644 frontend/src/app/(dashboard)/(personalAccount)/settings/llm-api-keys/page.tsx rename frontend/src/components/{api-keys/llm-api-keys.tsx => env-manager/local-env-manager.tsx} (88%) diff --git a/backend/api.py b/backend/api.py index 68bf96d8..d85dd01d 100644 --- a/backend/api.py +++ b/backend/api.py @@ -187,8 +187,8 @@ api_router.include_router(workflows_router, prefix="/workflows") from pipedream import api as pipedream_api api_router.include_router(pipedream_api.router) -from local_llm import api as local_llm_api -api_router.include_router(local_llm_api.router) +from local_env_manager import api as local_env_manager_api +api_router.include_router(local_env_manager_api.router) @api_router.get("/health") async def health_check(): diff --git a/backend/local_env_manager/api.py b/backend/local_env_manager/api.py new file mode 100644 index 00000000..8c575699 --- /dev/null +++ b/backend/local_env_manager/api.py @@ -0,0 +1,44 @@ +from fastapi import APIRouter +from utils.config import config, EnvMode +from fastapi import HTTPException +from typing import Dict +from dotenv import load_dotenv, set_key, find_dotenv, dotenv_values +from utils.logger import logger + +router = APIRouter(tags=["local-env-manager"]) + +@router.get("/env-vars") +def get_env_vars() -> Dict[str, str]: + if config.ENV_MODE != EnvMode.LOCAL: + raise HTTPException(status_code=403, detail="Env vars management only available in local mode") + + try: + env_path = find_dotenv() + if not env_path: + logger.error("Could not find .env file") + return {} + + return dotenv_values(env_path) + except Exception as e: + logger.error(f"Failed to get env vars: {e}") + raise HTTPException(status_code=500, detail=f"Failed to get env variables: {e}") + +@router.post("/env-vars") +def save_env_vars(request: Dict[str, str]) -> Dict[str, str]: + if config.ENV_MODE != EnvMode.LOCAL: + raise HTTPException(status_code=403, detail="Env vars management only available in local mode") + + try: + env_path = find_dotenv() + if not env_path: + raise 
HTTPException(status_code=500, detail="Could not find .env file") + + for key, value in request.items(): + set_key(env_path, key, value) + + load_dotenv(override=True) + logger.info(f"Env variables saved successfully: {request}") + return {"message": "Env variables saved successfully"} + except Exception as e: + logger.error(f"Failed to save env variables: {e}") + raise HTTPException(status_code=500, detail=f"Failed to save env variables: {e}") \ No newline at end of file diff --git a/backend/local_llm/api.py b/backend/local_llm/api.py deleted file mode 100644 index 507adff6..00000000 --- a/backend/local_llm/api.py +++ /dev/null @@ -1,29 +0,0 @@ -from fastapi import APIRouter -from utils.local_api_keys import save_local_api_keys, get_local_api_keys -from utils.config import config, EnvMode -from fastapi import HTTPException -from typing import Dict -from utils.constants import PROVIDERS - -router = APIRouter(tags=["local-llm-keys"]) - -@router.get("/local-llm-keys") -def get_llm_keys() -> Dict[str, str]: - - if config.ENV_MODE != EnvMode.LOCAL: - raise HTTPException(status_code=403, detail="API key management only available in local mode") - - providers = [f"{provider}_API_KEY" for provider in PROVIDERS] - llm_keys = get_local_api_keys(providers) - return llm_keys - -@router.post("/local-llm-keys") -def save_local_llm_keys(request: Dict[str, str]) -> Dict[str, str]: - if config.ENV_MODE != EnvMode.LOCAL: - raise HTTPException(status_code=403, detail="API key management only available in local mode") - - key_saved = save_local_api_keys(request) - if key_saved: - return {"message": "API keys saved successfully"} - else: - raise HTTPException(status_code=500, detail="Failed to save API keys") \ No newline at end of file diff --git a/backend/utils/local_api_keys.py b/backend/utils/local_api_keys.py deleted file mode 100644 index d722393d..00000000 --- a/backend/utils/local_api_keys.py +++ /dev/null @@ -1,45 +0,0 @@ -""" -Local API key management for LLMs. - -This module provides functionality to manage API keys in local mode -by reading and writing to the .env file. 
-""" - -import os -from typing import Dict, Optional, List -from utils.logger import logger -from utils.config import config, EnvMode -from dotenv import load_dotenv, set_key, find_dotenv -from utils.constants import PROVIDERS - -def get_local_api_keys(providers: List[str]) -> Dict[str, str]: - """Get API keys from .env file in local mode.""" - try: - # Load current env vars - load_dotenv(override=True) - - return {provider: os.getenv(provider) or "" for provider in providers} - - except Exception as e: - logger.error(f"Failed to get local API keys: {e}") - return {} - -def save_local_api_keys(api_keys: Dict[str, str]) -> bool: - """Save API keys to .env file in local mode.""" - try: - # Find .env file - env_path = find_dotenv() - if not env_path: - logger.error("Could not find .env file") - return False - - # Update each API key - for key, value in api_keys.items(): - set_key(env_path, key, value) - logger.info(f"Updated {key} in .env file") - - return True - - except Exception as e: - logger.error(f"Failed to save local API keys: {e}") - return False \ No newline at end of file diff --git a/frontend/src/app/(dashboard)/(personalAccount)/settings/env-manager/page.tsx b/frontend/src/app/(dashboard)/(personalAccount)/settings/env-manager/page.tsx new file mode 100644 index 00000000..a07ad48e --- /dev/null +++ b/frontend/src/app/(dashboard)/(personalAccount)/settings/env-manager/page.tsx @@ -0,0 +1,9 @@ +import { isLocalMode } from "@/lib/config"; +import { Alert, AlertDescription } from "@/components/ui/alert"; +import { Shield } from "lucide-react"; +import { LocalEnvManager } from "@/components/env-manager/local-env-manager"; + +export default function LocalEnvManagerPage() { + + return +} \ No newline at end of file diff --git a/frontend/src/app/(dashboard)/(personalAccount)/settings/layout.tsx b/frontend/src/app/(dashboard)/(personalAccount)/settings/layout.tsx index 196f5869..462f193a 100644 --- a/frontend/src/app/(dashboard)/(personalAccount)/settings/layout.tsx +++ b/frontend/src/app/(dashboard)/(personalAccount)/settings/layout.tsx @@ -3,6 +3,7 @@ import { Separator } from '@/components/ui/separator'; import Link from 'next/link'; import { usePathname } from 'next/navigation'; +import { isLocalMode } from '@/lib/config'; export default function PersonalAccountSettingsPage({ children, @@ -15,7 +16,7 @@ export default function PersonalAccountSettingsPage({ // { name: "Teams", href: "/settings/teams" }, { name: 'Billing', href: '/settings/billing' }, { name: 'Usage Logs', href: '/settings/usage-logs' }, - { name: 'LLM API Keys', href: '/settings/llm-api-keys' }, + ...(isLocalMode() ? 
[{ name: 'Local .Env Manager', href: '/settings/env-manager' }] : []), ]; return ( <> diff --git a/frontend/src/app/(dashboard)/(personalAccount)/settings/llm-api-keys/page.tsx b/frontend/src/app/(dashboard)/(personalAccount)/settings/llm-api-keys/page.tsx deleted file mode 100644 index 93a53b9a..00000000 --- a/frontend/src/app/(dashboard)/(personalAccount)/settings/llm-api-keys/page.tsx +++ /dev/null @@ -1,9 +0,0 @@ -import { isLocalMode } from "@/lib/config"; -import { Alert, AlertDescription } from "@/components/ui/alert"; -import { Shield } from "lucide-react"; -import { LLMApiKeys } from "@/components/api-keys/llm-api-keys"; - -export default function LLMKeysPage() { - - return -} \ No newline at end of file diff --git a/frontend/src/components/api-keys/llm-api-keys.tsx b/frontend/src/components/env-manager/local-env-manager.tsx similarity index 88% rename from frontend/src/components/api-keys/llm-api-keys.tsx rename to frontend/src/components/env-manager/local-env-manager.tsx index 31b6eec2..9a274c32 100644 --- a/frontend/src/components/api-keys/llm-api-keys.tsx +++ b/frontend/src/components/env-manager/local-env-manager.tsx @@ -16,15 +16,16 @@ interface APIKeyForm { [key: string]: string; } -export function LLMApiKeys() { +export function LocalEnvManager() { const queryClient = useQueryClient(); const [visibleKeys, setVisibleKeys] = useState>({}); const {data: apiKeys, isLoading} = useQuery({ queryKey: ['api-keys'], queryFn: async() => { - const response = await backendApi.get('/local-llm-keys'); + const response = await backendApi.get('/env-vars'); return response.data; }, + enabled: isLocalMode() }); const { register, handleSubmit, formState: { errors, isDirty }, reset } = useForm({ @@ -36,7 +37,7 @@ export function LLMApiKeys() { } const updateApiKeys = useMutation({ mutationFn: async (data: APIKeyForm) => { - const response = await backendApi.post('/local-llm-keys', data); + const response = await backendApi.post('/env-vars', data); await queryClient.invalidateQueries({ queryKey: ['api-keys'] }); return response.data; }, @@ -50,7 +51,7 @@ export function LLMApiKeys() { const keysArray = apiKeys ? Object.entries(apiKeys).map(([key, value]) => ({ id: key, - name: key.replace(/_/g, " ").replace("KEY", "Key"), + name: key, value: value })) : []; @@ -70,7 +71,7 @@ export function LLMApiKeys() { if (isLoading) { return - API Keys + Local .Env Manager Loading... ; @@ -78,15 +79,15 @@ export function LLMApiKeys() { return - API Keys + Local .Env Manager {isLocalMode() ? ( <> - Manage your API keys for various Language Model providers. + Manage your local environment variables ) : ( <> - API key management is only available in local mode. + Local .Env Manager is only available in local mode. )} @@ -95,7 +96,7 @@ export function LLMApiKeys() { {isLocalMode() && (
- {keysArray && keysArray?.map((key: any) => ( + {keysArray && keysArray?.map(key => (
diff --git a/frontend/src/components/sidebar/nav-user-with-teams.tsx b/frontend/src/components/sidebar/nav-user-with-teams.tsx index 9b48983b..f02d150a 100644 --- a/frontend/src/components/sidebar/nav-user-with-teams.tsx +++ b/frontend/src/components/sidebar/nav-user-with-teams.tsx @@ -17,7 +17,7 @@ import { AudioWaveform, Sun, Moon, - Key, + KeyRound, } from 'lucide-react'; import { useAccounts } from '@/hooks/use-accounts'; import NewTeamForm from '@/components/basejump/new-team-form'; @@ -289,9 +289,9 @@ export function NavUserWithTeams({ {isLocalMode() && - - - LLM API Keys + + + Local .Env Manager } {/* diff --git a/frontend/src/components/thread/chat-input/model-selector.tsx b/frontend/src/components/thread/chat-input/model-selector.tsx index 67b3181b..730ddcb6 100644 --- a/frontend/src/components/thread/chat-input/model-selector.tsx +++ b/frontend/src/components/thread/chat-input/model-selector.tsx @@ -14,7 +14,7 @@ import { TooltipTrigger, } from '@/components/ui/tooltip'; import { Button } from '@/components/ui/button'; -import { Check, ChevronDown, Search, AlertTriangle, Crown, ArrowUpRight, Brain, Plus, Edit, Trash, Cpu, Key } from 'lucide-react'; +import { Check, ChevronDown, Search, AlertTriangle, Crown, ArrowUpRight, Brain, Plus, Edit, Trash, Cpu, Key, KeyRound } from 'lucide-react'; import { ModelOption, SubscriptionStatus, @@ -680,14 +680,14 @@ export const ModelSelector: React.FC = ({ - + - Manage API Keys + Local .Env Manager From ea659c7a1e4f09f2d33bf7c9f05522750c2f70ea Mon Sep 17 00:00:00 2001 From: Krishav Raj Singh Date: Fri, 18 Jul 2025 23:25:27 +0530 Subject: [PATCH 05/18] feat: add and delete new api key --- .../env-manager/local-env-manager.tsx | 100 +++++++++++++++++- 1 file changed, 95 insertions(+), 5 deletions(-) diff --git a/frontend/src/components/env-manager/local-env-manager.tsx b/frontend/src/components/env-manager/local-env-manager.tsx index 9a274c32..26fe1a22 100644 --- a/frontend/src/components/env-manager/local-env-manager.tsx +++ b/frontend/src/components/env-manager/local-env-manager.tsx @@ -1,6 +1,6 @@ "use client"; -import { Eye, EyeOff } from "lucide-react"; +import { Eye, EyeOff, Plus, Trash } from "lucide-react"; import { Button } from "../ui/button"; import { Card, CardContent, CardDescription, CardHeader, CardTitle } from "../ui/card"; import { isLocalMode } from "@/lib/config"; @@ -19,6 +19,8 @@ interface APIKeyForm { export function LocalEnvManager() { const queryClient = useQueryClient(); const [visibleKeys, setVisibleKeys] = useState>({}); + const [newApiKeys, setNewApiKeys] = useState<{key: string, value: string, id: string}[]>([]); + const {data: apiKeys, isLoading} = useQuery({ queryKey: ['api-keys'], queryFn: async() => { @@ -33,8 +35,51 @@ export function LocalEnvManager() { }); const handleSave = async (data: APIKeyForm) => { - updateApiKeys.mutate(data); + const duplicate_key = newApiKeys.find(entry => data[entry.key.trim()]); + if (duplicate_key) { + toast.error(`Key ${duplicate_key.key} already exists`); + return; + } + const submitData = { + ...data, + ...Object.fromEntries(newApiKeys.map(entry => [entry.key, entry.value])) + } + + updateApiKeys.mutate(submitData); } + + const handleAddNewKey = () => { + setNewApiKeys([...newApiKeys, {key: "", value: "", id: crypto.randomUUID()}]); + } + + const checkKeyIsDuplicate = (key: string) => { + const trimmedKey = key.trim(); + const keyIsDuplicate = + trimmedKey && + ( + (apiKeys && Object.keys(apiKeys).includes(trimmedKey)) || + newApiKeys.filter(e => e.key.trim() === 
trimmedKey).length > 1 + ); + return keyIsDuplicate; + } + + const handleNewKeyChange = (id: string, field: string, value: string) => { + setNewApiKeys(prev => + prev.map(entry => entry.id === id ? {...entry, [field]: value} : entry) + ); + } + + const handleDeleteKey = (id: string) => { + setNewApiKeys(prev => prev.filter(entry => entry.id !== id)); + } + + const hasEmptyKeyValues = newApiKeys.some(entry => entry.key.trim() === "" || entry.value.trim() === ""); + const hasDuplicateKeys = (): boolean => { + const allKeys = [...Object.keys(apiKeys || {}), ...newApiKeys.map(entry => entry.key.trim())]; + const uniqueKeys = new Set(allKeys); + return uniqueKeys.size !== allKeys.length; + } + const updateApiKeys = useMutation({ mutationFn: async (data: APIKeyForm) => { const response = await backendApi.post('/env-vars', data); @@ -43,6 +88,7 @@ export function LocalEnvManager() { }, onSuccess: (data) => { toast.success(data.message); + setNewApiKeys([]); }, onError: () => { toast.error('Failed to update API keys'); @@ -119,12 +165,56 @@ export function LocalEnvManager() {
))} - -
+ +
+ {newApiKeys.map(entry => { + const keyIsDuplicate = checkKeyIsDuplicate(entry.key); + return ( + +
+ +
+ handleNewKeyChange(entry.id, 'key', e.target.value)} + /> + handleNewKeyChange(entry.id, 'value', e.target.value)} + /> + +
+ {keyIsDuplicate &&

Key already exists

} +
+ )})} +
+ +
+
From 2263e8ce0000c9ec798699114cf7c53794ffccbe Mon Sep 17 00:00:00 2001 From: Krishav Raj Singh Date: Mon, 21 Jul 2025 22:33:23 +0530 Subject: [PATCH 06/18] remove whitespace --- frontend/src/components/env-manager/local-env-manager.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/src/components/env-manager/local-env-manager.tsx b/frontend/src/components/env-manager/local-env-manager.tsx index 26fe1a22..9cf6355f 100644 --- a/frontend/src/components/env-manager/local-env-manager.tsx +++ b/frontend/src/components/env-manager/local-env-manager.tsx @@ -42,7 +42,7 @@ export function LocalEnvManager() { } const submitData = { ...data, - ...Object.fromEntries(newApiKeys.map(entry => [entry.key, entry.value])) + ...Object.fromEntries(newApiKeys.map(entry => [entry.key.trim(), entry.value.trim()])) } updateApiKeys.mutate(submitData); From e64af87611e62eaccae45da8d74376dd9c20d5cd Mon Sep 17 00:00:00 2001 From: Krishav Raj Singh Date: Mon, 21 Jul 2025 22:55:49 +0530 Subject: [PATCH 07/18] undo providers location --- backend/services/llm.py | 4 ++-- backend/utils/constants.py | 2 -- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/backend/services/llm.py b/backend/services/llm.py index 9102dcd8..0ec855a2 100644 --- a/backend/services/llm.py +++ b/backend/services/llm.py @@ -19,7 +19,6 @@ import litellm from litellm.files.main import ModelResponse from utils.logger import logger from utils.config import config -from utils.constants import PROVIDERS # litellm.set_verbose=True litellm.modify_params=True @@ -39,7 +38,8 @@ class LLMRetryError(LLMError): def setup_api_keys() -> None: """Set up API keys from environment variables.""" - for provider in PROVIDERS: + providers = ['OPENAI', 'ANTHROPIC', 'GROQ', 'OPENROUTER', 'XAI'] + for provider in providers: key = getattr(config, f'{provider}_API_KEY') if key: logger.debug(f"API key set for provider: {provider}") diff --git a/backend/utils/constants.py b/backend/utils/constants.py index c70538cf..7d9d4246 100644 --- a/backend/utils/constants.py +++ b/backend/utils/constants.py @@ -168,5 +168,3 @@ MODEL_ACCESS_TIERS = { "tier_125_800": PAID_TIER_MODELS, "tier_200_1000": PAID_TIER_MODELS, } - -PROVIDERS = ['OPENAI', 'ANTHROPIC', 'GROQ', 'OPENROUTER', 'XAI'] \ No newline at end of file From 510ec46aef5b046a6fd2fcaae068843e57db631f Mon Sep 17 00:00:00 2001 From: Anx Date: Wed, 23 Jul 2025 18:59:11 +0800 Subject: [PATCH 08/18] fix: prevent right panel content overflow in ScrollArea component Resolves content overflow issues in the right panel caused by the ScrollArea component's internal div with 'min-width:100%;display:table' styling. This addresses the known shadcn-ui ScrollArea limitation where the component takes up the entire width and causes overflow. The fix ensures proper width constraints while maintaining scroll functionality. 
References: https://github.com/shadcn-ui/ui/issues/3833 --- frontend/src/components/ui/scroll-area.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/src/components/ui/scroll-area.tsx b/frontend/src/components/ui/scroll-area.tsx index e94be071..2c90e48a 100644 --- a/frontend/src/components/ui/scroll-area.tsx +++ b/frontend/src/components/ui/scroll-area.tsx @@ -18,7 +18,7 @@ function ScrollArea({ > {children} From 02ade2280e0dbfe72971a2f639fe2ac1bfb2fc4d Mon Sep 17 00:00:00 2001 From: Vukasin Date: Wed, 23 Jul 2025 23:17:05 +0200 Subject: [PATCH 09/18] fix: refresh token timing --- backend/supabase/config.toml | 2 +- frontend/src/lib/supabase/client.ts | 8 +++++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/backend/supabase/config.toml b/backend/supabase/config.toml index 257acca2..b9233d35 100644 --- a/backend/supabase/config.toml +++ b/backend/supabase/config.toml @@ -113,7 +113,7 @@ jwt_expiry = 3600 enable_refresh_token_rotation = true # Allows refresh tokens to be reused after expiry, up to the specified interval in seconds. # Requires enable_refresh_token_rotation = true. -refresh_token_reuse_interval = 10 +refresh_token_reuse_interval = 86400 # Allow/disallow new user signups to your project. enable_signup = true # Allow/disallow anonymous sign-ins to your project. diff --git a/frontend/src/lib/supabase/client.ts b/frontend/src/lib/supabase/client.ts index 9ee917b0..1a39d2f9 100644 --- a/frontend/src/lib/supabase/client.ts +++ b/frontend/src/lib/supabase/client.ts @@ -14,5 +14,11 @@ export const createClient = () => { // console.log('Supabase URL:', supabaseUrl); // console.log('Supabase Anon Key:', supabaseAnonKey); - return createBrowserClient(supabaseUrl, supabaseAnonKey); + return createBrowserClient(supabaseUrl, supabaseAnonKey, { + auth: { + autoRefreshToken: true, + persistSession: true, + detectSessionInUrl: true + } + }); }; From f702b0ddf2e89bd21ed618df94c93d4542d200df Mon Sep 17 00:00:00 2001 From: Vukasin Date: Wed, 23 Jul 2025 23:25:00 +0200 Subject: [PATCH 10/18] fix: revert grace period for refresh token --- backend/supabase/config.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/supabase/config.toml b/backend/supabase/config.toml index b9233d35..257acca2 100644 --- a/backend/supabase/config.toml +++ b/backend/supabase/config.toml @@ -113,7 +113,7 @@ jwt_expiry = 3600 enable_refresh_token_rotation = true # Allows refresh tokens to be reused after expiry, up to the specified interval in seconds. # Requires enable_refresh_token_rotation = true. -refresh_token_reuse_interval = 86400 +refresh_token_reuse_interval = 10 # Allow/disallow new user signups to your project. enable_signup = true # Allow/disallow anonymous sign-ins to your project. 
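A minimal usage sketch, not part of the patch, showing how the refresh behaviour configured above can be observed from the browser. It assumes the createClient helper from frontend/src/lib/supabase/client.ts in the diff above; with autoRefreshToken enabled, the client refreshes the access token in the background and emits the same TOKEN_REFRESHED event that the AuthProvider starts logging in the next commit.

import { createClient } from '@/lib/supabase/client';

const supabase = createClient();

// Listen for auth lifecycle events; seeing TOKEN_REFRESHED confirms that
// background token refresh (autoRefreshToken) is active for this session.
const { data: listener } = supabase.auth.onAuthStateChange((event, session) => {
  if (event === 'TOKEN_REFRESHED') {
    console.log('Access token refreshed; expires at', session?.expires_at);
  }
});

// Stop observing when no longer needed:
// listener.subscription.unsubscribe();
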
From fc47e89da8a1563dfcdeb002b9132b310a9f710b Mon Sep 17 00:00:00 2001 From: mykonos-ibiza <222371740+mykonos-ibiza@users.noreply.github.com> Date: Thu, 24 Jul 2025 14:16:04 +0530 Subject: [PATCH 11/18] migrate MFA functionality to frontend and remove backend MFA endpoints --- backend/api.py | 3 +- .../auth/phone_verification_supabase_mfa.py | 591 ------------------ backend/services/billing.py | 1 + frontend/src/components/AuthProvider.tsx | 8 + .../auth/background-aal-checker.tsx | 6 +- frontend/src/lib/api/phone-verification.ts | 55 +- frontend/src/lib/supabase/mfa.ts | 382 +++++++++++ 7 files changed, 403 insertions(+), 643 deletions(-) delete mode 100644 backend/auth/phone_verification_supabase_mfa.py create mode 100644 frontend/src/lib/supabase/mfa.ts diff --git a/backend/api.py b/backend/api.py index ffb3f602..e07c4a83 100644 --- a/backend/api.py +++ b/backend/api.py @@ -187,8 +187,7 @@ api_router.include_router(workflows_router, prefix="/workflows") from pipedream import api as pipedream_api api_router.include_router(pipedream_api.router) -from auth import phone_verification_supabase_mfa -api_router.include_router(phone_verification_supabase_mfa.router) +# MFA functionality moved to frontend @api_router.get("/health") async def health_check(): diff --git a/backend/auth/phone_verification_supabase_mfa.py b/backend/auth/phone_verification_supabase_mfa.py deleted file mode 100644 index a478292f..00000000 --- a/backend/auth/phone_verification_supabase_mfa.py +++ /dev/null @@ -1,591 +0,0 @@ -""" -Auth MFA endpoints for Supabase Phone-based Multi-Factor Authentication (MFA). - -Currently, only SMS is supported as a second factor. Users can enroll their phone number for SMS-based 2FA. -No recovery codes are supported, but users can update their phone number for backup. - -This API provides endpoints to: -- Enroll a phone number for SMS 2FA -- Create a challenge for a phone factor (sends SMS) -- Verify a challenge with SMS code -- Create and verify a challenge in a single step -- List enrolled factors -- Unenroll a factor -- Get Authenticator Assurance Level (AAL) -""" - -import json -import os -from fastapi import APIRouter, Depends, HTTPException, Request -from pydantic import BaseModel, Field -from typing import List, Optional -import jwt -from datetime import datetime, timezone -from supabase import create_client, Client -from utils.auth_utils import get_current_user_id_from_jwt -from utils.config import config -from utils.logger import logger, structlog - -router = APIRouter(prefix="/mfa", tags=["MFA"]) - -# Initialize Supabase client with anon key for user operations -supabase_url = config.SUPABASE_URL -supabase_anon_key = config.SUPABASE_ANON_KEY - -# Cutoff date for new user phone verification requirement -# Users created after this date will be required to have phone verification -# Users created before this date are grandfathered in and not required to verify -PHONE_VERIFICATION_CUTOFF_DATE = datetime(2025, 7, 21, 0, 0, 0, tzinfo=timezone.utc) - -def is_phone_verification_mandatory() -> bool: - """Check if phone verification is mandatory based on environment variable.""" - env_val = os.getenv("PHONE_NUMBER_MANDATORY") - if env_val is None: - return False - return env_val.lower() in ('true', 't', 'yes', 'y', '1') - - -def get_authenticated_client(request: Request) -> Client: - """ - Create a Supabase client authenticated with the user's JWT token. - This approach uses the JWT token directly for server-side authentication. 
- """ - # Extract the JWT token from the Authorization header - auth_header = request.headers.get("Authorization") - if not auth_header or not auth_header.startswith("Bearer "): - raise HTTPException( - status_code=401, detail="Missing or invalid Authorization header" - ) - - token = auth_header.split(" ")[1] - - # Extract the refresh token from the custom header - refresh_token = request.headers.get("X-Refresh-Token") - - # Create a new Supabase client with the anon key - client = create_client(supabase_url, supabase_anon_key) - - # Set the session with the JWT token - # For server-side operations, we can use the token directly - try: - # Verify the token is valid by getting the user - user_response = client.auth.get_user(token) - if not user_response.user: - raise HTTPException(status_code=401, detail="Invalid token") - - # Set the session with both access and refresh tokens - client.auth.set_session(token, refresh_token) - return client - except Exception as e: - raise HTTPException(status_code=401, detail=f"Authentication failed: {str(e)}") - - -# Request/Response Models -class EnrollFactorRequest(BaseModel): - friendly_name: str = Field(..., description="User-friendly name for the factor") - phone_number: str = Field( - ..., description="Phone number in E.164 format (e.g., +1234567890)" - ) - - -class EnrollFactorResponse(BaseModel): - id: str - friendly_name: str - phone_number: str - # Note: Supabase response may not include status, created_at, updated_at - qr_code: Optional[str] = None - secret: Optional[str] = None - - -class ChallengeRequest(BaseModel): - factor_id: str = Field(..., description="ID of the factor to challenge") - - -class ChallengeResponse(BaseModel): - id: str - expires_at: Optional[str] = None - # Note: Supabase response may not include factor_type, created_at - - -class VerifyRequest(BaseModel): - factor_id: str = Field(..., description="ID of the factor to verify") - challenge_id: str = Field(..., description="ID of the challenge to verify") - code: str = Field(..., description="SMS code received on phone") - - -class ChallengeAndVerifyRequest(BaseModel): - factor_id: str = Field(..., description="ID of the factor to challenge and verify") - code: str = Field(..., description="SMS code received on phone") - - -class FactorInfo(BaseModel): - id: str - friendly_name: Optional[str] = None - factor_type: Optional[str] = None - status: Optional[str] = None - phone: Optional[str] = None - created_at: Optional[str] = None - updated_at: Optional[str] = None - - -class ListFactorsResponse(BaseModel): - factors: List[FactorInfo] - - -class UnenrollRequest(BaseModel): - factor_id: str = Field(..., description="ID of the factor to unenroll") - - -class AALResponse(BaseModel): - current_level: Optional[str] = None - next_level: Optional[str] = None - current_authentication_methods: Optional[List[str]] = None - # Add action guidance based on AAL status - action_required: Optional[str] = None - message: Optional[str] = None - # Phone verification requirement fields - phone_verification_required: Optional[bool] = None - user_created_at: Optional[str] = None - cutoff_date: Optional[str] = None - # Computed verification status fields - verification_required: Optional[bool] = None - is_verified: Optional[bool] = None - factors: Optional[List[dict]] = None - - -@router.post("/enroll", response_model=EnrollFactorResponse) -async def enroll_factor( - request_data: EnrollFactorRequest, - client: Client = Depends(get_authenticated_client), - user_id: str = 
Depends(get_current_user_id_from_jwt), -): - """ - Enroll a new phone number for SMS-based 2FA. - - Currently only supports 'phone' factor type. - Phone number must be in E.164 format (e.g., +1234567890). - """ - structlog.contextvars.bind_contextvars( - user_id=user_id, - action="mfa_enroll", - phone_number=request_data.phone_number, - friendly_name=request_data.friendly_name - ) - - try: - response = client.auth.mfa.enroll( - { - "factor_type": "phone", - "friendly_name": request_data.friendly_name, - "phone": request_data.phone_number, - } - ) - - # Build response with defensive field access - enroll_response = EnrollFactorResponse( - id=response.id, - friendly_name=request_data.friendly_name, # Use request data as fallback - phone_number=request_data.phone_number, - ) - - # Add optional fields if they exist - if hasattr(response, "qr_code"): - enroll_response.qr_code = response.qr_code - if hasattr(response, "secret"): - enroll_response.secret = response.secret - - return enroll_response - except Exception as e: - raise HTTPException( - status_code=400, detail=f"Failed to enroll phone factor: {str(e)}" - ) - - -@router.post("/challenge", response_model=ChallengeResponse) -async def create_challenge( - request_data: ChallengeRequest, - client: Client = Depends(get_authenticated_client), - user_id: str = Depends(get_current_user_id_from_jwt), -): - """ - Create a challenge for an enrolled phone factor. - - This will send an SMS code to the registered phone number. - The challenge must be verified within the time limit. - """ - structlog.contextvars.bind_contextvars( - user_id=user_id, - action="mfa_challenge", - factor_id=request_data.factor_id - ) - - try: - response = client.auth.mfa.challenge( - { - "factor_id": request_data.factor_id, - } - ) - - # Build response with defensive field access - challenge_response = ChallengeResponse(id=response.id) - - # Add optional fields if they exist - if hasattr(response, "expires_at"): - challenge_response.expires_at = response.expires_at - - return challenge_response - except Exception as e: - raise HTTPException( - status_code=400, detail=f"Failed to create SMS challenge: {str(e)}" - ) - - -@router.post("/verify") -async def verify_challenge( - request_data: VerifyRequest, - client: Client = Depends(get_authenticated_client), - user_id: str = Depends(get_current_user_id_from_jwt), -): - """ - Verify a challenge with an SMS code. - - The challenge must be active and the SMS code must be valid. 
- """ - structlog.contextvars.bind_contextvars( - user_id=user_id, - action="mfa_verify", - factor_id=request_data.factor_id, - challenge_id=request_data.challenge_id - ) - - try: - logger.info(f"🔵 Starting MFA verification for user {user_id}: " - f"factor_id={request_data.factor_id}, " - f"challenge_id={request_data.challenge_id}") - - # Check AAL BEFORE verification - try: - aal_before = client.auth.mfa.get_authenticator_assurance_level() - logger.info(f"📊 AAL BEFORE verification: " - f"current={aal_before.current_level}, " - f"next={aal_before.next_level}") - except Exception as e: - logger.warning(f"Failed to get AAL before verification: {e}") - - # Verify the challenge - response = client.auth.mfa.verify( - { - "factor_id": request_data.factor_id, - "challenge_id": request_data.challenge_id, - "code": request_data.code, - } - ) - - logger.info(f"✅ MFA verification successful for user {user_id}") - logger.info(f"Verification response type: {type(response)}") - logger.info(f"Verification response attributes: {dir(response)}") - - # Check if response has session info - if hasattr(response, 'session') and response.session: - logger.info(f"New session info: access_token present: {bool(getattr(response.session, 'access_token', None))}") - logger.info(f"New session user: {getattr(response.session, 'user', None)}") - - # Check AAL AFTER verification - try: - aal_after = client.auth.mfa.get_authenticator_assurance_level() - logger.info(f"📊 AAL AFTER verification: " - f"current={aal_after.current_level}, " - f"next={aal_after.next_level}") - except Exception as e: - logger.warning(f"Failed to get AAL after verification: {e}") - - # Check factor status AFTER verification - try: - user_response = client.auth.get_user() - if user_response.user and hasattr(user_response.user, "factors"): - for factor in user_response.user.factors: - if factor.id == request_data.factor_id: - logger.info(f"Factor {request_data.factor_id} status after verification: {getattr(factor, 'status', 'unknown')}") - break - except Exception as e: - logger.warning(f"Failed to check factor status after verification: {e}") - - return { - "success": True, - "message": "SMS code verified successfully", - "session": response, - } - except Exception as e: - logger.error(f"❌ MFA verification failed for user {user_id}: {str(e)}") - raise HTTPException( - status_code=400, detail=f"Failed to verify SMS code: {str(e)}" - ) - - -@router.post("/challenge-and-verify") -async def challenge_and_verify( - request_data: ChallengeAndVerifyRequest, - client: Client = Depends(get_authenticated_client), - user_id: str = Depends(get_current_user_id_from_jwt), -): - """ - Create a challenge and verify it in a single step. - - This will send an SMS code and verify it immediately when provided. - This is a convenience method that combines challenge creation and verification. 
- """ - structlog.contextvars.bind_contextvars( - user_id=user_id, - action="mfa_challenge_and_verify", - factor_id=request_data.factor_id - ) - - try: - response = client.auth.mfa.challenge_and_verify( - {"factor_id": request_data.factor_id, "code": request_data.code} - ) - - return { - "success": True, - "message": "SMS challenge created and verified successfully", - "session": response, - } - except Exception as e: - raise HTTPException( - status_code=400, detail=f"Failed to challenge and verify SMS: {str(e)}" - ) - - -@router.get("/factors", response_model=ListFactorsResponse) -async def list_factors( - client: Client = Depends(get_authenticated_client), - user_id: str = Depends(get_current_user_id_from_jwt), -): - """ - List all enrolled factors for the authenticated user. - """ - structlog.contextvars.bind_contextvars( - user_id=user_id, - action="mfa_list_factors" - ) - - try: - # Get user info which includes factors - user_response = client.auth.get_user() - if not user_response.user: - raise HTTPException(status_code=401, detail="User not found") - - # Extract factors from user data with defensive access - factors = [] - if hasattr(user_response.user, "factors") and user_response.user.factors: - for factor in user_response.user.factors: - # Convert datetime objects to strings for Pydantic validation - created_at = getattr(factor, "created_at", None) - if created_at and hasattr(created_at, "isoformat"): - created_at = created_at.isoformat() - - updated_at = getattr(factor, "updated_at", None) - if updated_at and hasattr(updated_at, "isoformat"): - updated_at = updated_at.isoformat() - - factor_info = FactorInfo( - id=factor.id if hasattr(factor, "id") else str(factor), - friendly_name=getattr(factor, "friendly_name", None), - factor_type=getattr(factor, "factor_type", None), - status=getattr(factor, "status", None), - phone=getattr(factor, "phone", None), - created_at=created_at, - updated_at=updated_at, - ) - factors.append(factor_info) - - return ListFactorsResponse(factors=factors) - except Exception as e: - raise HTTPException(status_code=400, detail=f"Failed to list factors: {str(e)}") - - -@router.post("/unenroll") -async def unenroll_factor( - request_data: UnenrollRequest, - client: Client = Depends(get_authenticated_client), - user_id: str = Depends(get_current_user_id_from_jwt), -): - """ - Unenroll a phone factor for the authenticated user. - - This will remove the phone number and invalidate any active sessions if the factor was verified. - """ - structlog.contextvars.bind_contextvars( - user_id=user_id, - action="mfa_unenroll", - factor_id=request_data.factor_id - ) - - try: - response = client.auth.mfa.unenroll({"factor_id": request_data.factor_id}) - - return {"success": True, "message": "Phone factor unenrolled successfully"} - except Exception as e: - raise HTTPException( - status_code=400, detail=f"Failed to unenroll phone factor: {str(e)}" - ) - - -@router.get("/aal", response_model=AALResponse) -async def get_authenticator_assurance_level( - client: Client = Depends(get_authenticated_client), - user_id: str = Depends(get_current_user_id_from_jwt), -): - """ - Get the Authenticator Assurance Level (AAL) for the current session. 
- - This endpoint combines AAL status with phone verification requirements: - - aal1 -> aal1: User does not have MFA enrolled - - aal1 -> aal2: User has MFA enrolled but not verified (requires verification) - - aal2 -> aal2: User has verified their MFA factor - - aal2 -> aal1: User has disabled MFA (stale JWT, requires reauthentication) - - Also includes phone verification requirement based on account creation date. - """ - structlog.contextvars.bind_contextvars( - user_id=user_id, - action="mfa_get_aal" - ) - - try: - # Get the current AAL from Supabase - response = client.auth.mfa.get_authenticator_assurance_level() - - # Extract AAL levels from response first - current = response.current_level - next_level = response.next_level - - # Get user creation date and factors for phone verification requirement - user_response = client.auth.get_user() - if not user_response.user: - raise HTTPException(status_code=401, detail="User not found") - - user_created_at = None - if hasattr(user_response.user, 'created_at') and user_response.user.created_at: - try: - # Handle different possible formats for created_at - created_at_value = user_response.user.created_at - if isinstance(created_at_value, str): - # Parse ISO format string - user_created_at = datetime.fromisoformat(created_at_value.replace('Z', '+00:00')) - elif hasattr(created_at_value, 'isoformat'): - # Already a datetime object - user_created_at = created_at_value - if user_created_at.tzinfo is None: - user_created_at = user_created_at.replace(tzinfo=timezone.utc) - else: - logger.warning(f"Unexpected created_at type: {type(created_at_value)}") - except Exception as e: - logger.error(f"Failed to parse user created_at: {e}") - # Fall back to treating as new user for safety - user_created_at = datetime.now(timezone.utc) - - # Determine if this is a new user who needs phone verification - is_new_user = ( - user_created_at is not None and - user_created_at >= PHONE_VERIFICATION_CUTOFF_DATE - ) - - # Get factors and compute phone verification status - factors = [] - phone_factors = [] - has_verified_phone = False - - if hasattr(user_response.user, "factors") and user_response.user.factors: - for factor in user_response.user.factors: - # Convert datetime objects to strings for JSON serialization - created_at = getattr(factor, "created_at", None) - if created_at and hasattr(created_at, "isoformat"): - created_at = created_at.isoformat() - - updated_at = getattr(factor, "updated_at", None) - if updated_at and hasattr(updated_at, "isoformat"): - updated_at = updated_at.isoformat() - - factor_dict = { - "id": factor.id if hasattr(factor, "id") else str(factor), - "friendly_name": getattr(factor, "friendly_name", None), - "factor_type": getattr(factor, "factor_type", None), - "status": getattr(factor, "status", None), - "phone": getattr(factor, "phone", None), - "created_at": created_at, - "updated_at": updated_at, - } - factors.append(factor_dict) - - # Track phone factors - if factor_dict.get("factor_type") == "phone": - phone_factors.append(factor_dict) - if factor_dict.get("status") == "verified": - has_verified_phone = True - - # Determine action required based on AAL combination - action_required = None - message = None - - if current == "aal1" and next_level == "aal1": - # User does not have MFA enrolled - action_required = "none" - message = "MFA is not enrolled for this account" - elif current == "aal1" and next_level == "aal2": - # User has MFA enrolled but needs to verify it - action_required = "verify_mfa" - message = "MFA verification 
required to access full features" - elif current == "aal2" and next_level == "aal2": - # User has verified their MFA factor - action_required = "none" - message = "MFA is verified and active" - elif current == "aal2" and next_level == "aal1": - # User has disabled MFA or has stale JWT - action_required = "reauthenticate" - message = "Session needs refresh due to MFA changes" - else: - # Unknown combination - action_required = "unknown" - message = f"Unknown AAL combination: {current} -> {next_level}" - - # Determine verification_required based on AAL status AND grandfathering logic - verification_required = False - if is_new_user: - # New users (created after cutoff date) must have phone verification - if current == 'aal1' and next_level == 'aal1': - # No MFA enrolled - new users must enroll - verification_required = True - elif action_required == 'verify_mfa': - # MFA enrolled but needs verification - verification_required = True - else: - # Existing users (grandfathered) - only require verification if AAL demands it - verification_required = action_required == 'verify_mfa' - - phone_verification_required = False and is_new_user and is_phone_verification_mandatory() - verification_required = False and is_new_user and verification_required and is_phone_verification_mandatory() - - logger.info(f"AAL check for user {user_id}: " - f"current_level={current}, " - f"next_level={next_level}, " - f"action_required={action_required}, " - f"phone_verification_required={phone_verification_required}, " - f"verification_required={verification_required}, " - f"is_verified={has_verified_phone}") - - return AALResponse( - current_level=current, - next_level=next_level, - current_authentication_methods=[x.method for x in response.current_authentication_methods], - action_required=action_required, - message=message, - phone_verification_required=phone_verification_required, - user_created_at=user_created_at.isoformat() if user_created_at else None, - cutoff_date=PHONE_VERIFICATION_CUTOFF_DATE.isoformat(), - verification_required=verification_required, - is_verified=has_verified_phone, - factors=factors, - ) - except Exception as e: - raise HTTPException(status_code=400, detail=f"Failed to get AAL: {str(e)}") diff --git a/backend/services/billing.py b/backend/services/billing.py index 1b37a553..117c79a2 100644 --- a/backend/services/billing.py +++ b/backend/services/billing.py @@ -497,6 +497,7 @@ async def check_billing_status(client, user_id: str) -> Tuple[bool, str, Optiona # Calculate current month's usage current_usage = await calculate_monthly_usage(client, user_id) + # TODO: also do user's AAL check # Check if within limits if current_usage >= tier_info['cost']: return False, f"Monthly limit of {tier_info['cost']} dollars reached. Please upgrade your plan or wait until next month.", subscription diff --git a/frontend/src/components/AuthProvider.tsx b/frontend/src/components/AuthProvider.tsx index c5028444..8171002d 100644 --- a/frontend/src/components/AuthProvider.tsx +++ b/frontend/src/components/AuthProvider.tsx @@ -42,11 +42,19 @@ export const AuthProvider = ({ children }: { children: ReactNode }) => { const { data: authListener } = supabase.auth.onAuthStateChange( async (event, newSession) => { + console.log('🔵 Auth state change:', { event, session: !!newSession, user: !!newSession?.user }); + setSession(newSession); setUser(newSession?.user ?? 
null); if (isLoading) setIsLoading(false); + if (event === 'SIGNED_IN' && newSession?.user) { await checkAndInstallSunaAgent(newSession.user.id, newSession.user.created_at); + } else if (event === 'MFA_CHALLENGE_VERIFIED') { + console.log('✅ MFA challenge verified, session updated'); + // Session is automatically updated by Supabase, just log for debugging + } else if (event === 'TOKEN_REFRESHED') { + console.log('🔄 Token refreshed, session updated'); } }, ); diff --git a/frontend/src/components/auth/background-aal-checker.tsx b/frontend/src/components/auth/background-aal-checker.tsx index d5f7dbbe..6536fb42 100644 --- a/frontend/src/components/auth/background-aal-checker.tsx +++ b/frontend/src/components/auth/background-aal-checker.tsx @@ -64,8 +64,7 @@ export function BackgroundAALChecker({ if (current_level === "aal1" && next_level === "aal1") { // New user has no MFA enrolled - redirect to enrollment console.log('Background: New user without MFA enrolled, redirecting to phone verification'); - // Temporarily disabled - // router.push(redirectTo); + router.push(redirectTo); return; } // If new user has MFA enrolled, follow standard AAL flow below @@ -76,8 +75,7 @@ export function BackgroundAALChecker({ case 'verify_mfa': // User has MFA enrolled but needs to verify it console.log('Background: Redirecting to MFA verification'); - // Temporarily disabled - // router.push(redirectTo); + router.push(redirectTo); break; case 'reauthenticate': diff --git a/frontend/src/lib/api/phone-verification.ts b/frontend/src/lib/api/phone-verification.ts index c10f954e..e9333d76 100644 --- a/frontend/src/lib/api/phone-verification.ts +++ b/frontend/src/lib/api/phone-verification.ts @@ -1,5 +1,4 @@ -import { backendApi } from '@/lib/api-client'; -import { createClient } from '@/lib/supabase/client'; +import { supabaseMFAService } from '@/lib/supabase/mfa'; @@ -78,95 +77,59 @@ export interface AALResponse { export const phoneVerificationService = { - - - /** * Enroll phone number for SMS-based 2FA */ async enrollPhoneNumber(data: PhoneVerificationEnroll): Promise { - const response = await backendApi.post('/mfa/enroll', data); - return response.data; + return await supabaseMFAService.enrollPhoneNumber(data); }, /** * Create a challenge for an enrolled phone factor (sends SMS) */ async createChallenge(data: PhoneVerificationChallenge): Promise { - const response = await backendApi.post('/mfa/challenge', data); - return response.data; + return await supabaseMFAService.createChallenge(data); }, /** * Verify SMS code for phone verification */ async verifyChallenge(data: PhoneVerificationVerify): Promise { - try { - const response = await backendApi.post('/mfa/verify', data); - - // After successful verification, refresh the Supabase session - // This ensures the frontend client gets the updated session with AAL2 tokens - try { - const supabase = createClient(); - await supabase.auth.refreshSession(); - console.log("🔄 Frontend Supabase session refreshed after verification"); - } catch (refreshError) { - console.warn("⚠️ Failed to refresh Supabase session:", refreshError); - } - - return { - success: response.data.success || true, - message: response.data.message || 'SMS code verified successfully' - }; - } catch (error) { - console.error("❌ Verify challenge failed:", error); - throw error; - } + return await supabaseMFAService.verifyChallenge(data); }, /** * Create challenge and verify in one step */ async challengeAndVerify(data: PhoneVerificationChallengeAndVerify): Promise { - const response = await 
backendApi.post('/mfa/challenge-and-verify', data); - return { - success: response.data.success || true, - message: response.data.message || 'SMS challenge created and verified successfully' - }; + return await supabaseMFAService.challengeAndVerify(data); }, /** * Resend SMS code (create new challenge for existing factor) */ async resendSMS(factorId: string): Promise { - const response = await backendApi.post('/mfa/challenge', { factor_id: factorId }); - return response.data; + return await supabaseMFAService.resendSMS(factorId); }, /** * List all enrolled MFA factors */ async listFactors(): Promise { - const response = await backendApi.get('/mfa/factors'); - return response.data; + return await supabaseMFAService.listFactors(); }, /** * Remove phone verification from account */ async unenrollFactor(factorId: string): Promise { - const response = await backendApi.post('/mfa/unenroll', { factor_id: factorId }); - return { - success: response.data.success || true, - message: response.data.message || 'Phone factor unenrolled successfully' - }; + return await supabaseMFAService.unenrollFactor(factorId); }, /** * Get Authenticator Assurance Level */ async getAAL(): Promise { - const response = await backendApi.get('/mfa/aal'); - return response.data; + return await supabaseMFAService.getAAL(); } }; \ No newline at end of file diff --git a/frontend/src/lib/supabase/mfa.ts b/frontend/src/lib/supabase/mfa.ts new file mode 100644 index 00000000..55e6d1a6 --- /dev/null +++ b/frontend/src/lib/supabase/mfa.ts @@ -0,0 +1,382 @@ +import { createClient } from './client'; +import type { + FactorInfo, + PhoneVerificationEnroll, + PhoneVerificationChallenge, + PhoneVerificationVerify, + PhoneVerificationChallengeAndVerify, + PhoneVerificationResponse, + EnrollFactorResponse, + ChallengeResponse, + ListFactorsResponse, + AALResponse, +} from '@/lib/api/phone-verification'; + +// Cutoff date for new user phone verification requirement +// Users created after this date will be required to have phone verification +// Users created before this date are grandfathered in and not required to verify +const PHONE_VERIFICATION_CUTOFF_DATE = new Date('2025-07-25T00:05:30.000Z'); + +function isPhoneVerificationMandatory(): boolean { + const envVal = process.env.NEXT_PUBLIC_PHONE_NUMBER_MANDATORY; + if (!envVal) return false; + return envVal.toLowerCase() === 'true'; +} + +export const supabaseMFAService = { + /** + * Enroll phone number for SMS-based 2FA + */ + async enrollPhoneNumber(data: PhoneVerificationEnroll): Promise { + const supabase = createClient(); + + try { + const response = await supabase.auth.mfa.enroll({ + factorType: 'phone', + friendlyName: data.friendly_name, + phone: data.phone_number, + }); + + if (response.error) { + throw new Error(response.error.message); + } + + if (!response.data) { + throw new Error('No data returned from enrollment'); + } + + return { + id: response.data.id, + friendly_name: data.friendly_name, + phone_number: data.phone_number, + qr_code: undefined, // Phone factors don't have QR codes + secret: undefined, // Phone factors don't have secrets + }; + } catch (error: any) { + console.error('❌ Enroll phone factor failed:', error); + throw new Error(`Failed to enroll phone factor: ${error.message}`); + } + }, + + /** + * Create a challenge for an enrolled phone factor (sends SMS) + */ + async createChallenge(data: PhoneVerificationChallenge): Promise { + const supabase = createClient(); + + try { + const response = await supabase.auth.mfa.challenge({ + factorId: data.factor_id, 
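+        // Challenging an enrolled phone factor is what actually triggers the SMS send (see docstring above);
+        // the returned challenge id is the challenge_id later passed to verifyChallenge().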
+ }); + + if (response.error) { + throw new Error(response.error.message); + } + + if (!response.data) { + throw new Error('No data returned from challenge'); + } + + return { + id: response.data.id, + expires_at: response.data.expires_at ? new Date(response.data.expires_at * 1000).toISOString() : undefined, + }; + } catch (error: any) { + console.error('❌ Create SMS challenge failed:', error); + throw new Error(`Failed to create SMS challenge: ${error.message}`); + } + }, + + /** + * Verify SMS code for phone verification + */ + async verifyChallenge(data: PhoneVerificationVerify): Promise { + const supabase = createClient(); + + try { + console.log('🔵 Starting MFA verification with Supabase client'); + + const response = await supabase.auth.mfa.verify({ + factorId: data.factor_id, + challengeId: data.challenge_id, + code: data.code, + }); + + if (response.error) { + throw new Error(response.error.message); + } + + console.log('✅ MFA verification successful'); + + return { + success: true, + message: 'SMS code verified successfully', + }; + } catch (error: any) { + console.error('❌ Verify challenge failed:', error); + throw new Error(`Failed to verify SMS code: ${error.message}`); + } + }, + + /** + * Create challenge and verify in one step + */ + async challengeAndVerify(data: PhoneVerificationChallengeAndVerify): Promise { + const supabase = createClient(); + + try { + const response = await supabase.auth.mfa.challengeAndVerify({ + factorId: data.factor_id, + code: data.code, + }); + + if (response.error) { + throw new Error(response.error.message); + } + + return { + success: true, + message: 'SMS challenge created and verified successfully', + }; + } catch (error: any) { + console.error('❌ Challenge and verify SMS failed:', error); + throw new Error(`Failed to challenge and verify SMS: ${error.message}`); + } + }, + + /** + * Resend SMS code (create new challenge for existing factor) + */ + async resendSMS(factorId: string): Promise { + const supabase = createClient(); + + try { + const response = await supabase.auth.mfa.challenge({ + factorId: factorId, + }); + + if (response.error) { + throw new Error(response.error.message); + } + + if (!response.data) { + throw new Error('No data returned from challenge'); + } + + return { + id: response.data.id, + expires_at: response.data.expires_at ? 
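+          // expires_at comes back from supabase-js as a UNIX timestamp in seconds, hence *1000 before Date()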
new Date(response.data.expires_at * 1000).toISOString() : undefined, + }; + } catch (error: any) { + console.error('❌ Resend SMS failed:', error); + throw new Error(`Failed to resend SMS: ${error.message}`); + } + }, + + /** + * List all enrolled MFA factors + */ + async listFactors(): Promise { + const supabase = createClient(); + + try { + const { data: { user }, error } = await supabase.auth.getUser(); + + if (error) { + throw new Error(error.message); + } + + if (!user) { + throw new Error('User not found'); + } + + const factors: FactorInfo[] = []; + + if (user.factors) { + for (const factor of user.factors) { + factors.push({ + id: factor.id, + friendly_name: factor.friendly_name, + factor_type: factor.factor_type, + status: factor.status, + phone: (factor as any).phone, // Phone property may not be in the type definition + created_at: factor.created_at, + updated_at: factor.updated_at, + }); + } + } + + return { factors }; + } catch (error: any) { + console.error('❌ List factors failed:', error); + throw new Error(`Failed to list factors: ${error.message}`); + } + }, + + /** + * Remove phone verification from account + */ + async unenrollFactor(factorId: string): Promise { + const supabase = createClient(); + + try { + const response = await supabase.auth.mfa.unenroll({ + factorId: factorId, + }); + + if (response.error) { + throw new Error(response.error.message); + } + + return { + success: true, + message: 'Phone factor unenrolled successfully', + }; + } catch (error: any) { + console.error('❌ Unenroll factor failed:', error); + throw new Error(`Failed to unenroll phone factor: ${error.message}`); + } + }, + + /** + * Get Authenticator Assurance Level + */ + async getAAL(): Promise { + const supabase = createClient(); + + try { + // Get the current AAL from Supabase + const aalResponse = await supabase.auth.mfa.getAuthenticatorAssuranceLevel(); + + if (aalResponse.error) { + throw new Error(aalResponse.error.message); + } + + // Get user creation date and factors for phone verification requirement + const { data: { user }, error: userError } = await supabase.auth.getUser(); + + if (userError) { + throw new Error(userError.message); + } + + if (!user) { + throw new Error('User not found'); + } + + let userCreatedAt: Date | null = null; + if (user.created_at) { + try { + userCreatedAt = new Date(user.created_at); + } catch (e) { + console.error('Failed to parse user created_at:', e); + // Fall back to treating as new user for safety + userCreatedAt = new Date(); + } + } + + // Determine if this is a new user who needs phone verification + const isNewUser = userCreatedAt && userCreatedAt >= PHONE_VERIFICATION_CUTOFF_DATE; + + // Get factors and compute phone verification status + const factors: any[] = []; + const phoneFactors: any[] = []; + let hasVerifiedPhone = false; + + if (user.factors) { + for (const factor of user.factors) { + const factorInfo = { + id: factor.id, + friendly_name: factor.friendly_name, + factor_type: factor.factor_type, + status: factor.status, + phone: (factor as any).phone, // Phone property may not be in the type definition + created_at: factor.created_at, + updated_at: factor.updated_at, + }; + factors.push(factorInfo); + + if (factor.factor_type === 'phone') { + phoneFactors.push(factorInfo); + if (factor.status === 'verified') { + hasVerifiedPhone = true; + } + } + } + } + + const current = aalResponse.data?.currentLevel; + const nextLevel = aalResponse.data?.nextLevel; + + // Determine action required based on AAL combination + let actionRequired: 
string = 'none'; + let message: string = ''; + + if (current === 'aal1' && nextLevel === 'aal1') { + // User does not have MFA enrolled + actionRequired = 'none'; + message = 'MFA is not enrolled for this account'; + } else if (current === 'aal1' && nextLevel === 'aal2') { + // User has MFA enrolled but needs to verify it + actionRequired = 'verify_mfa'; + message = 'MFA verification required to access full features'; + } else if (current === 'aal2' && nextLevel === 'aal2') { + // User has verified their MFA factor + actionRequired = 'none'; + message = 'MFA is verified and active'; + } else if (current === 'aal2' && nextLevel === 'aal1') { + // User has disabled MFA or has stale JWT + actionRequired = 'reauthenticate'; + message = 'Session needs refresh due to MFA changes'; + } else { + // Unknown combination + actionRequired = 'unknown'; + message = `Unknown AAL combination: ${current} -> ${nextLevel}`; + } + + // Determine verification_required based on AAL status AND grandfathering logic + let verificationRequired = false; + if (isNewUser) { + // New users (created after cutoff date) must have phone verification + if (current === 'aal1' && nextLevel === 'aal1') { + // No MFA enrolled - new users must enroll + verificationRequired = true; + } else if (actionRequired === 'verify_mfa') { + // MFA enrolled but needs verification + verificationRequired = true; + } + } else { + // Existing users (grandfathered) - only require verification if AAL demands it + verificationRequired = actionRequired === 'verify_mfa'; + } + + const phoneVerificationRequired = isNewUser && isPhoneVerificationMandatory(); + verificationRequired = isNewUser && verificationRequired && isPhoneVerificationMandatory(); + + console.log('AAL check: ', { + current_level: current, + next_level: nextLevel, + action_required: actionRequired, + phone_verification_required: phoneVerificationRequired, + verification_required: verificationRequired, + is_verified: hasVerifiedPhone, + }); + + return { + current_level: current, + next_level: nextLevel, + current_authentication_methods: aalResponse.data?.currentAuthenticationMethods?.map(m => m.method) || [], + action_required: actionRequired, + message: message, + phone_verification_required: phoneVerificationRequired, + user_created_at: userCreatedAt?.toISOString(), + cutoff_date: PHONE_VERIFICATION_CUTOFF_DATE.toISOString(), + verification_required: verificationRequired, + is_verified: hasVerifiedPhone, + factors: factors, + }; + } catch (error: any) { + console.error('❌ Get AAL failed:', error); + throw new Error(`Failed to get AAL: ${error.message}`); + } + }, +}; \ No newline at end of file From e4a6f5a1ef1161c30e93d91f6b10c75601d8953d Mon Sep 17 00:00:00 2001 From: Krishav Raj Singh Date: Thu, 24 Jul 2025 20:22:44 +0530 Subject: [PATCH 12/18] fix: auto continue response if finish_reason is length --- backend/agentpress/response_processor.py | 172 +++++++++++++---------- backend/agentpress/thread_manager.py | 29 ++++ 2 files changed, 127 insertions(+), 74 deletions(-) diff --git a/backend/agentpress/response_processor.py b/backend/agentpress/response_processor.py index 470aaf44..70c15bf2 100644 --- a/backend/agentpress/response_processor.py +++ b/backend/agentpress/response_processor.py @@ -146,6 +146,9 @@ class ResponseProcessor: prompt_messages: List[Dict[str, Any]], llm_model: str, config: ProcessorConfig = ProcessorConfig(), + can_auto_continue: bool = False, + auto_continue_count: int = 0, + continuous_state: Optional[Dict[str, Any]] = None, ) -> AsyncGenerator[Dict[str, 
Any], None]: """Process a streaming LLM response, handling tool calls and execution. @@ -155,13 +158,18 @@ class ResponseProcessor: prompt_messages: List of messages sent to the LLM (the prompt) llm_model: The name of the LLM model used config: Configuration for parsing and execution + can_auto_continue: Whether auto-continue is enabled + auto_continue_count: Number of auto-continue cycles + continuous_state: Previous state of the conversation Yields: Complete message objects matching the DB schema, except for content chunks. """ - accumulated_content = "" + # Initialize from continuous state if provided (for auto-continue) + continuous_state = continuous_state or {} + accumulated_content = continuous_state.get('accumulated_content', "") tool_calls_buffer = {} - current_xml_content = "" + current_xml_content = accumulated_content # equal to accumulated_content if auto-continuing, else blank xml_chunks_buffer = [] pending_tool_executions = [] yielded_tool_indices = set() # Stores indices of tools whose *status* has been yielded @@ -191,26 +199,29 @@ class ResponseProcessor: logger.info(f"Streaming Config: XML={config.xml_tool_calling}, Native={config.native_tool_calling}, " f"Execute on stream={config.execute_on_stream}, Strategy={config.tool_execution_strategy}") - thread_run_id = str(uuid.uuid4()) + # Reuse thread_run_id for auto-continue or create new one + thread_run_id = continuous_state.get('thread_run_id') or str(uuid.uuid4()) + continuous_state['thread_run_id'] = thread_run_id try: - # --- Save and Yield Start Events --- - start_content = {"status_type": "thread_run_start", "thread_run_id": thread_run_id} - start_msg_obj = await self.add_message( - thread_id=thread_id, type="status", content=start_content, - is_llm_message=False, metadata={"thread_run_id": thread_run_id} - ) - if start_msg_obj: yield format_for_yield(start_msg_obj) + # --- Save and Yield Start Events (only if not auto-continuing) --- + if auto_continue_count == 0: + start_content = {"status_type": "thread_run_start", "thread_run_id": thread_run_id} + start_msg_obj = await self.add_message( + thread_id=thread_id, type="status", content=start_content, + is_llm_message=False, metadata={"thread_run_id": thread_run_id} + ) + if start_msg_obj: yield format_for_yield(start_msg_obj) - assist_start_content = {"status_type": "assistant_response_start"} - assist_start_msg_obj = await self.add_message( - thread_id=thread_id, type="status", content=assist_start_content, - is_llm_message=False, metadata={"thread_run_id": thread_run_id} - ) - if assist_start_msg_obj: yield format_for_yield(assist_start_msg_obj) + assist_start_content = {"status_type": "assistant_response_start"} + assist_start_msg_obj = await self.add_message( + thread_id=thread_id, type="status", content=assist_start_content, + is_llm_message=False, metadata={"thread_run_id": thread_run_id} + ) + if assist_start_msg_obj: yield format_for_yield(assist_start_msg_obj) # --- End Start Events --- - __sequence = 0 + __sequence = continuous_state.get('sequence', 0) # get the sequence from the previous auto-continue cycle async for chunk in llm_response: # Extract streaming metadata from chunks @@ -493,7 +504,9 @@ class ResponseProcessor: self.trace.event(name="stream_finished_with_reason_xml_tool_limit_reached_after_xml_tool_calls", level="DEFAULT", status_message=(f"Stream finished with reason: xml_tool_limit_reached after {xml_tool_call_count} XML tool calls")) # --- SAVE and YIELD Final Assistant Message --- - if accumulated_content: + # Only save assistant message 
if NOT auto-continuing due to length to avoid duplicate messages + should_auto_continue = (can_auto_continue and finish_reason == 'length') + if accumulated_content and not should_auto_continue: # ... (Truncate accumulated_content logic) ... if config.max_xml_tool_calls > 0 and xml_tool_call_count >= config.max_xml_tool_calls and xml_chunks_buffer: last_xml_chunk = xml_chunks_buffer[-1] @@ -746,53 +759,56 @@ class ResponseProcessor: return # --- Save and Yield assistant_response_end --- - if last_assistant_message_object: # Only save if assistant message was saved - try: - # Calculate response time if we have timing data - if streaming_metadata["first_chunk_time"] and streaming_metadata["last_chunk_time"]: - streaming_metadata["response_ms"] = (streaming_metadata["last_chunk_time"] - streaming_metadata["first_chunk_time"]) * 1000 + # Only save assistant_response_end if not auto-continuing (response is actually complete) + should_auto_continue = (can_auto_continue and finish_reason == 'length') + if not should_auto_continue: + if last_assistant_message_object: # Only save if assistant message was saved + try: + # Calculate response time if we have timing data + if streaming_metadata["first_chunk_time"] and streaming_metadata["last_chunk_time"]: + streaming_metadata["response_ms"] = (streaming_metadata["last_chunk_time"] - streaming_metadata["first_chunk_time"]) * 1000 - # Create a LiteLLM-like response object for streaming - # Check if we have any actual usage data - has_usage_data = ( - streaming_metadata["usage"]["prompt_tokens"] > 0 or - streaming_metadata["usage"]["completion_tokens"] > 0 or - streaming_metadata["usage"]["total_tokens"] > 0 - ) - - assistant_end_content = { - "choices": [ - { - "finish_reason": finish_reason or "stop", - "index": 0, - "message": { - "role": "assistant", - "content": accumulated_content, - "tool_calls": complete_native_tool_calls or None + # Create a LiteLLM-like response object for streaming + # Check if we have any actual usage data + has_usage_data = ( + streaming_metadata["usage"]["prompt_tokens"] > 0 or + streaming_metadata["usage"]["completion_tokens"] > 0 or + streaming_metadata["usage"]["total_tokens"] > 0 + ) + + assistant_end_content = { + "choices": [ + { + "finish_reason": finish_reason or "stop", + "index": 0, + "message": { + "role": "assistant", + "content": accumulated_content, + "tool_calls": complete_native_tool_calls or None + } } - } - ], - "created": streaming_metadata.get("created"), - "model": streaming_metadata.get("model", llm_model), - "usage": streaming_metadata["usage"], # Always include usage like LiteLLM does - "streaming": True, # Add flag to indicate this was reconstructed from streaming - } - - # Only include response_ms if we have timing data - if streaming_metadata.get("response_ms"): - assistant_end_content["response_ms"] = streaming_metadata["response_ms"] - - await self.add_message( - thread_id=thread_id, - type="assistant_response_end", - content=assistant_end_content, - is_llm_message=False, - metadata={"thread_run_id": thread_run_id} - ) - logger.info("Assistant response end saved for stream") - except Exception as e: - logger.error(f"Error saving assistant response end for stream: {str(e)}") - self.trace.event(name="error_saving_assistant_response_end_for_stream", level="ERROR", status_message=(f"Error saving assistant response end for stream: {str(e)}")) + ], + "created": streaming_metadata.get("created"), + "model": streaming_metadata.get("model", llm_model), + "usage": streaming_metadata["usage"], # Always 
include usage like LiteLLM does + "streaming": True, # Add flag to indicate this was reconstructed from streaming + } + + # Only include response_ms if we have timing data + if streaming_metadata.get("response_ms"): + assistant_end_content["response_ms"] = streaming_metadata["response_ms"] + + await self.add_message( + thread_id=thread_id, + type="assistant_response_end", + content=assistant_end_content, + is_llm_message=False, + metadata={"thread_run_id": thread_run_id} + ) + logger.info("Assistant response end saved for stream") + except Exception as e: + logger.error(f"Error saving assistant response end for stream: {str(e)}") + self.trace.event(name="error_saving_assistant_response_end_for_stream", level="ERROR", status_message=(f"Error saving assistant response end for stream: {str(e)}")) except Exception as e: logger.error(f"Error processing stream: {str(e)}", exc_info=True) @@ -815,17 +831,25 @@ class ResponseProcessor: raise # Use bare 'raise' to preserve the original exception with its traceback finally: - # Save and Yield the final thread_run_end status - try: - end_content = {"status_type": "thread_run_end"} - end_msg_obj = await self.add_message( - thread_id=thread_id, type="status", content=end_content, - is_llm_message=False, metadata={"thread_run_id": thread_run_id if 'thread_run_id' in locals() else None} - ) - if end_msg_obj: yield format_for_yield(end_msg_obj) - except Exception as final_e: - logger.error(f"Error in finally block: {str(final_e)}", exc_info=True) - self.trace.event(name="error_in_finally_block", level="ERROR", status_message=(f"Error in finally block: {str(final_e)}")) + # Update continuous state for potential auto-continue + should_auto_continue = (can_auto_continue and finish_reason == 'length') + if should_auto_continue: + continuous_state['accumulated_content'] = accumulated_content + continuous_state['sequence'] = __sequence + + logger.info(f"Updated continuous state for auto-continue with {len(accumulated_content)} chars") + else: + # Save and Yield the final thread_run_end status (only if not auto-continuing and finish_reason is not 'length') + try: + end_content = {"status_type": "thread_run_end"} + end_msg_obj = await self.add_message( + thread_id=thread_id, type="status", content=end_content, + is_llm_message=False, metadata={"thread_run_id": thread_run_id if 'thread_run_id' in locals() else None} + ) + if end_msg_obj: yield format_for_yield(end_msg_obj) + except Exception as final_e: + logger.error(f"Error in finally block: {str(final_e)}", exc_info=True) + self.trace.event(name="error_in_finally_block", level="ERROR", status_message=(f"Error in finally block: {str(final_e)}")) async def process_non_streaming_response( self, diff --git a/backend/agentpress/thread_manager.py b/backend/agentpress/thread_manager.py index a4d1d974..48e7aa98 100644 --- a/backend/agentpress/thread_manager.py +++ b/backend/agentpress/thread_manager.py @@ -297,6 +297,12 @@ Here are the XML tools available with examples: # Control whether we need to auto-continue due to tool_calls finish reason auto_continue = True auto_continue_count = 0 + + # Shared state for continuous streaming across auto-continues + continuous_state = { + 'accumulated_content': '', + 'thread_run_id': None + } # Define inner function to handle a single run async def _run_once(temp_msg=None): @@ -342,6 +348,18 @@ Here are the XML tools available with examples: prepared_messages.append(temp_msg) logger.debug("Added temporary message to the end of prepared messages") + # Add partial assistant content 
for auto-continue context (without saving to DB) + if auto_continue_count > 0 and continuous_state.get('accumulated_content'): + partial_content = continuous_state.get('accumulated_content', '') + + # Create temporary assistant message with just the text content + temporary_assistant_message = { + "role": "assistant", + "content": partial_content + } + prepared_messages.append(temporary_assistant_message) + logger.info(f"Added temporary assistant message with {len(partial_content)} chars for auto-continue context") + # 4. Prepare tools for LLM call openapi_tool_schemas = None if config.native_tool_calling: @@ -395,6 +413,9 @@ Here are the XML tools available with examples: config=config, prompt_messages=prepared_messages, llm_model=llm_model, + can_auto_continue=(native_max_auto_continues > 0), + auto_continue_count=auto_continue_count, + continuous_state=continuous_state ) else: # Fallback to non-streaming if response is not iterable @@ -467,6 +488,14 @@ Here are the XML tools available with examples: auto_continue = False # Still yield the chunk to inform the client + elif chunk.get('type') == 'status': + # if the finish reason is length, auto-continue + content = json.loads(chunk.get('content')) + if content.get('finish_reason') == 'length': + logger.info(f"Detected finish_reason='length', auto-continuing ({auto_continue_count + 1}/{native_max_auto_continues})") + auto_continue = True + auto_continue_count += 1 + continue # Otherwise just yield the chunk normally yield chunk else: From 4759b8987129660dfeee0bad2e82e40fecc7aa64 Mon Sep 17 00:00:00 2001 From: Bobbie <126939298+escapade-mckv@users.noreply.github.com> Date: Thu, 24 Jul 2025 20:49:51 +0530 Subject: [PATCH 13/18] fix Dockerfile too many workers --- backend/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/Dockerfile b/backend/Dockerfile index eda348f0..456e8be2 100644 --- a/backend/Dockerfile +++ b/backend/Dockerfile @@ -15,7 +15,7 @@ COPY . . 
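# Note: the (2*CPU)+1 formula below gives 33 workers on 16 vCPUs (the old value);
# the fixed 7 is a conservative cap, presumably to limit memory and connection pressure.
# A sketch of deriving the count at container start instead (entrypoint script assumed, not shown):
#   WORKERS=${WORKERS:-$(( 2 * $(nproc) + 1 ))}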
# Calculate optimal worker count based on 16 vCPUs # Using (2*CPU)+1 formula for CPU-bound applications -ENV WORKERS=33 +ENV WORKERS=7 ENV THREADS=2 ENV WORKER_CONNECTIONS=2000 From f99027b848b71d7bfabb298fc45f1efe0b738c4d Mon Sep 17 00:00:00 2001 From: Krishav Raj Singh Date: Thu, 24 Jul 2025 20:51:21 +0530 Subject: [PATCH 14/18] remove multiple var should_auto_continue --- backend/agentpress/response_processor.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/backend/agentpress/response_processor.py b/backend/agentpress/response_processor.py index 70c15bf2..84059a77 100644 --- a/backend/agentpress/response_processor.py +++ b/backend/agentpress/response_processor.py @@ -176,6 +176,7 @@ class ResponseProcessor: tool_index = 0 xml_tool_call_count = 0 finish_reason = None + should_auto_continue = False last_assistant_message_object = None # Store the final saved assistant message object tool_result_message_objects = {} # tool_index -> full saved message object has_printed_thinking_prefix = False # Flag for printing thinking prefix only once @@ -503,9 +504,11 @@ class ResponseProcessor: logger.info(f"Stream finished with reason: xml_tool_limit_reached after {xml_tool_call_count} XML tool calls") self.trace.event(name="stream_finished_with_reason_xml_tool_limit_reached_after_xml_tool_calls", level="DEFAULT", status_message=(f"Stream finished with reason: xml_tool_limit_reached after {xml_tool_call_count} XML tool calls")) + # Calculate if auto-continue is needed if the finish reason is length + should_auto_continue = (can_auto_continue and finish_reason == 'length') + # --- SAVE and YIELD Final Assistant Message --- # Only save assistant message if NOT auto-continuing due to length to avoid duplicate messages - should_auto_continue = (can_auto_continue and finish_reason == 'length') if accumulated_content and not should_auto_continue: # ... (Truncate accumulated_content logic) ... if config.max_xml_tool_calls > 0 and xml_tool_call_count >= config.max_xml_tool_calls and xml_chunks_buffer: @@ -760,7 +763,6 @@ class ResponseProcessor: # --- Save and Yield assistant_response_end --- # Only save assistant_response_end if not auto-continuing (response is actually complete) - should_auto_continue = (can_auto_continue and finish_reason == 'length') if not should_auto_continue: if last_assistant_message_object: # Only save if assistant message was saved try: @@ -832,7 +834,6 @@ class ResponseProcessor: finally: # Update continuous state for potential auto-continue - should_auto_continue = (can_auto_continue and finish_reason == 'length') if should_auto_continue: continuous_state['accumulated_content'] = accumulated_content continuous_state['sequence'] = __sequence From 64b12fb719ebfa5ad6b39e8c1c731f90d9d50ee9 Mon Sep 17 00:00:00 2001 From: Vukasin Date: Thu, 24 Jul 2025 17:49:32 +0200 Subject: [PATCH 15/18] fix: user reset --- frontend/src/components/AuthProvider.tsx | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/frontend/src/components/AuthProvider.tsx b/frontend/src/components/AuthProvider.tsx index c5028444..3386dfb1 100644 --- a/frontend/src/components/AuthProvider.tsx +++ b/frontend/src/components/AuthProvider.tsx @@ -43,7 +43,13 @@ export const AuthProvider = ({ children }: { children: ReactNode }) => { const { data: authListener } = supabase.auth.onAuthStateChange( async (event, newSession) => { setSession(newSession); - setUser(newSession?.user ?? 
null); + + // Only update user state on actual auth events, not token refresh + if (event === 'SIGNED_IN' || event === 'SIGNED_OUT') { + setUser(newSession?.user ?? null); + } + // For TOKEN_REFRESHED events, keep the existing user state + if (isLoading) setIsLoading(false); if (event === 'SIGNED_IN' && newSession?.user) { await checkAndInstallSunaAgent(newSession.user.id, newSession.user.created_at); From c05ec3e61b966d4ff5b3e74c3ce3bec130b8fe41 Mon Sep 17 00:00:00 2001 From: Krishav Raj Singh Date: Thu, 24 Jul 2025 23:42:11 +0530 Subject: [PATCH 16/18] removes one tap login --- frontend/src/components/GoogleSignIn.tsx | 214 +++-------------------- 1 file changed, 26 insertions(+), 188 deletions(-) diff --git a/frontend/src/components/GoogleSignIn.tsx b/frontend/src/components/GoogleSignIn.tsx index a4692010..e4d64dee 100644 --- a/frontend/src/components/GoogleSignIn.tsx +++ b/frontend/src/components/GoogleSignIn.tsx @@ -1,220 +1,58 @@ 'use client'; -import { useEffect, useState } from 'react'; -import Script from 'next/script'; +import { useState } from 'react'; import { createClient } from '@/lib/supabase/client'; -import { useAuthMethodTracking } from '@/lib/stores/auth-tracking'; import { toast } from 'sonner'; import { FcGoogle } from "react-icons/fc"; import { Loader2 } from 'lucide-react'; -declare global { - interface Window { - google?: { - accounts: { - id: { - initialize: (config: any) => void; - renderButton: (element: HTMLElement, config: any) => void; - prompt: (notification?: (notification: any) => void) => void; - }; - }; - }; - } -} - interface GoogleSignInProps { returnUrl?: string; } export default function GoogleSignIn({ returnUrl }: GoogleSignInProps) { const [isLoading, setIsLoading] = useState(false); - const [isGoogleLoaded, setIsGoogleLoaded] = useState(false); - const { wasLastMethod, markAsUsed } = useAuthMethodTracking('google'); const supabase = createClient(); - const handleGoogleResponse = async (response: any) => { + const handleGoogleSignIn = async () => { try { setIsLoading(true); - markAsUsed(); - - const { data, error } = await supabase.auth.signInWithIdToken({ + console.log('returnUrl', returnUrl); + + const { error } = await supabase.auth.signInWithOAuth({ provider: 'google', - token: response.credential, + options: { + redirectTo: `${window.location.origin}/auth/callback${ + returnUrl ? `?returnUrl=${encodeURIComponent(returnUrl)}` : '' + }`, + }, }); if (error) { - const redirectTo = `${window.location.origin}/auth/callback${returnUrl ? `?returnUrl=${encodeURIComponent(returnUrl)}` : ''}`; - console.log('OAuth redirect URI:', redirectTo); - - const { error: oauthError } = await supabase.auth.signInWithOAuth({ - provider: 'google', - options: { - redirectTo, - }, - }); - - if (oauthError) { - throw oauthError; - } - } else { - window.location.href = returnUrl || '/dashboard'; + throw error; } } catch (error: any) { console.error('Google sign-in error:', error); - - if (error.message?.includes('redirect_uri_mismatch')) { - const redirectUri = `${window.location.origin}/auth/callback`; - toast.error( - `Google OAuth configuration error. 
Add this exact URL to your Google Cloud Console: ${redirectUri}`, - { duration: 10000 } - ); - } else { - toast.error(error.message || 'Failed to sign in with Google'); - } - + toast.error(error.message || 'Failed to sign in with Google'); setIsLoading(false); } }; - useEffect(() => { - const initializeGoogleSignIn = () => { - if (!window.google || !process.env.NEXT_PUBLIC_GOOGLE_CLIENT_ID) return; - - window.google.accounts.id.initialize({ - client_id: process.env.NEXT_PUBLIC_GOOGLE_CLIENT_ID, - callback: handleGoogleResponse, - auto_select: false, - cancel_on_tap_outside: false, - }); - - setIsGoogleLoaded(true); - }; - - if (window.google) { - initializeGoogleSignIn(); - } - }, [returnUrl, markAsUsed, supabase]); - - const handleScriptLoad = () => { - if (window.google && process.env.NEXT_PUBLIC_GOOGLE_CLIENT_ID) { - window.google.accounts.id.initialize({ - client_id: process.env.NEXT_PUBLIC_GOOGLE_CLIENT_ID, - callback: handleGoogleResponse, - auto_select: false, - cancel_on_tap_outside: false, - }); - - setIsGoogleLoaded(true); - } - }; - - const handleGoogleSignIn = () => { - if (!window.google || !isGoogleLoaded) { - toast.error('Google Sign-In is still loading. Please try again.'); - return; - } - - try { - window.google.accounts.id.prompt((notification: any) => { - if (notification.isNotDisplayed() || notification.isSkippedMoment()) { - console.log('One Tap not displayed, using OAuth flow'); - setIsLoading(true); - - const redirectTo = `${window.location.origin}/auth/callback${returnUrl ? `?returnUrl=${encodeURIComponent(returnUrl)}` : ''}`; - console.log('OAuth redirect URI:', redirectTo); - - supabase.auth.signInWithOAuth({ - provider: 'google', - options: { - redirectTo, - }, - }).then(({ error }) => { - if (error) { - console.error('OAuth error:', error); - - if (error.message?.includes('redirect_uri_mismatch')) { - const redirectUri = `${window.location.origin}/auth/callback`; - toast.error( - `Google OAuth configuration error. Add this exact URL to your Google Cloud Console: ${redirectUri}`, - { duration: 10000 } - ); - } else { - toast.error(error.message || 'Failed to sign in with Google'); - } - - setIsLoading(false); - } - }); - } - }); - } catch (error) { - console.error('Error triggering Google sign-in:', error); - setIsLoading(true); - - const redirectTo = `${window.location.origin}/auth/callback${returnUrl ? `?returnUrl=${encodeURIComponent(returnUrl)}` : ''}`; - supabase.auth.signInWithOAuth({ - provider: 'google', - options: { - redirectTo, - }, - }).then(({ error }) => { - if (error) { - console.error('OAuth error:', error); - - if (error.message?.includes('redirect_uri_mismatch')) { - const redirectUri = `${window.location.origin}/auth/callback`; - toast.error( - `Google OAuth configuration error. Add this exact URL to your Google Cloud Console: ${redirectUri}`, - { duration: 10000 } - ); - } else { - toast.error(error.message || 'Failed to sign in with Google'); - } - - setIsLoading(false); - } - }); - } - }; - - if (!process.env.NEXT_PUBLIC_GOOGLE_CLIENT_ID) { - return ( -
- Google Sign-In not configured -
- ); - } - return ( -
- - - {wasLastMethod && ( -
-
-
+