diff --git a/packages/ai/src/agents/analytics-engineer-agent/analytics-engineer-agent.ts b/packages/ai/src/agents/analytics-engineer-agent/analytics-engineer-agent.ts
index a394ec51b..d56ed0864 100644
--- a/packages/ai/src/agents/analytics-engineer-agent/analytics-engineer-agent.ts
+++ b/packages/ai/src/agents/analytics-engineer-agent/analytics-engineer-agent.ts
@@ -36,8 +36,7 @@ export function createAnalyticsEngineerAgent(
     tools: toolSet,
     messages: [systemMessage, ...messages],
     stopWhen: STOP_CONDITIONS,
-    toolChoice: 'required',
-    maxOutputTokens: 10000,
+    maxOutputTokens: 64000,
     // temperature: 0,
   });
 
diff --git a/packages/ai/src/llm/providers/gateway.ts b/packages/ai/src/llm/providers/gateway.ts
index c80e0fc66..17c76adc9 100644
--- a/packages/ai/src/llm/providers/gateway.ts
+++ b/packages/ai/src/llm/providers/gateway.ts
@@ -7,12 +7,20 @@
 export type GatewayProviderOrder = string[];
 
 export type AnthropicOptions = {
   cacheControl?: { type: 'ephemeral' };
+  thinking?: {
+    type: 'enabled';
+    budgetTokens: number;
+  };
 };
 
 export type BedrockOptions = {
   cachePoint?: { type: 'default' };
   additionalModelRequestFields?: {
     anthropic_beta?: string[];
+    reasoning_config?: {
+      type: 'enabled';
+      budget_tokens: number;
+    };
   };
 };
@@ -48,33 +56,50 @@
 export const DEFAULT_ANTHROPIC_OPTIONS: AnthropicProviderOptions = {
   anthropic: {
     cacheControl: { type: 'ephemeral' },
     thinking: {
-        type: 'enabled',
-        budgetTokens: 10000 // Set desired tokens for reasoning
-      }
-    },
+      type: 'enabled',
+      budgetTokens: 10000 // Set desired tokens for reasoning
+    }
+  },
   bedrock: {
     cachePoint: { type: 'default' },
     additionalModelRequestFields: {
       anthropic_beta: ['fine-grained-tool-streaming-2025-05-14'],
       reasoning_config: {
-          type: 'enabled',
-          budget_tokens: 10000 // Adjust as needed
-        }
+        type: 'enabled',
+        budget_tokens: 10000 // Adjust as needed
+      }
     },
   },
 };
 
-export const DEFAULT_ANALYTICS_ENGINEER_OPTIONS: OpenAIProviderOptions = {
+export const DEFAULT_ANALYTICS_ENGINEER_OPTIONS = {
   gateway: {
-    order: ['openai'],
+    only: ['bedrock'],
   },
   openai: {
     parallelToolCalls: true,
-    reasoningEffort: 'medium',
+    reasoningEffort: 'high',
     verbosity: 'low',
     include: ['reasoning.encrypted_content'],
     store: false,
   },
+  anthropic: {
+    cacheControl: { type: 'ephemeral' },
+    thinking: {
+      type: 'enabled',
+      budgetTokens: 10000 // Set desired tokens for reasoning
+    }
+  },
+  bedrock: {
+    cachePoint: { type: 'default' },
+    additionalModelRequestFields: {
+      anthropic_beta: ['fine-grained-tool-streaming-2025-05-14'],
+      reasoning_config: {
+        type: 'enabled',
+        budget_tokens: 10000 // Adjust as needed
+      }
+    },
+  },
 };
 export const DEFAULT_OPENAI_OPTIONS: OpenAIProviderOptions = {