mirror of https://github.com/buster-so/buster.git
healing messages
This commit is contained in:
parent aaae50a32f
commit 1d545a009c
@@ -1,4 +1,4 @@
-import { type ModelMessage, hasToolCall, stepCountIs, streamText } from 'ai';
+import { type ModelMessage, NoSuchToolError, hasToolCall, stepCountIs, streamText } from 'ai';
 import { wrapTraced } from 'braintrust';
 import z from 'zod';
 import {
@@ -9,7 +9,7 @@ import {
   modifyMetrics,
 } from '../../tools';
 import { Sonnet4 } from '../../utils/models/sonnet-4';
-import { healToolWithLlm } from '../../utils/tool-call-repair';
+import { createNoSuchToolHealingMessage, healToolWithLlm } from '../../utils/tool-call-repair';
 import { getAnalystAgentSystemPrompt } from './get-analyst-agent-system-prompt';

 const DEFAULT_CACHE_OPTIONS = {
@@ -46,6 +46,12 @@ export function createAnalystAgent(analystAgentOptions: AnalystAgentOptions) {
   } as ModelMessage;

   async function stream({ messages }: AnalystStreamOptions) {
+    const maxRetries = 2;
+    let attempt = 0;
+    const currentMessages = [...messages];
+
+    while (attempt <= maxRetries) {
+      try {
     return wrapTraced(
       () =>
         streamText({
@@ -57,7 +63,7 @@ export function createAnalystAgent(analystAgentOptions: AnalystAgentOptions) {
             modifyDashboards,
             doneTool,
           },
-          messages: [systemMessage, ...messages],
+          messages: [systemMessage, ...currentMessages],
           stopWhen: STOP_CONDITIONS,
           toolChoice: 'required',
           maxOutputTokens: 10000,
@@ -69,6 +75,35 @@ export function createAnalystAgent(analystAgentOptions: AnalystAgentOptions) {
         name: 'Analyst Agent',
       }
     )();
+      } catch (error) {
+        attempt++;
+
+        // Only retry for NoSuchToolError
+        if (!NoSuchToolError.isInstance(error) || attempt > maxRetries) {
+          console.error('Error in analyst agent:', error);
+          throw error;
+        }
+
+        // Add healing message and retry
+        const healingMessage = createNoSuchToolHealingMessage(
+          error,
+          `createMetrics, modifyMetrics, createDashboards, modifyDashboards, createReports, modifyReports, doneTool are the tools that are available to you at this moment.
+
+The previous phase of the workflow was the think and prep phase that has access to the following tools:
+sequentialThinking, executeSql, respondWithoutAssetCreation, submitThoughts, messageUserClarifyingQuestion
+
+However, you don't have access to any of those tools at this moment.
+`
+        );
+        currentMessages.push(healingMessage);
+
+        console.info(
+          `Retrying analyst agent after NoSuchToolError (attempt ${attempt}/${maxRetries})`
+        );
+      }
+    }
+
+    throw new Error('Max retry attempts exceeded');
   }

   async function getSteps() {

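Both agents wrap their traced streamText call in the same retry shape (the think-and-prep agent below repeats it). A condensed, self-contained sketch of the pattern, assuming the AI SDK's NoSuchToolError and the repo's new createNoSuchToolHealingMessage helper; streamWithHealing and streamOnce are illustrative names that do not exist in the repository:

import { NoSuchToolError, type ModelMessage } from 'ai';
import { createNoSuchToolHealingMessage } from '../../utils/tool-call-repair';

// Retry-with-healing loop: when the model calls a tool that is not registered,
// append a synthetic tool-error result naming the tools that do exist, then retry.
async function streamWithHealing<T>(
  messages: ModelMessage[],
  streamOnce: (msgs: ModelMessage[]) => Promise<T>, // stand-in for the traced streamText call
  availableToolsNote: string,
  maxRetries = 2
): Promise<T> {
  const currentMessages = [...messages];
  let attempt = 0;

  while (attempt <= maxRetries) {
    try {
      return await streamOnce(currentMessages);
    } catch (error) {
      attempt++;
      // Anything other than a missing-tool call (or exhausted retries) is rethrown.
      if (!NoSuchToolError.isInstance(error) || attempt > maxRetries) {
        throw error;
      }
      // Feed the failure back to the model as a tool result and go around again.
      currentMessages.push(createNoSuchToolHealingMessage(error, availableToolsNote));
    }
  }

  throw new Error('Max retry attempts exceeded');
}
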
@@ -1,4 +1,4 @@
-import { type ModelMessage, hasToolCall, stepCountIs, streamText } from 'ai';
+import { type ModelMessage, NoSuchToolError, hasToolCall, stepCountIs, streamText } from 'ai';
 import { wrapTraced } from 'braintrust';
 import z from 'zod';
 import {
@@ -9,6 +9,7 @@ import {
   submitThoughts,
 } from '../../tools';
 import { Sonnet4 } from '../../utils/models/sonnet-4';
+import { createNoSuchToolHealingMessage } from '../../utils/tool-call-repair';
 import { getThinkAndPrepAgentSystemPrompt } from './get-think-and-prep-agent-system-prompt';

 const DEFAULT_CACHE_OPTIONS = {
@@ -47,7 +48,13 @@ export function createThinkAndPrepAgent(thinkAndPrepAgentSchema: ThinkAndPrepAge
   } as ModelMessage;

   async function stream({ messages }: ThinkAndPrepStreamOptions) {
-    return wrapTraced(
+    const maxRetries = 2;
+    let attempt = 0;
+    const currentMessages = [...messages];
+
+    while (attempt <= maxRetries) {
+      try {
+        return await wrapTraced(
       () =>
         streamText({
           model: Sonnet4,
@@ -58,7 +65,7 @@ export function createThinkAndPrepAgent(thinkAndPrepAgentSchema: ThinkAndPrepAge
             submitThoughts,
             messageUserClarifyingQuestion,
           },
-          messages: [systemMessage, ...messages],
+          messages: [systemMessage, ...currentMessages],
           stopWhen: STOP_CONDITIONS,
           toolChoice: 'required',
           maxOutputTokens: 10000,
@@ -68,6 +75,34 @@ export function createThinkAndPrepAgent(thinkAndPrepAgentSchema: ThinkAndPrepAge
         name: 'Think and Prep Agent',
       }
     )();
+      } catch (error) {
+        attempt++;
+
+        // Only retry for NoSuchToolError
+        if (!NoSuchToolError.isInstance(error) || attempt > maxRetries) {
+          console.error('Error in think and prep agent:', error);
+          throw error;
+        }
+
+        // Add healing message and retry
+        const healingMessage = createNoSuchToolHealingMessage(
+          error,
+          `sequentialThinking, executeSql, respondWithoutAssetCreation, submitThoughts, messageUserClarifyingQuestion are the tools that are available to you at this moment.
+
+The next phase of the workflow will be the analyst that has access to the following tools:
+createMetrics, modifyMetrics, createDashboards, modifyDashboards, doneTool
+
+You'll be able to use those when they are available to you.`
+        );
+        currentMessages.push(healingMessage);
+
+        console.info(
+          `Retrying think and prep agent after NoSuchToolError (attempt ${attempt}/${maxRetries})`
+        );
+      }
+    }
+
+    throw new Error('Max retry attempts exceeded');
  }

   async function getSteps() {

@@ -1,61 +1,44 @@
 import { updateChat, updateMessage } from '@buster/database';
-import { createStep } from '@mastra/core';
-import type { RuntimeContext } from '@mastra/core/runtime-context';
 import { generateObject } from 'ai';
 import type { CoreMessage } from 'ai';
-import type { ModelMessage } from 'ai';
 import { wrapTraced } from 'braintrust';
 import { z } from 'zod';
-import { thinkAndPrepWorkflowInputSchema } from '../schemas/workflow-schemas';
 import { Haiku35 } from '../utils/models/haiku-3-5';
 import { appendToConversation, standardizeMessages } from '../utils/standardizeMessages';
-import type { AnalystRuntimeContext } from '../workflows/analyst-workflow';
-
-const inputSchema = thinkAndPrepWorkflowInputSchema;
-
 // Schema for what the LLM returns
 const llmOutputSchema = z.object({
   title: z.string().describe('The title for the chat.'),
 });
-
-// Schema for what the step returns (includes pass-through data)
-export const generateChatTitleOutputSchema = z.object({
-  title: z.string().describe('The title for the chat.'),
-  // Pass through dashboard context
-  dashboardFiles: z
-    .array(
-      z.object({
-        id: z.string(),
-        name: z.string(),
-        versionNumber: z.number(),
-        metricIds: z.array(z.string()),
-      })
-    )
-    .optional(),
-});
-
 const generateChatTitleInstructions = `
 I am a chat title generator that is responsible for generating a title for the chat.

-The title should be 3-8 words, capturing the main topic or intent of the conversation.
+The title should be 3-8 words, capturing the main topic or intent of the conversation. With an emphasis on the user's question and most recent converstaion topic.
 `;

-const generateChatTitleExecution = async ({
-  inputData,
-  runtimeContext,
-}: {
-  inputData: z.infer<typeof inputSchema>;
-  runtimeContext: RuntimeContext<AnalystRuntimeContext>;
-}): Promise<z.infer<typeof generateChatTitleOutputSchema>> => {
-  try {
-    // Use the input data directly
-    const prompt = inputData.prompt;
-    const conversationHistory = inputData.conversationHistory;
+export interface GenerateChatTitleParams {
+  prompt: string;
+  conversationHistory?: CoreMessage[];
+  chatId?: string;
+  messageId?: string;
+}
+
     // Prepare messages for the agent
+export interface GenerateChatTitleResult {
+  title: string;
+}
+
+export async function generateChatTitle({
+  prompt,
+  conversationHistory,
+  chatId,
+  messageId,
+}: GenerateChatTitleParams): Promise<GenerateChatTitleResult> {
+  try {
+    // Prepare messages for the LLM
     let messages: CoreMessage[];
     if (conversationHistory && conversationHistory.length > 0) {
       // Use conversation history as context + append new user message
-      messages = appendToConversation(conversationHistory as CoreMessage[], prompt);
+      messages = appendToConversation(conversationHistory, prompt);
     } else {
       // Otherwise, use just the prompt
       messages = standardizeMessages(prompt);
@@ -97,9 +80,6 @@ const generateChatTitleExecution = async ({
       title = { title: 'New Analysis' };
     }

-    const chatId = runtimeContext.get('chatId');
-    const messageId = runtimeContext.get('messageId');
-
     // Run database updates concurrently
     const updatePromises: Promise<{ success: boolean }>[] = [];

@@ -121,33 +101,16 @@ const generateChatTitleExecution = async ({

     await Promise.all(updatePromises);

-    return {
-      ...title,
-      dashboardFiles: inputData.dashboardFiles, // Pass through dashboard context
-    };
+    return { title: title.title };
   } catch (error) {
     // Handle AbortError gracefully
     if (error instanceof Error && error.name === 'AbortError') {
       // Return a fallback title when aborted
-      return {
-        title: 'New Analysis',
-        dashboardFiles: inputData.dashboardFiles, // Pass through dashboard context
-      };
+      return { title: 'New Analysis' };
     }

     console.error('[GenerateChatTitle] Failed to generate chat title:', error);
     // Return a fallback title instead of crashing
-    return {
-      title: 'New Analysis',
-      dashboardFiles: inputData.dashboardFiles, // Pass through dashboard context
-    };
+    return { title: 'New Analysis' };
+  }
 }
-};
-
-export const generateChatTitleStep = createStep({
-  id: 'generate-chat-title',
-  description: 'This step is a single llm call to quickly generate a title for the chat.',
-  inputSchema,
-  outputSchema: generateChatTitleOutputSchema,
-  execute: generateChatTitleExecution,
-});

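The chat-title step is flattened into a plain exported function, dropping the Mastra step wrapper, the runtime-context lookups, and the dashboard pass-through. A hypothetical call site under that reading; the import path and the surrounding titleChat function are illustrative and not part of this commit:

import type { CoreMessage } from 'ai';
import { generateChatTitle } from './generate-chat-title'; // path is a guess

// Callers now pass chatId/messageId explicitly instead of reading them from a
// RuntimeContext; on failure or abort the function resolves to the fallback
// title ('New Analysis') rather than throwing.
async function titleChat(
  prompt: string,
  history: CoreMessage[],
  chatId: string,
  messageId: string
): Promise<string> {
  const { title } = await generateChatTitle({
    prompt,
    conversationHistory: history,
    chatId,
    messageId,
  });
  return title;
}
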
@@ -197,9 +197,8 @@ rows:
       runtimeContext: contextWithoutUserId,
     };

-    const result = await createDashboards.execute({
-      context: input,
-      runtimeContext: contextWithoutUserId as unknown as RuntimeContext,
+    const result = await createDashboards.execute(input, {
+      experimental_context: contextWithoutUserId as unknown as RuntimeContext,
     });
     expect(result.message).toBe('Unable to verify your identity. Please log in again.');
     expect(result.files).toHaveLength(0);
@@ -218,9 +217,8 @@ description: Invalid dashboard
       runtimeContext: mockRuntimeContext,
     };

-    const result = await createDashboards.execute({
-      context: input,
-      runtimeContext: mockRuntimeContext as unknown as RuntimeContext,
+    const result = await createDashboards.execute(input, {
+      experimental_context: mockRuntimeContext as unknown as RuntimeContext,
     });

     expect(result.files).toHaveLength(0);
@@ -247,9 +245,8 @@ rows:
       runtimeContext: mockRuntimeContext,
     };

-    const result = await createDashboards.execute({
-      context: input,
-      runtimeContext: mockRuntimeContext as unknown as RuntimeContext,
+    const result = await createDashboards.execute(input, {
+      experimental_context: mockRuntimeContext as unknown as RuntimeContext,
     });

     expect(result.files).toHaveLength(0);
@@ -275,9 +272,8 @@ rows:
       runtimeContext: mockRuntimeContext,
     };

-    const result = await createDashboards.execute({
-      context: input,
-      runtimeContext: mockRuntimeContext as unknown as RuntimeContext,
+    const result = await createDashboards.execute(input, {
+      experimental_context: mockRuntimeContext as unknown as RuntimeContext,
     });

     expect(result.files).toHaveLength(0);
@@ -308,9 +304,8 @@ rows:
       runtimeContext: mockRuntimeContext,
     };

-    const result = await createDashboards.execute({
-      context: input,
-      runtimeContext: mockRuntimeContext as unknown as RuntimeContext,
+    const result = await createDashboards.execute(input, {
+      experimental_context: mockRuntimeContext as unknown as RuntimeContext,
     });

     expect(result.files).toHaveLength(1);
@@ -369,9 +364,8 @@ rows:
       runtimeContext: mockRuntimeContext,
     };

-    const result = await createDashboards.execute({
-      context: input,
-      runtimeContext: mockRuntimeContext as unknown as RuntimeContext,
+    const result = await createDashboards.execute(input, {
+      experimental_context: mockRuntimeContext as unknown as RuntimeContext,
     });

     expect(result.files).toHaveLength(1);
@@ -409,9 +403,8 @@ rows:
       runtimeContext: mockRuntimeContext,
     };

-    const result = await createDashboards.execute({
-      context: input,
-      runtimeContext: mockRuntimeContext as unknown as RuntimeContext,
+    const result = await createDashboards.execute(input, {
+      experimental_context: mockRuntimeContext as unknown as RuntimeContext,
     });

     expect(result.duration).toBeGreaterThan(0);
@@ -450,9 +443,8 @@ rows:
       runtimeContext: mockRuntimeContext,
     };

-    const result = await createDashboards.execute({
-      context: input,
-      runtimeContext: mockRuntimeContext as unknown as RuntimeContext,
+    const result = await createDashboards.execute(input, {
+      experimental_context: mockRuntimeContext as unknown as RuntimeContext,
     });

     expect(result.files).toHaveLength(3);
@@ -503,9 +495,8 @@ rows:
       runtimeContext: mockRuntimeContext,
     };

-    const result = await createDashboards.execute({
-      context: input,
-      runtimeContext: mockRuntimeContext as unknown as RuntimeContext,
+    const result = await createDashboards.execute(input, {
+      experimental_context: mockRuntimeContext as unknown as RuntimeContext,
     });

     expect(result.files).toHaveLength(1);
@@ -553,9 +544,8 @@ rows:
       runtimeContext: mockRuntimeContext,
     };

-    const result = await createDashboards.execute({
-      context: input,
-      runtimeContext: mockRuntimeContext as unknown as RuntimeContext,
+    const result = await createDashboards.execute(input, {
+      experimental_context: mockRuntimeContext as unknown as RuntimeContext,
     });

     expect(result.files).toHaveLength(1);
@@ -608,10 +598,10 @@ rows:
       runtimeContext: mockRuntimeContext,
     };

-    const successResult = await createDashboards.execute({
-      context: successInput,
-      runtimeContext: mockRuntimeContext as unknown as RuntimeContext,
-    });
+    const successResult = await createDashboards.execute(
+      successInput,
+      { experimental_context: mockRuntimeContext as unknown as RuntimeContext }
+    );
     expect(successResult.message).toBe('Successfully created 1 dashboard files.');

     // Track created dashboard for cleanup
@@ -628,9 +618,8 @@ rows:
       runtimeContext: mockRuntimeContext,
     };

-    const failureResult = await createDashboards.execute({
-      context: failureInput,
-      runtimeContext: mockRuntimeContext as unknown as RuntimeContext,
+    const failureResult = await createDashboards.execute(failureInput, {
+      experimental_context: mockRuntimeContext as unknown as RuntimeContext,
     });
     expect(failureResult.message).toContain("Failed to create 'Failure Test'");
   });
@@ -662,9 +651,8 @@ rows:
       runtimeContext: mockRuntimeContext,
     };

-    const result = await createDashboards.execute({
-      context: input,
-      runtimeContext: mockRuntimeContext as unknown as RuntimeContext,
+    const result = await createDashboards.execute(input, {
+      experimental_context: mockRuntimeContext as unknown as RuntimeContext,
     });

     expect(result.files).toHaveLength(1);

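Every test update above is the same mechanical change: the Mastra-style single { context, runtimeContext } argument becomes an execute(input, options) call with the runtime context carried in experimental_context, which appears to match the AI SDK v5 tool execute signature. A toy tool showing that call shape; the echo tool and its context type are made up for illustration, only the signature mirrors the tests:

import { tool } from 'ai';
import { z } from 'zod';

// Minimal tool whose execute receives (input, options); shared state arrives
// via options.experimental_context instead of a Mastra runtimeContext.
const echo = tool({
  description: 'Echo the input back, prefixed with the caller id',
  inputSchema: z.object({ text: z.string() }),
  execute: async ({ text }, { experimental_context }) => {
    const ctx = experimental_context as { userId?: string } | undefined;
    return `${ctx?.userId ?? 'anonymous'}: ${text}`;
  },
});

// Calling the tool directly in a test mirrors the updated createDashboards calls.
const result = await echo.execute!(
  { text: 'hi' },
  { toolCallId: 'test-call', messages: [], experimental_context: { userId: 'user-1' } }
);
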
@@ -0,0 +1,30 @@
+import type { ModelMessage, ToolModelMessage, ToolResultPart } from 'ai';
+import type { NoSuchToolError } from 'ai';
+
+/**
+ * Creates a healing message for NoSuchToolError that simulates a tool error result.
+ * This allows the LLM to understand which tool failed and what tools are available.
+ *
+ * @param error - The NoSuchToolError that was caught
+ * @param availableTools - A comma-separated string of available tool names
+ * @returns A ModelMessage with a tool error result
+ */
+export function createNoSuchToolHealingMessage(
+  error: NoSuchToolError,
+  healingMessage: string
+): ModelMessage {
+  return {
+    role: 'tool',
+    content: [
+      {
+        type: 'tool-result',
+        toolCallId: error.toolCallId,
+        toolName: error.toolName,
+        output: {
+          type: 'text',
+          value: `Tool "${error.toolName}" is not available. ${healingMessage}.`,
+        },
+      },
+    ],
+  };
+}

@@ -1,6 +1,6 @@
 import type { LanguageModelV2ToolCall } from '@ai-sdk/provider';
 import { generateObject } from 'ai';
-import type { ToolSet } from 'ai';
+import { type InvalidToolInputError, NoSuchToolError, type ToolSet } from 'ai';
 import { Haiku35 } from '../models/haiku-3-5';

 interface ToolCallWithArgs extends LanguageModelV2ToolCall {
@@ -10,11 +10,17 @@ interface ToolCallWithArgs extends LanguageModelV2ToolCall {
 export async function healToolWithLlm({
   toolCall,
   tools,
+  error,
 }: {
   toolCall: LanguageModelV2ToolCall;
   tools: ToolSet;
+  error: NoSuchToolError | InvalidToolInputError;
 }) {
   try {
+    if (error instanceof NoSuchToolError) {
+      return null;
+    }
+
     const tool = tools[toolCall.toolName as keyof typeof tools];

     if (!tool) {

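healToolWithLlm now receives the triggering error and returns null for NoSuchToolError, so LLM-based repair is reserved for invalid-input errors while missing-tool errors bubble up to the agents' healing-message retry above. Returning null is the "do not repair" signal of the AI SDK's experimental_repairToolCall hook; a sketch of how such a repairer is typically wired, assuming that hook — the wiring itself is not shown in this diff:

import { streamText, type ModelMessage, type ToolSet } from 'ai';
import { Sonnet4 } from '../../utils/models/sonnet-4';
import { healToolWithLlm } from '../../utils/tool-call-repair';

// Illustrative wiring: invalid tool inputs get repaired by a smaller model,
// while NoSuchToolError makes healToolWithLlm return null, so the error
// surfaces to the outer retry loop that appends a healing message.
function streamWithRepair(messages: ModelMessage[], tools: ToolSet) {
  return streamText({
    model: Sonnet4,
    tools,
    messages,
    experimental_repairToolCall: ({ toolCall, tools, error }) =>
      healToolWithLlm({ toolCall, tools, error }),
  });
}
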
@@ -1 +1,2 @@
 export { healToolWithLlm } from './heal-tool-with-llm';
+export { createNoSuchToolHealingMessage } from './heal-no-such-tool';

@@ -1,18 +1,16 @@
 import { createWorkflow } from '@mastra/core';
 import { z } from 'zod';
-import {
-  type AnalystAgentContext,
-  AnalystAgentContextSchema,
-} from '../agents/analyst-agent/analyst-agent-context';
+import { type AnalystAgentOptions } from '../agents/analyst-agent/analyst-agent';
 import { thinkAndPrepWorkflowInputSchema } from '../schemas/workflow-schemas';

+// Type alias for consistency
+export type AnalystAgentContext = AnalystAgentOptions;
+export type AnalystRuntimeContext = AnalystAgentOptions;
+
 // Re-export for backward compatibility
-export { thinkAndPrepWorkflowInputSchema, AnalystAgentContextSchema, type AnalystAgentContext };
+export { thinkAndPrepWorkflowInputSchema, type AnalystAgentContext };
 // Legacy exports - deprecated, use AnalystAgentContext instead
-export {
-  AnalystAgentContextSchema as AnalystRuntimeContextSchema,
-  type AnalystAgentContext as AnalystRuntimeContext,
-};
+export { type AnalystAgentContext as AnalystRuntimeContext };
 import { analystStep } from '../steps/analyst-step';
 import { createTodosStep } from '../steps/create-todos-step';
 import { extractValuesSearchStep } from '../steps/extract-values-search-step';