mirror of https://github.com/buster-so/buster.git
Refactor prompt loading and streamline agent steps
- Replaced file system reads with direct imports of prompt text files in various agent and step files to improve performance and reduce complexity.
- Updated the `runDocsAgentStep` and `runDocsAgentWorkflow` functions to return `void` instead of structured outputs, simplifying their interfaces.
- Enhanced test cases to validate prompt loading without relying on file system checks, ensuring faster and more reliable tests.
- Cleaned up unused imports and optimized code structure across multiple files.

These changes aim to enhance code clarity, maintainability, and performance in the agent workflow and related components.
parent ccc4528396
commit da4f3605cc
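The core pattern change, shown as a minimal before/after sketch (the file name is illustrative; the real files appear in the diff below):

// Before: resolve and read the prompt template from disk at call time
import * as fs from 'node:fs';
import * as path from 'node:path';
const promptViaFs = fs.readFileSync(path.join(__dirname, 'analyst-agent-prompt.txt'), 'utf-8');

// After: import the .txt file directly and let the bundler inline it as a string
import analystAgentPrompt from './analyst-agent-prompt.txt';
const promptViaImport: string = analystAgentPrompt;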
@@ -1,3 +1,4 @@
import * as fs from 'node:fs';
import * as path from 'node:path';
import { esbuildPlugin } from '@trigger.dev/build/extensions';
import { defineConfig } from '@trigger.dev/sdk';
@@ -42,14 +43,17 @@ export default defineConfig({
// Handle sub-paths like @buster/ai/workflows/analyst-workflow
// Check if subPath already starts with 'src', if so, don't add it again
const cleanSubPath = subPath.startsWith('src/') ? subPath.slice(4) : subPath;
resolvedPath = path.resolve(
process.cwd(),
'../..',
'packages',
packageName,
'src',
`${cleanSubPath}.ts`
);
const srcRoot = path.resolve(process.cwd(), '../..', 'packages', packageName, 'src');

const candidatePaths = [
path.join(srcRoot, `${cleanSubPath}.ts`),
path.join(srcRoot, cleanSubPath, 'index.ts'),
path.join(srcRoot, `${cleanSubPath}.tsx`),
path.join(srcRoot, cleanSubPath, 'index.tsx'),
];

const found = candidatePaths.find((p) => fs.existsSync(p));
resolvedPath = found ?? path.join(srcRoot, cleanSubPath);
} else {
// Handle direct package imports like @buster/ai
resolvedPath = path.resolve(
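A standalone sketch of the resolution logic above; the helper name is hypothetical, and the real code runs inside the esbuild plugin's resolve hook:

import * as fs from 'node:fs';
import * as path from 'node:path';

// Map a sub-path import like '@buster/ai/workflows/analyst-workflow' to a concrete source file.
function resolveSubPath(packageName: string, subPath: string): string {
  const cleanSubPath = subPath.startsWith('src/') ? subPath.slice(4) : subPath;
  const srcRoot = path.resolve(process.cwd(), '../..', 'packages', packageName, 'src');
  const candidates = [
    path.join(srcRoot, `${cleanSubPath}.ts`),
    path.join(srcRoot, cleanSubPath, 'index.ts'),
    path.join(srcRoot, `${cleanSubPath}.tsx`),
    path.join(srcRoot, cleanSubPath, 'index.tsx'),
  ];
  // Fall back to the bare path if no candidate exists on disk.
  return candidates.find((p) => fs.existsSync(p)) ?? path.join(srcRoot, cleanSubPath);
}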
@@ -1,12 +1,10 @@
import * as fs from 'node:fs';
import * as path from 'node:path';
import { describe, expect, it } from 'vitest';
import analystAgentPrompt from './analyst-agent-prompt.txt';
import { getAnalystAgentSystemPrompt } from './get-analyst-agent-system-prompt';

describe('Analyst Agent Instructions', () => {
it('should validate template file contains expected variables', () => {
const promptPath = path.join(__dirname, 'analyst-agent-prompt.txt');
const content = fs.readFileSync(promptPath, 'utf-8');
const content = analystAgentPrompt;

// Expected template variables
const expectedVariables = ['sql_dialect_guidance', 'date'];
@@ -1,5 +1,4 @@
import * as fs from 'node:fs';
import * as path from 'node:path';
import analystAgentPrompt from './analyst-agent-prompt.txt';

/**
* Template parameters for the analyst agent prompt
@@ -13,17 +12,9 @@ export interface AnalystTemplateParams {
* Loads the analyst agent prompt template and replaces variables
*/
function loadAndProcessPrompt(params: AnalystTemplateParams): string {
const promptPath = path.join(__dirname, 'analyst-agent-prompt.txt');

try {
const content = fs.readFileSync(promptPath, 'utf-8');

return content
.replace(/\{\{sql_dialect_guidance\}\}/g, params.dataSourceSyntax)
.replace(/\{\{date\}\}/g, params.date);
} catch (error) {
throw new Error(`Failed to load prompt template: ${String(error)}`);
}
return analystAgentPrompt
.replace(/\{\{sql_dialect_guidance\}\}/g, params.dataSourceSyntax)
.replace(/\{\{date\}\}/g, params.date);
}

/**
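A usage sketch for the simplified loader inside this module, with assumed parameter values:

const systemPrompt = loadAndProcessPrompt({
  dataSourceSyntax: 'Use PostgreSQL syntax for all SQL examples.',
  date: '2025-01-01',
});
// Both {{sql_dialect_guidance}} and {{date}} are substituted; no fs access or try/catch is needed.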
@@ -1,12 +1,10 @@
import * as fs from 'node:fs';
import * as path from 'node:path';
import { describe, expect, it } from 'vitest';
import docsAgentPrompt from './docs-agent-prompt.txt';
import { getDocsAgentSystemPrompt } from './get-docs-agent-system-prompt';

describe('Docs Agent Instructions', () => {
it('should validate template file contains expected variables', () => {
const promptPath = path.join(__dirname, 'docs-agent-prompt.txt');
const content = fs.readFileSync(promptPath, 'utf-8');
const content = docsAgentPrompt;

// Expected template variables
const expectedVariables = ['folder_structure', 'date'];
@@ -1,5 +1,4 @@
import * as fs from 'node:fs';
import * as path from 'node:path';
import docsAgentPrompt from './docs-agent-prompt.txt';

/**
* Template parameters for the docs agent prompt
@@ -13,17 +12,9 @@ export interface DocsAgentTemplateParams {
* Loads the docs agent prompt template and replaces variables
*/
function loadAndProcessPrompt(params: DocsAgentTemplateParams): string {
const promptPath = path.join(__dirname, 'docs-agent-prompt.txt');

try {
const content = fs.readFileSync(promptPath, 'utf-8');

return content
.replace(/\{\{folder_structure\}\}/g, params.folderStructure)
.replace(/\{\{date\}\}/g, params.date);
} catch (error) {
throw new Error(`Failed to load prompt template: ${String(error)}`);
}
return docsAgentPrompt
.replace(/\{\{folder_structure\}\}/g, params.folderStructure)
.replace(/\{\{date\}\}/g, params.date);
}

/**
@@ -1,16 +1,15 @@
import * as fs from 'node:fs';
import * as path from 'node:path';
import { describe, expect, it } from 'vitest';
import { getThinkAndPrepAgentSystemPrompt } from './get-think-and-prep-agent-system-prompt';
import thinkAndPrepInvestigationPrompt from './think-and-prep-agent-investigation-prompt.txt';
import thinkAndPrepStandardPrompt from './think-and-prep-agent-standard-prompt.txt';

describe('Think and Prep Agent Instructions', () => {
describe.each([
['standard', 'think-and-prep-agent-standard-prompt.txt'],
['investigation', 'think-and-prep-agent-investigation-prompt.txt'],
])('%s mode', (mode, filename) => {
['standard', thinkAndPrepStandardPrompt],
['investigation', thinkAndPrepInvestigationPrompt],
])('%s mode', (mode, promptContent) => {
it(`should validate ${mode} template file contains expected variables`, () => {
const promptPath = path.join(__dirname, filename);
const content = fs.readFileSync(promptPath, 'utf-8');
const content = promptContent;

// Expected template variables
const expectedVariables = ['sql_dialect_guidance', 'date'];
@@ -1,5 +1,5 @@
import * as fs from 'node:fs';
import * as path from 'node:path';
import thinkAndPrepInvestigationPrompt from './think-and-prep-agent-investigation-prompt.txt';
import thinkAndPrepStandardPrompt from './think-and-prep-agent-standard-prompt.txt';

/**
* Template parameters for the think and prep agent prompt
@@ -15,11 +15,11 @@ export interface ThinkAndPrepTemplateParams {
export type AnalysisMode = 'standard' | 'investigation';

/**
* Type-safe mapping of analysis modes to prompt file names
* Type-safe mapping of analysis modes to prompt content
*/
const PROMPT_FILES: Record<AnalysisMode, string> = {
standard: 'think-and-prep-agent-standard-prompt.txt',
investigation: 'think-and-prep-agent-investigation-prompt.txt',
const PROMPTS: Record<AnalysisMode, string> = {
standard: thinkAndPrepStandardPrompt,
investigation: thinkAndPrepInvestigationPrompt,
} as const;

/**
@@ -29,18 +29,11 @@ function loadAndProcessPrompt(
params: ThinkAndPrepTemplateParams,
analysisMode: AnalysisMode = 'standard'
): string {
const promptFileName = PROMPT_FILES[analysisMode];
const promptPath = path.join(__dirname, promptFileName);
const content = PROMPTS[analysisMode];

try {
const content = fs.readFileSync(promptPath, 'utf-8');

return content
.replace(/\{\{sql_dialect_guidance\}\}/g, params.sqlDialectGuidance)
.replace(/\{\{date\}\}/g, params.date);
} catch (error) {
throw new Error(`Failed to load prompt template for ${analysisMode} mode: ${String(error)}`);
}
return content
.replace(/\{\{sql_dialect_guidance\}\}/g, params.sqlDialectGuidance)
.replace(/\{\{date\}\}/g, params.date);
}

/**
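Because PROMPTS is typed as Record<AnalysisMode, string>, the lookup covers both modes exhaustively at compile time. A small usage sketch with assumed parameter values:

const standardPrompt = loadAndProcessPrompt(
  { sqlDialectGuidance: 'Prefer ANSI SQL.', date: '2025-01-01' },
  'standard'
);
const investigationPrompt = loadAndProcessPrompt(
  { sqlDialectGuidance: 'Prefer ANSI SQL.', date: '2025-01-01' },
  'investigation'
);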
@@ -1,6 +1,5 @@
import * as fs from 'node:fs';
import * as path from 'node:path';
import { describe, expect, it } from 'vitest';
import createTodosSystemPrompt from './create-todos-system-prompt.txt';
import { getCreateTodosSystemMessage } from './get-create-todos-system-message';

describe('Create Todos System Message', () => {
@@ -38,16 +37,10 @@ describe('Create Todos System Message', () => {
expect(result).toContain('[ ]'); // Checkbox format
});

it('should validate prompt file exists and is readable', () => {
const promptPath = path.join(__dirname, 'create-todos-system-prompt.txt');

expect(() => {
fs.accessSync(promptPath, fs.constants.R_OK);
}).not.toThrow();

const stats = fs.statSync(promptPath);
expect(stats.isFile()).toBe(true);
expect(stats.size).toBeGreaterThan(0);
it('should validate prompt file is loaded', () => {
expect(createTodosSystemPrompt).toBeDefined();
expect(typeof createTodosSystemPrompt).toBe('string');
expect(createTodosSystemPrompt.length).toBeGreaterThan(0);
});

it('should not contain any template variables', () => {
@@ -1,23 +1,8 @@
import * as fs from 'node:fs';
import * as path from 'node:path';

/**
* Loads the create todos prompt template
*/
function loadPrompt(): string {
const promptPath = path.join(__dirname, 'create-todos-system-prompt.txt');

try {
const content = fs.readFileSync(promptPath, 'utf-8');
return content;
} catch (error) {
throw new Error(`Failed to load prompt template: ${String(error)}`);
}
}
import createTodosSystemPrompt from './create-todos-system-prompt.txt';

/**
* Export the template function for use in step files
*/
export const getCreateTodosSystemMessage = (): string => {
return loadPrompt();
return createTodosSystemPrompt;
};
@@ -1,6 +1,5 @@
import * as fs from 'node:fs';
import * as path from 'node:path';
import { describe, expect, it } from 'vitest';
import createDocsTodosSystemPrompt from './create-docs-todos-system-prompt.txt';
import { getCreateDocsTodosSystemMessage } from './get-create-docs-todos-system-message';

describe('Create Docs Todos System Message', () => {
@@ -39,16 +38,10 @@ describe('Create Docs Todos System Message', () => {
expect(result).toContain('.sql files');
});

it('should validate prompt file exists and is readable', () => {
const promptPath = path.join(__dirname, 'create-docs-todos-system-prompt.txt');

expect(() => {
fs.accessSync(promptPath, fs.constants.R_OK);
}).not.toThrow();

const stats = fs.statSync(promptPath);
expect(stats.isFile()).toBe(true);
expect(stats.size).toBeGreaterThan(0);
it('should validate prompt file is loaded', () => {
expect(createDocsTodosSystemPrompt).toBeDefined();
expect(typeof createDocsTodosSystemPrompt).toBe('string');
expect(createDocsTodosSystemPrompt.length).toBeGreaterThan(0);
});

it('should not contain any template variables', () => {
@@ -1,23 +1,8 @@
import * as fs from 'node:fs';
import * as path from 'node:path';

/**
* Loads the create docs todos prompt template
*/
function loadPrompt(): string {
const promptPath = path.join(__dirname, 'create-docs-todos-system-prompt.txt');

try {
const content = fs.readFileSync(promptPath, 'utf-8');
return content;
} catch (error) {
throw new Error(`Failed to load prompt template: ${String(error)}`);
}
}
import createDocsTodosSystemPrompt from './create-docs-todos-system-prompt.txt';

/**
* Export the template function for use in step files
*/
export const getCreateDocsTodosSystemMessage = (): string => {
return loadPrompt();
return createDocsTodosSystemPrompt;
};
@@ -44,7 +44,7 @@ export type DocsAgentStepOutput = z.infer<typeof DocsAgentStepOutputSchema>;
/**
* Main documentation agent that processes todos and creates documentation
*/
export async function runDocsAgentStep(params: DocsAgentStepInput): Promise<DocsAgentStepOutput> {
export async function runDocsAgentStep(params: DocsAgentStepInput): Promise<void> {
// Validate input
const validatedParams = DocsAgentStepInputSchema.parse(params);

@@ -53,24 +53,6 @@ export async function runDocsAgentStep(params: DocsAgentStepInput): Promise<Docs
const todoList = validatedParams.todoList;
const dataSourceId = validatedParams.context.dataSourceId;

// Create abort controller for handling idle tool
const abortController = new AbortController();

// Initialize tracking variables
let documentationCreated = false;
let clarificationNeeded = false;
let filesCreated = 0;
const toolsUsed = new Set<string>();
let finished = false;
let updatedClarificationQuestion: any = undefined;

console.info('[DocsAgent] Starting docs agent execution', {
hasSandbox: !!sandbox,
todoListLength: todoList?.length || 0,
organizationId: validatedParams.organizationId,
dataSourceId,
});

try {
// Get current working directory from sandbox
let cwdMessage = '';
@@ -98,7 +80,7 @@ export async function runDocsAgentStep(params: DocsAgentStepInput): Promise<Docs
const userMessage = `${validatedParams.message}`;
const todoMessage = `<todo-list>\n${todoList}\n</todo-list>`;

const messages: any[] = [
const messages: ModelMessage[] = [
{
role: 'user',
content: userMessage,
@@ -120,111 +102,15 @@ export async function runDocsAgentStep(params: DocsAgentStepInput): Promise<Docs
// Execute the docs agent
const result = await docsAgent.stream({ messages });

// Process the stream to extract results
let stepCount = 0;
let lastTextContent = '';
// Wait for the response and extract tool calls
const response = await result.response;

for await (const chunk of result.fullStream) {
// Check if aborted
if (abortController.signal.aborted) {
break;
}

// Track step count
if ((chunk as any).type === 'step-start') {
stepCount++;
console.log(`[DocsAgent] Step ${stepCount} started`);
}

// Log text chunks to see what the agent is thinking
if (chunk.type === 'text-delta' && (chunk as any).textDelta) {
lastTextContent += (chunk as any).textDelta;
}

if ((chunk as any).type === 'step-finish') {
console.log(
`[DocsAgent] Step ${stepCount} finished. Last text: ${lastTextContent.slice(0, 200)}...`
);
lastTextContent = '';
}

// Track tool usage
if (chunk.type === 'tool-call') {
console.log(
`[DocsAgent] Tool call: ${chunk.toolName} with args:`,
JSON.stringify((chunk as any).args).slice(0, 200)
);
toolsUsed.add(chunk.toolName);

// Track specific tool outcomes
if (chunk.toolName === 'createFiles' || chunk.toolName === 'editFiles') {
console.log(
`[DocsAgent] Tool ${chunk.toolName} called - marking documentationCreated = true`
);
documentationCreated = true;
filesCreated++;
}

if (chunk.toolName === 'updateClarificationsFile') {
clarificationNeeded = true;
}

if (chunk.toolName === 'idleTool') {
console.log('[DocsAgent] Idle tool called - aborting stream');
finished = true;
abortController.abort();
}
}

// Check for clarification updates in tool results
if (chunk.type === 'tool-result') {
if (chunk.toolName === 'updateClarificationsFile' && (chunk as any).result) {
// Store any new clarification questions
const resultData = (chunk as any).result as Record<string, unknown>;
if (resultData.clarificationQuestion) {
updatedClarificationQuestion = resultData.clarificationQuestion;
}
}
}
if (!response || !Array.isArray(response.messages)) {
throw new Error('Docs agent returned an invalid response shape (missing messages array)');
}

console.log('[DocsAgent] Final results:', {
documentationCreated,
filesCreated,
toolsUsed: Array.from(toolsUsed),
finished,
});

return {
todos: validatedParams.todos.split('\n').filter((line) => line.trim()),
todoList: validatedParams.todoList,
documentationCreated,
clarificationNeeded,
clarificationQuestion: updatedClarificationQuestion,
finished,
metadata: {
filesCreated,
toolsUsed: Array.from(toolsUsed),
},
};
return;
} catch (error) {
// Handle abort error gracefully
if (error instanceof Error && error.name === 'AbortError') {
console.info('[DocsAgent] Stream aborted successfully (idle tool called)');

return {
todos: validatedParams.todos.split('\n').filter((line) => line.trim()),
todoList: validatedParams.todoList,
documentationCreated,
clarificationNeeded,
finished: true,
metadata: {
filesCreated,
toolsUsed: Array.from(toolsUsed),
},
};
}

console.error('[DocsAgent] Error executing docs agent:', error);
throw new Error(
`Docs agent execution failed: ${error instanceof Error ? error.message : 'Unknown error'}`
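The message array is now typed as ModelMessage[] instead of any[]. A minimal sketch of the shape this enforces; the import source for ModelMessage is assumed to be the ai SDK and is not shown in this hunk:

import type { ModelMessage } from 'ai'; // assumed import path

const exampleMessages: ModelMessage[] = [
  { role: 'user', content: 'Document the revenue metric.' },
  { role: 'user', content: '<todo-list>\n- [ ] document the orders model\n</todo-list>' },
];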
@@ -1,5 +1,4 @@
import * as fs from 'node:fs';
import * as path from 'node:path';
import dashboardToolDescription from './dashboard-tool-description.txt';

/**
* Template parameters for the dashboard tool prompt
@@ -9,24 +8,9 @@ export interface DashboardToolTemplateParams {
date: string;
}

/**
* Loads the dashboard tool prompt template and replaces variables
*/
function loadAndProcessPrompt(): string {
const promptPath = path.join(__dirname, 'dashboard-tool-description.txt');

try {
const content = fs.readFileSync(promptPath, 'utf-8');

return content;
} catch (error) {
throw new Error(`Failed to load prompt template: ${String(error)}`);
}
}

/**
* Export the template function for use in dashboard tool
*/
export const getDashboardToolDescription = (): string => {
return loadAndProcessPrompt();
return dashboardToolDescription;
};
@@ -1,12 +1,10 @@
import * as fs from 'node:fs';
import * as path from 'node:path';
import { describe, expect, it } from 'vitest';
import dashboardToolDescription from './dashboard-tool-description.txt';
import { getDashboardToolDescription } from './get-dashboard-tool-description';

describe('Dashboard Tool Prompt Instructions', () => {
it('should validate template file has no template variables', () => {
const promptPath = path.join(__dirname, 'dashboard-tool-description.txt');
const content = fs.readFileSync(promptPath, 'utf-8');
const content = dashboardToolDescription;

// Find any template variables in the file
const templateVariablePattern = /\{\{([^}]+)\}\}/g;
@@ -32,9 +30,7 @@ describe('Dashboard Tool Prompt Instructions', () => {
expect(result.length).toBeGreaterThan(0);

// Should return the raw content from the text file
const promptPath = path.join(__dirname, 'dashboard-tool-description.txt');
const expectedContent = fs.readFileSync(promptPath, 'utf-8');
expect(result).toBe(expectedContent);
expect(result).toBe(dashboardToolDescription);
});

it('should contain expected sections from the dashboard description', () => {
@@ -56,6 +56,7 @@ describe('modify-dashboards-start', () => {
file_type: 'dashboard',
version_number: 1,
status: 'loading',
file: { text: 'dashboard content' }, // Need file content for entry to be created
},
],
};
@@ -97,6 +98,7 @@ describe('modify-dashboards-start', () => {
file_type: 'dashboard',
version_number: 1,
status: 'loading',
file: { text: 'dashboard content' }, // Need file content for entry to be created
},
],
};
@@ -150,6 +152,7 @@ describe('modify-dashboards-start', () => {
file_type: 'dashboard',
version_number: 1,
status: 'loading',
file: { text: 'dashboard content' }, // Need file content for entry to be created
},
],
};
@@ -1,12 +1,10 @@
import * as fs from 'node:fs';
import * as path from 'node:path';
import { describe, expect, it } from 'vitest';
import { getMetricToolDescriptionPrompt } from './get-metric-tool-description';
import metricToolDescription from './metric-tool-description.txt';

describe('Metric Tool Description Instructions', () => {
it('should validate template file contains expected variables', () => {
const promptPath = path.join(__dirname, 'metric-tool-description.txt');
const content = fs.readFileSync(promptPath, 'utf-8');
const content = metricToolDescription;

// Expected template variables
const expectedVariables = ['sql_dialect_guidance', 'date'];
@@ -1,5 +1,4 @@
import * as fs from 'node:fs';
import * as path from 'node:path';
import metricToolDescription from './metric-tool-description.txt';

/**
* Template parameters for the metric tool description prompt
@@ -9,45 +8,6 @@ export interface MetricToolTemplateParams {
date: string;
}

// Cache the prompt content to avoid repeated file reads
let cachedPrompt: string | null = null;

// Get the prompt content, with caching
const getPromptContent = (): string => {
if (cachedPrompt) {
return cachedPrompt;
}

// Try multiple possible locations for the prompt file
const possiblePaths = [
// Source location (for tests)
path.join(__dirname, 'metric-tool-description.txt'),
// Alternative source location
path.join(__dirname, '..', 'helpers', 'metric-tool-description.txt'),
// Compiled location
path.join(
process.cwd(),
'packages/ai/dist/tools/visualization-tools/metrics/helpers/metric-tool-description.txt'
),
// Direct source path from project root
path.join(
process.cwd(),
'packages/ai/src/tools/visualization-tools/metrics/helpers/metric-tool-description.txt'
),
];

for (const promptPath of possiblePaths) {
try {
cachedPrompt = fs.readFileSync(promptPath, 'utf-8');
return cachedPrompt;
} catch {
// Try next path
}
}

throw new Error('Failed to load metric tool description prompt from any location');
};

/**
* Loads the metric tool description prompt template and replaces variables
*/
@@ -56,30 +16,20 @@ function loadAndProcessPrompt(sqlDialectGuidance: string): string {
throw new Error('SQL dialect guidance is required');
}

try {
const content = getPromptContent();
// Replace template variables
const currentDate = new Date().toISOString().split('T')[0] ?? '';
const processedContent = metricToolDescription
.replace(/\{\{sql_dialect_guidance\}\}/g, sqlDialectGuidance)
.replace(/\{\{date\}\}/g, currentDate);

// Replace template variables
const currentDate = new Date().toISOString().split('T')[0] ?? '';
const processedContent = content
.replace(/\{\{sql_dialect_guidance\}\}/g, sqlDialectGuidance)
.replace(/\{\{date\}\}/g, currentDate);

return processedContent;
} catch (error) {
throw new Error(`Failed to load prompt template: ${String(error)}`);
}
return processedContent;
}

/**
* Export the template function for use in metric tool
*/
export const getMetricToolDescription = (): string => {
try {
return getPromptContent();
} catch (error) {
throw new Error(`Failed to load prompt template: ${String(error)}`);
}
return metricToolDescription;
};

/**
@@ -7,11 +7,15 @@ vi.mock('@buster/database', () => ({
updateMessageEntries: vi.fn(),
}));

vi.mock('../../../utils/streaming/optimistic-json-parser', () => ({
// Mock the optimistic JSON parser
const mockParse = vi.fn();
const mockGetOptimisticValue = vi.fn();

vi.mock('../../../../utils/streaming/optimistic-json-parser', () => ({
OptimisticJsonParser: {
parse: vi.fn(),
parse: mockParse,
},
getOptimisticValue: vi.fn(),
getOptimisticValue: mockGetOptimisticValue,
}));

describe('createModifyMetricsDelta', () => {
@@ -44,34 +48,28 @@ describe('createModifyMetricsDelta', () => {

describe('string delta handling', () => {
it('should accumulate string deltas in argsText', async () => {
const { OptimisticJsonParser, getOptimisticValue } = await import(
'../../../../utils/streaming/optimistic-json-parser'
);
(OptimisticJsonParser.parse as any).mockReturnValue({
mockParse.mockReturnValue({
parsed: null,
isComplete: false,
extractedValues: new Map(),
});
(getOptimisticValue as any).mockReturnValue([]);
mockGetOptimisticValue.mockReturnValue([]);

const deltaHandler = createModifyMetricsDelta(context, state);
await deltaHandler({ inputTextDelta: '{"files":[', toolCallId: 'tool-123', messages: [] });

expect(state.argsText).toBe('{"files":[');
expect(OptimisticJsonParser.parse).toHaveBeenCalledWith('{"files":[');
expect(mockParse).toHaveBeenCalledWith('{"files":[');
});

it('should update parsedArgs when JSON is parsed', async () => {
const { OptimisticJsonParser, getOptimisticValue } = await import(
'../../../../utils/streaming/optimistic-json-parser'
);
const parsedData = { files: [{ id: 'metric-1', yml_content: 'content' }] };
(OptimisticJsonParser.parse as any).mockReturnValue({
mockParse.mockReturnValue({
parsed: parsedData,
isComplete: true,
extractedValues: new Map(),
});
(getOptimisticValue as any).mockReturnValue([]);
mockGetOptimisticValue.mockReturnValue([]);

const deltaHandler = createModifyMetricsDelta(context, state);
await deltaHandler({ inputTextDelta: 'complete json', toolCallId: 'tool-123', messages: [] });
@@ -81,15 +79,12 @@ describe('createModifyMetricsDelta', () => {
});

it('should update state files from parsed array', async () => {
const { OptimisticJsonParser, getOptimisticValue } = await import(
'../../../../utils/streaming/optimistic-json-parser'
);
(OptimisticJsonParser.parse as any).mockReturnValue({
mockParse.mockReturnValue({
parsed: {},
isComplete: false,
extractedValues: new Map(),
});
(getOptimisticValue as any).mockReturnValue([
mockGetOptimisticValue.mockReturnValue([
{ id: 'metric-1', yml_content: 'content1', name: 'Metric 1' },
{ id: 'metric-2', yml_content: 'content2' },
]);
@@ -128,12 +123,12 @@ describe('createModifyMetricsDelta', () => {
},
];

(OptimisticJsonParser.parse as any).mockReturnValue({
mockParse.mockReturnValue({
parsed: {},
isComplete: false,
extractedValues: new Map(),
});
(getOptimisticValue as any).mockReturnValue([
mockGetOptimisticValue.mockReturnValue([
{ id: 'metric-1', yml_content: 'updated content', name: 'Updated Name' },
]);

@@ -149,17 +144,14 @@ describe('createModifyMetricsDelta', () => {
});

it('should handle partial file data', async () => {
const { OptimisticJsonParser, getOptimisticValue } = await import(
'../../../../utils/streaming/optimistic-json-parser'
);
(OptimisticJsonParser.parse as any).mockReturnValue({
mockParse.mockReturnValue({
parsed: {},
isComplete: false,
extractedValues: new Map(),
});

// First delta with just ID
(getOptimisticValue as any).mockReturnValue([{ id: 'metric-1' }]);
mockGetOptimisticValue.mockReturnValue([{ id: 'metric-1' }]);

const deltaHandler = createModifyMetricsDelta(context, state);
await deltaHandler({ inputTextDelta: 'delta1', toolCallId: 'tool-123', messages: [] });
@@ -173,15 +165,12 @@ describe('createModifyMetricsDelta', () => {
});

it('should update database when messageId and toolCallId exist', async () => {
const { OptimisticJsonParser, getOptimisticValue } = await import(
'../../../../utils/streaming/optimistic-json-parser'
);
(OptimisticJsonParser.parse as any).mockReturnValue({
mockParse.mockReturnValue({
parsed: {},
isComplete: false,
extractedValues: new Map(),
});
(getOptimisticValue as any).mockReturnValue([{ id: 'metric-1', yml_content: 'content' }]);
mockGetOptimisticValue.mockReturnValue([{ id: 'metric-1', yml_content: 'content' }]);

const deltaHandler = createModifyMetricsDelta(context, state);
await deltaHandler({ inputTextDelta: 'delta', toolCallId: 'tool-123', messages: [] });
@@ -195,18 +184,15 @@ describe('createModifyMetricsDelta', () => {
});

it('should not update database when messageId is missing', async () => {
const { OptimisticJsonParser, getOptimisticValue } = await import(
'../../../../utils/streaming/optimistic-json-parser'
);
const contextWithoutMessageId = { ...context };
delete contextWithoutMessageId.messageId;

(OptimisticJsonParser.parse as any).mockReturnValue({
mockParse.mockReturnValue({
parsed: {},
isComplete: false,
extractedValues: new Map(),
});
(getOptimisticValue as any).mockReturnValue([{ id: 'metric-1', yml_content: 'content' }]);
mockGetOptimisticValue.mockReturnValue([{ id: 'metric-1', yml_content: 'content' }]);

const deltaHandler = createModifyMetricsDelta(contextWithoutMessageId, state);
await deltaHandler({ inputTextDelta: 'delta', toolCallId: 'tool-123', messages: [] });
@@ -215,10 +201,6 @@ describe('createModifyMetricsDelta', () => {
});

it('should filter undefined entries before creating reasoning message', async () => {
const { OptimisticJsonParser, getOptimisticValue } = await import(
'../../../../utils/streaming/optimistic-json-parser'
);

// State with undefined entries
state.files = [
undefined as any,
@@ -227,12 +209,12 @@ describe('createModifyMetricsDelta', () => {
{ id: 'metric-2', yml_content: 'content2', status: 'loading' },
];

(OptimisticJsonParser.parse as any).mockReturnValue({
mockParse.mockReturnValue({
parsed: {},
isComplete: false,
extractedValues: new Map(),
});
(getOptimisticValue as any).mockReturnValue([]);
mockGetOptimisticValue.mockReturnValue([]);

const deltaHandler = createModifyMetricsDelta(context, state);
await deltaHandler({ inputTextDelta: 'delta', toolCallId: 'tool-123', messages: [] });
@@ -247,17 +229,14 @@ describe('createModifyMetricsDelta', () => {
});

it('should handle database update errors gracefully', async () => {
const { OptimisticJsonParser, getOptimisticValue } = await import(
'../../../../utils/streaming/optimistic-json-parser'
);
(updateMessageEntries as any).mockRejectedValue(new Error('Database error'));

(OptimisticJsonParser.parse as any).mockReturnValue({
mockParse.mockReturnValue({
parsed: {},
isComplete: false,
extractedValues: new Map(),
});
(getOptimisticValue as any).mockReturnValue([{ id: 'metric-1', yml_content: 'content' }]);
mockGetOptimisticValue.mockReturnValue([{ id: 'metric-1', yml_content: 'content' }]);

const deltaHandler = createModifyMetricsDelta(context, state);

@@ -316,11 +295,8 @@ describe('createModifyMetricsDelta', () => {
describe('logging', () => {
it('should log correct information', async () => {
const consoleSpy = vi.spyOn(console, 'info').mockImplementation(() => {});
const { OptimisticJsonParser, getOptimisticValue } = await import(
'../../../../utils/streaming/optimistic-json-parser'
);

(OptimisticJsonParser.parse as any).mockReturnValue({
mockParse.mockReturnValue({
parsed: {},
isComplete: false,
extractedValues: new Map(),
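A related variant of the shared-mock pattern above, using vi.hoisted so the mock functions are guaranteed to exist before the hoisted vi.mock factory runs; this is a sketch, not part of this commit:

import { vi } from 'vitest';

// vi.hoisted lifts this initializer above the hoisted vi.mock call, avoiding
// "cannot access before initialization" issues with top-level mock variables.
const { mockParse, mockGetOptimisticValue } = vi.hoisted(() => ({
  mockParse: vi.fn(),
  mockGetOptimisticValue: vi.fn(),
}));

vi.mock('../../../../utils/streaming/optimistic-json-parser', () => ({
  OptimisticJsonParser: { parse: mockParse },
  getOptimisticValue: mockGetOptimisticValue,
}));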
@@ -1,11 +1,10 @@
import { updateMessageFields } from '@buster/database';
import { updateMessageEntries } from '@buster/database';
import { beforeEach, describe, expect, it, vi } from 'vitest';
import { createModifyMetricsStart } from './modify-metrics-start';
import type { ModifyMetricsInput, ModifyMetricsState } from './modify-metrics-tool';

vi.mock('@buster/database', () => ({
updateMessageFields: vi.fn(),
createMessageFields: vi.fn(),
updateMessageEntries: vi.fn(),
}));

describe('createModifyMetricsStart', () => {
@@ -34,7 +33,7 @@ describe('createModifyMetricsStart', () => {
};
});

it('should initialize state with processingStartTime and toolCallId', async () => {
it('should initialize state with toolCallId', async () => {
const input: ModifyMetricsInput = {
files: [
{ id: 'metric-1', yml_content: 'content1' },
@@ -43,151 +42,146 @@ describe('createModifyMetricsStart', () => {
};

const startHandler = createModifyMetricsStart(context, state);
await startHandler({ ...input, toolCallId: 'tool-123', messages: [] });
await startHandler({ toolCallId: 'tool-123', messages: [] });

expect(state.toolCallId).toBeDefined();
expect(state.toolCallId).toMatch(/^modify-metrics-\d+-[a-z0-9]+$/);
expect(state.toolCallId).toBe('tool-123');
});

it('should create database entries when messageId exists', async () => {
context.messageId = 'msg-123';
const input: ModifyMetricsInput = {
files: [{ id: 'metric-1', yml_content: 'content' }],
};
// Add files to state before the call since that's what triggers entry creation
state.files = [
{
id: 'metric-1',
file_type: 'metric',
version_number: 1,
status: 'loading',
file: { text: 'content' },
},
];

const startHandler = createModifyMetricsStart(context, state);
await startHandler({ ...input, toolCallId: 'tool-123', messages: [] });
await startHandler({ toolCallId: 'tool-123', messages: [] });

expect(updateMessageFields).toHaveBeenCalledTimes(1);
expect(updateMessageFields).toHaveBeenCalledWith('msg-123', {
reasoning: expect.arrayContaining([
expect.objectContaining({
id: state.toolCallId,
type: 'files',
title: 'Modifying metrics...',
status: 'loading',
file_ids: [],
files: {},
}),
]),
rawLlmMessages: expect.arrayContaining([
expect.objectContaining({
type: 'tool-call',
toolCallId: state.toolCallId,
toolName: 'modify-metrics-file',
args: {},
}),
]),
});
expect(updateMessageEntries).toHaveBeenCalledTimes(1);
expect(updateMessageEntries).toHaveBeenCalledWith(
expect.objectContaining({
messageId: 'msg-123',
mode: 'append',
})
);

expect(state.toolCallId).toBe(state.toolCallId);
expect(state.toolCallId).toBe('tool-123');
});

it('should not create database entries when messageId is missing', async () => {
// No messageId in context
const input: ModifyMetricsInput = {
files: [{ id: 'metric-1', yml_content: 'content' }],
};

const startHandler = createModifyMetricsStart(context, state);
await startHandler({ ...input, toolCallId: 'tool-123', messages: [] });
await startHandler({ toolCallId: 'tool-123', messages: [] });

expect(updateMessageFields).not.toHaveBeenCalled();
expect(state.toolCallId).toBeUndefined();
expect(updateMessageEntries).not.toHaveBeenCalled();
expect(state.toolCallId).toBe('tool-123'); // toolCallId should still be set
});

it('should handle database errors gracefully', async () => {
context.messageId = 'msg-123';
(updateMessageFields as any).mockRejectedValue(new Error('Database error'));
// Add files to state to trigger database update
state.files = [
{
id: 'metric-1',
file_type: 'metric',
version_number: 1,
status: 'loading',
file: { text: 'content' },
},
];

(updateMessageEntries as any).mockRejectedValue(new Error('Database error'));

const input: ModifyMetricsInput = {
files: [{ id: 'metric-1', yml_content: 'content' }],
};
const consoleSpy = vi.spyOn(console, 'error').mockImplementation(() => {});

const startHandler = createModifyMetricsStart(context, state);

// Should not throw
await expect(
startHandler({ ...input, toolCallId: 'tool-123', messages: [] })
startHandler({ toolCallId: 'tool-123', messages: [] })
).resolves.not.toThrow();

expect(updateMessageFields).toHaveBeenCalled();
expect(updateMessageEntries).toHaveBeenCalled();
// State should still be initialized even if database fails
expect(state.toolCallId).toBeDefined();
// But reasoningEntryId should not be set due to error
expect(state.toolCallId).toBeUndefined();
expect(state.toolCallId).toBe('tool-123');

expect(consoleSpy).toHaveBeenCalledWith(
'[modify-metrics] Error updating entries on start:',
expect.any(Error)
);

consoleSpy.mockRestore();
});

it('should handle empty files array', async () => {
context.messageId = 'msg-123';
const input: ModifyMetricsInput = {
files: [],
};
// state.files is already empty from beforeEach

const startHandler = createModifyMetricsStart(context, state);
await startHandler({ ...input, toolCallId: 'tool-123', messages: [] });
await startHandler({ toolCallId: 'tool-123', messages: [] });

expect(state.toolCallId).toBeDefined();
expect(updateMessageFields).toHaveBeenCalled();
expect(state.toolCallId).toBe('tool-123');
// Should not call database when no files exist
expect(updateMessageEntries).not.toHaveBeenCalled();
});

it('should log correct information', async () => {
it('should not log information', async () => {
const consoleSpy = vi.spyOn(console, 'info').mockImplementation(() => {});

const input: ModifyMetricsInput = {
files: [
{ id: 'metric-1', yml_content: 'content1' },
{ id: 'metric-2', yml_content: 'content2' },
],
};

const startHandler = createModifyMetricsStart(context, state);
await startHandler({ ...input, toolCallId: 'tool-123', messages: [] });
await startHandler({ toolCallId: 'tool-123', messages: [] });

expect(consoleSpy).toHaveBeenCalledWith(
'[modify-metrics] Starting metric modification',
expect.objectContaining({
fileCount: 2,
messageId: undefined,
toolCallId: state.toolCallId,
timestamp: expect.any(String),
})
);
// The implementation doesn't log info messages
expect(consoleSpy).not.toHaveBeenCalled();

consoleSpy.mockRestore();
});

it('should work with both messageId present and absent', async () => {
// First test without messageId
const input: ModifyMetricsInput = {
files: [{ id: 'metric-1', yml_content: 'content' }],
};

let startHandler = createModifyMetricsStart(context, state);
await startHandler({ ...input, toolCallId: 'tool-123', messages: [] });
await startHandler({ toolCallId: 'tool-123', messages: [] });

expect(updateMessageFields).not.toHaveBeenCalled();
expect(state.toolCallId).toBeUndefined();
expect(updateMessageEntries).not.toHaveBeenCalled();
expect(state.toolCallId).toBe('tool-123');

// Reset state for second test
state = {
argsText: '',
files: [],
files: [
{
id: 'metric-1',
file_type: 'metric',
version_number: 1,
status: 'loading',
file: { text: 'content' },
},
],
};

// Reset mocks for clean second test
vi.clearAllMocks();
// Mock successful database update for the second test
(updateMessageFields as any).mockResolvedValue(undefined);
(updateMessageEntries as any).mockResolvedValue(undefined);

// Now test with messageId
context.messageId = 'msg-456';
startHandler = createModifyMetricsStart(context, state);
await startHandler({ ...input, toolCallId: 'tool-456', messages: [] });
await startHandler({ toolCallId: 'tool-456', messages: [] });

expect(updateMessageFields).toHaveBeenCalledWith('msg-456', expect.any(Object));
// The reasoningEntryId should be set to the toolCallId after successful database update
expect(state.toolCallId).toBe(state.toolCallId);
expect(state.toolCallId).toBeDefined();
expect(updateMessageEntries).toHaveBeenCalledWith(
expect.objectContaining({
messageId: 'msg-456',
mode: 'append',
})
);
expect(state.toolCallId).toBe('tool-456');
});
});
@@ -0,0 +1,4 @@
declare module '*.txt' {
const content: string;
export default content;
}
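With this ambient declaration in place, TypeScript types every .txt import as a string; for example:

import docsAgentPrompt from './docs-agent-prompt.txt';
const firstLine: string = docsAgentPrompt.split('\n')[0] ?? '';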
@@ -50,9 +50,7 @@ export type DocsAgentWorkflowOutput = z.infer<typeof docsAgentWorkflowOutputSche
* 3. Create TODO list for documentation tasks
* 4. Execute the docs agent to create documentation
*/
export async function runDocsAgentWorkflow(
input: DocsAgentWorkflowInput
): Promise<DocsAgentWorkflowOutput> {
export async function runDocsAgentWorkflow(input: DocsAgentWorkflowInput): Promise<void> {
// Validate input
const validatedInput = docsAgentWorkflowInputSchema.parse(input);

@@ -86,7 +84,7 @@ export async function runDocsAgentWorkflow(
});

// Step 4: Execute the docs agent with all the prepared data
const agentResult = await runDocsAgentStep({
const _agentResult = await runDocsAgentStep({
todos: todosResult.todos,
todoList: todosResult.todos, // Using todos as todoList
message: treeResult.message,
@@ -96,15 +94,7 @@ export async function runDocsAgentWorkflow(
});

// Return the final results from the agent
return {
todos: agentResult.todos,
todoList: agentResult.todoList,
documentationCreated: agentResult.documentationCreated,
clarificationNeeded: agentResult.clarificationNeeded,
clarificationQuestion: agentResult.clarificationQuestion,
finished: agentResult.finished,
metadata: agentResult.metadata,
};
return;
}

// Default export for backward compatibility if needed
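Since the workflow now resolves to void, a caller simply awaits completion and relies on side effects; a one-line sketch, where input is whatever satisfies docsAgentWorkflowInputSchema:

await runDocsAgentWorkflow(input); // resolves to void; results are persisted by the agent's tools, not returned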
@@ -1,11 +1,28 @@
import { baseConfig } from '@buster/vitest-config';
import { defineConfig } from 'vitest/config';
import fs from 'node:fs';
import path from 'node:path';

// Custom plugin to handle .txt file imports
const textFilePlugin = () => ({
name: 'text-file-plugin',
transform(code: string, id: string) {
if (id.endsWith('.txt')) {
const content = fs.readFileSync(id, 'utf-8');
return {
code: `export default ${JSON.stringify(content)};`,
map: null,
};
}
},
});

export default defineConfig(async (env) => {
const base = await baseConfig(env);

return {
...base,
plugins: [...(base.plugins || []), textFilePlugin()],
test: {
...base.test,
// Increase timeout for streaming tests
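A minimal check (illustrative; the fixture name is hypothetical) that the plugin above turns a .txt file into a default string export when Vitest runs:

import { describe, expect, it } from 'vitest';
import samplePrompt from './sample-prompt.txt'; // hypothetical fixture

describe('txt file plugin', () => {
  it('exposes the file contents as a string', () => {
    expect(typeof samplePrompt).toBe('string');
    expect(samplePrompt.length).toBeGreaterThan(0);
  });
});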