mirror of https://github.com/buster-so/buster.git
feat: enhance documentation and testing capabilities for docs agent workflow
- Added section in CLAUDE.md for direct database access during integration testing.
- Updated `maxSteps` in `docs-agent` to allow for more complex tasks.
- Improved validation in `docs-agent-context` for sandbox instances.
- Enhanced `create-docs-todos` step to handle todos more effectively.
- Introduced comprehensive integration tests for the docs agent workflow, covering various scenarios and edge cases.
- Added test helpers for creating mock dbt projects and managing sandboxes.
- Implemented error handling and logging improvements in the workflow execution process.
This commit is contained in:

parent a08f32fe1d
commit 2f4aeba817
CLAUDE.md (11 changes)
@@ -292,6 +292,17 @@ export async function getWorkspaceSettingsHandler(

- `turbo run test` for running all tests
- Add `--filter=<package-name>` to run tests for specific packages

### Database Access for Integration Testing

- **Direct database queries** - You can run queries against the local database using `psql` with the following connection:

  ```bash
  DATABASE_URL="postgresql://postgres:postgres@127.0.0.1:54322/postgres"
  ```

- **Usage example**:

  ```bash
  psql "postgresql://postgres:postgres@127.0.0.1:54322/postgres" -c "SELECT * FROM users LIMIT 5;"
  ```

- **Purpose** - This is primarily for writing and iterating on integration tests to verify database state and test query behavior
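To make the intended use concrete, here is a minimal sketch of a database-state assertion in a vitest integration test. It assumes the `pg` client is available; the `users` table and its columns are illustrative, not taken from the repository schema.

```typescript
import { Client } from 'pg';
import { afterAll, beforeAll, expect, it } from 'vitest';

// Same local connection string as documented above.
const connectionString = 'postgresql://postgres:postgres@127.0.0.1:54322/postgres';
const client = new Client({ connectionString });

beforeAll(() => client.connect());
afterAll(() => client.end());

it('verifies database state after the code under test runs', async () => {
  // Hypothetical table/columns; adjust to the schema under test.
  const res = await client.query('SELECT id, email FROM users LIMIT 5');
  expect(res.rows.length).toBeGreaterThan(0);
});
```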
## Pre-Completion Workflow

- Always run `turbo run test:unit`, `turbo run lint`, and `turbo run build:dry-run` before making any pull request or finishing a feature, bugfix, etc. to ensure things make it through CI/CD
- You can run all these checks simultaneously with `turbo run build:dry-run lint test:unit`
@@ -9,6 +9,7 @@ import {
  executeSqlDocsAgent,
  grepSearch,
  idleTool,
  lsFiles,
  readFiles,
  sequentialThinking,
  updateClarificationsFile,
@@ -17,7 +18,7 @@ import {
import { Sonnet4 } from '../../utils/models/sonnet-4';

const DEFAULT_OPTIONS = {
-  maxSteps: 18,
+  maxSteps: 30,
  temperature: 0,
  maxTokens: 10000,
  providerOptions: {
@@ -38,6 +39,7 @@ export const docsAgent = new Agent({
  editFiles,
  createFiles,
  deleteFiles,
  lsFiles,
  executeSql: executeSqlDocsAgent, // Use the docs-specific SQL tool that operates as a
  bashExecute,
  updateClarificationsFile,
@@ -24,12 +24,7 @@ export type MessageUserClarifyingQuestion = z.infer<typeof ClarifyingQuestionSch

export const DocsAgentContextSchema = z.object({
  [DocsAgentContextKeys.Sandbox]: z.custom<Sandbox>(
    (val) => {
-      return (
-        val &&
-        typeof val === 'object' &&
-        typeof val.execute === 'function' &&
-        typeof val.cleanup === 'function'
-      );
+      return val && typeof val === 'object' && 'id' in val && 'fs' in val;
    },
    {
      message: 'Invalid Sandbox instance',
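The relaxed validator above accepts any object that merely exposes `id` and `fs`, which is what lets the in-memory mock sandbox introduced in this commit pass validation. As a standalone sketch of the same predicate (the schema here is a hypothetical stand-in, not the exported one):

```typescript
import { z } from 'zod';

// Mirrors the structural check: any object with `id` and `fs` is Sandbox-like.
const sandboxLikeSchema = z.custom<{ id: string; fs: object }>(
  (val) => Boolean(val && typeof val === 'object' && 'id' in val && 'fs' in val),
  { message: 'Invalid Sandbox instance' }
);

console.log(sandboxLikeSchema.safeParse({ id: 'mock-sandbox-1', fs: {} }).success); // true
console.log(sandboxLikeSchema.safeParse({ id: 'mock-sandbox-1' }).success); // false (no `fs`)
```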
@@ -26,7 +26,7 @@ const createDocsTodosStepOutputSchema = z.object({

const DEFAULT_OPTIONS = {
  maxSteps: 1,
  temperature: 0,
-  maxTokens: 300,
+  maxTokens: 3000,
};

const CREATE_TODO_LIST_PROMPT = `### Overview
@@ -82,6 +82,21 @@ You have access to various tools to complete tasks. Adhere to these rules:

- Include confirmation items at phase ends.

---

### How to Use the createTodoList Tool

**IMPORTANT**: When you are ready to create the TODO list, you must call the createTodoList tool with a single parameter called "todos" that contains your entire markdown-formatted TODO list as a string. The markdown should follow the exact format shown in the examples below.

For simple requests like "Document all models", create a basic TODO list:

\`\`\`
# DBT Documentation Todo

## Phase 1: Document Models
- [ ] Review all models in the project
- [ ] Add descriptions to each model
- [ ] Document all columns
- [ ] Create relationships where applicable
- [ ] Push changes and create pull request
\`\`\`

### Examples

#### User Request: "can you update the docs to clarify that deal amount fields in ‘customers’ table actually originate from HubSpot and the closed won amount field should be used when calculating the primary deal value"

\`\`\`
@@ -238,31 +253,16 @@ const createDocsTodosExecution = async ({

  // Extract todos from the result
  let todosString = '';

-  // The createTodoList tool creates a file with the todos
-  // We need to look through the tool results to find the file content
+  // Look for the todos in the tool call arguments
  if (result.toolCalls && Array.isArray(result.toolCalls)) {
    for (const toolCall of result.toolCalls) {
-      if (toolCall.toolName === 'createTodoList') {
-        // The tool creates a file, so we need to look at the result
-        const toolResults = result.toolResults || [];
-        for (const toolResult of toolResults) {
-          if (toolResult.toolCallId === toolCall.toolCallId) {
-            // Extract the file content from the result
-            if (toolResult.result && typeof toolResult.result === 'object') {
-              const resultObj = toolResult.result as Record<string, unknown>;
-              const fileObj = resultObj.file as Record<string, unknown> | undefined;
-              if (fileObj?.text && typeof fileObj.text === 'string') {
-                todosString = fileObj.text;
-              } else if (resultObj.text && typeof resultObj.text === 'string') {
-                todosString = resultObj.text;
-              } else if (typeof resultObj === 'string') {
-                todosString = resultObj;
-              }
-            }
-            break;
-          }
-        }
+      if (toolCall.toolName === 'createTodoList' && toolCall.args) {
+        // The todos are in the args passed to the tool
+        const args = toolCall.args as Record<string, unknown>;
+        if (args.todos && typeof args.todos === 'string') {
+          todosString = args.todos;
+          break;
+        }
        break;
      }
    }
  }
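For orientation, the rewritten extraction reads the todo markdown straight from the tool call's arguments rather than from tool results. Below is a self-contained sketch of that logic; the `ToolCallLike` shape is an assumption inferred from the fields this step reads (`toolName`, `toolCallId`, `args`), not a documented SDK type.

```typescript
// Assumed shape, based on the fields the step accesses.
interface ToolCallLike {
  toolCallId: string;
  toolName: string;
  args: Record<string, unknown>;
}

const toolCalls: ToolCallLike[] = [
  {
    toolCallId: 'call_1',
    toolName: 'createTodoList',
    args: { todos: '# DBT Documentation Todo\n\n## Phase 1: Document Models\n- [ ] Review all models' },
  },
];

// Same extraction strategy as above: take `todos` from the call arguments.
let todosString = '';
for (const toolCall of toolCalls) {
  if (toolCall.toolName === 'createTodoList' && typeof toolCall.args.todos === 'string') {
    todosString = toolCall.args.todos;
    break;
  }
}
console.log(todosString.split('\n')[0]); // "# DBT Documentation Todo"
```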
@@ -79,6 +79,7 @@ const docsAgentExecution = async ({

  const result = await docsAgent.stream(messages, {
    instructions,
    runtimeContext,
    toolChoice: 'required',
+   maxSteps: 50, // Allow more steps for complex documentation tasks
  });
@@ -88,14 +89,34 @@ const docsAgentExecution = async ({

  let filesCreated = 0;
  const toolsUsed = new Set<string>();
  let finished = false;
+ let stepCount = 0;
+ let lastTextContent = '';

  for await (const chunk of result.fullStream) {
+   // Track step count
+   if (chunk.type === 'step-start') {
+     stepCount++;
+     console.log(`[DocsAgent] Step ${stepCount} started`);
+   }
+
+   // Log text chunks to see what the agent is thinking
+   if (chunk.type === 'text-delta' && chunk.textDelta) {
+     lastTextContent += chunk.textDelta;
+   }
+
+   if (chunk.type === 'step-finish') {
+     console.log(`[DocsAgent] Step ${stepCount} finished. Last text: ${lastTextContent.slice(0, 200)}...`);
+     lastTextContent = '';
+   }
+
    // Track tool usage
    if (chunk.type === 'tool-call') {
+     console.log(`[DocsAgent] Tool call: ${chunk.toolName} with args:`, JSON.stringify(chunk.args).slice(0, 200));
      toolsUsed.add(chunk.toolName);

      // Track specific tool outcomes
      if (chunk.toolName === 'createFiles' || chunk.toolName === 'editFiles') {
+       console.log(`[DocsAgent] Tool ${chunk.toolName} called - marking documentationCreated = true`);
        documentationCreated = true;
        filesCreated++;
      }
@@ -127,6 +148,13 @@ const docsAgentExecution = async ({

  // Get the final todo list state
  const finalTodoList = runtimeContext.get(DocsAgentContextKeys.TodoList) as string;

+ console.log('[DocsAgent] Final results:', {
+   documentationCreated,
+   filesCreated,
+   toolsUsed: Array.from(toolsUsed),
+   finished,
+ });
+
  return {
    todos: inputData.todos.split('\n').filter((line) => line.trim()),
    todoList: finalTodoList || inputData.todoList,
@@ -12,9 +12,7 @@ const bashCommandSchema = z.object({
});

const inputSchema = z.object({
-  commands: z
-    .union([bashCommandSchema, z.array(bashCommandSchema)])
-    .describe('Single command or array of bash commands to execute'),
+  commands: z.array(bashCommandSchema),
});

const outputSchema = z.object({
@@ -98,7 +96,9 @@ const executeBashCommands = wrapTraced(

export const executeBash = createTool({
  id: 'execute-bash',
-  description: 'Executes bash commands and captures stdout, stderr, and exit codes',
+  description: `Executes bash commands and captures stdout, stderr, and exit codes.
+
+IMPORTANT: The 'commands' field must be an array of command objects: [{command: "pwd", description: "Print working directory"}, {command: "ls", description: "List files"}]`,
  inputSchema,
  outputSchema,
  execute: async ({
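Given the schema tightening above, here is a short sketch of what the tool now accepts and rejects. The `command`/`description` fields are inferred from the tool's updated description string rather than from the (unshown) `bashCommandSchema` definition.

```typescript
import { z } from 'zod';

// Assumed to match the fields shown in the tool description.
const bashCommandSchema = z.object({
  command: z.string(),
  description: z.string(),
});
const inputSchema = z.object({ commands: z.array(bashCommandSchema) });

// Valid under the new array-only shape.
inputSchema.parse({
  commands: [
    { command: 'pwd', description: 'Print working directory' },
    { command: 'ls', description: 'List files' },
  ],
});

// Previously allowed by the union, now rejected: a bare command object.
const rejected = inputSchema.safeParse({
  commands: { command: 'pwd', description: 'Print working directory' },
});
console.log(rejected.success); // false
```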
@@ -0,0 +1,433 @@
import type { Sandbox } from '@buster/sandbox';
import { currentSpan, initLogger, wrapTraced } from 'braintrust';
import type { Logger as BraintrustLogger } from 'braintrust';
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import type { DocsAgentContext } from '../../context/docs-agent-context';
import docsAgentWorkflow from './docs-agent-workflow';
import {
  TEST_MESSAGES,
  createContextWithClarifications,
  createContextWithTodos,
  createPartiallyCompletedContext,
  createTestContext,
  createTestWorkflowInput,
  validateWorkflowOutput,
} from './test-helpers/context-helpers';
import {
  type TestSandboxResult,
  addFilesToSandbox,
  createComplexProjectStructure,
  createFilesWithMissingDocs,
  createIntegrationTestSandbox,
  createMalformedYamlFiles,
} from './test-helpers/sandbox-helpers';

describe('docs-agent-workflow', () => {
  let testSandbox: TestSandboxResult | null = null;
  let braintrustLogger: BraintrustLogger<true> | null = null;

  // Initialize Braintrust logger before each test
  beforeEach(() => {
    if (process.env.BRAINTRUST_KEY) {
      braintrustLogger = initLogger({
        apiKey: process.env.BRAINTRUST_KEY,
        projectName: process.env.ENVIRONMENT,
      });
    }
  });

  // Cleanup after each test
  afterEach(async () => {
    if (testSandbox) {
      await testSandbox.cleanup();
      testSandbox = null;
    }
    if (braintrustLogger) {
      await braintrustLogger.flush();
      braintrustLogger = null;
    }
  });

  /**
   * Helper to run workflow with Braintrust tracing
   */
  async function runWorkflowWithTracing(input: unknown, metadata: Record<string, unknown> = {}) {
    if (!braintrustLogger) {
      // Run without tracing if no Braintrust key
      const run = docsAgentWorkflow.createRun();
      return await run.start({ inputData: input as any });
    }

    return await wrapTraced(
      async () => {
        currentSpan().log({
          metadata: {
            testName: expect.getState().currentTestName,
            ...metadata,
          },
        });

        const run = docsAgentWorkflow.createRun();
        return await run.start({ inputData: input as any });
      },
      {
        name: 'Docs Agent Workflow Test',
      }
    )();
  }

  describe('basic workflow execution', () => {
    it('should successfully document a simple dbt project', async () => {
      // Create test sandbox with a minimal project
      testSandbox = await createIntegrationTestSandbox({
        projectOptions: {
          projectName: 'test_analytics',
          companyName: 'TestCo',
          includeDocumentation: false, // Start without docs
          includeTests: false, // Simplify - no tests
          includeMacros: false, // Simplify - no macros
        },
      });

      const context = createTestContext({
        sandbox: testSandbox.sandbox,
      });

      const input = createTestWorkflowInput({
        message: TEST_MESSAGES.documentSpecific, // Use simpler test message
        context,
      });

      const result = await runWorkflowWithTracing(input, {
        testType: 'basic-documentation',
        projectType: 'simple-dbt',
      });

      expect(result).toBeDefined();
      expect(result.status).toBe('success');
      if (result.status === 'success') {
        // Check that the workflow completes
        expect(result.result).toBeDefined();
        expect(result.result.todos).toBeDefined();
        expect(result.result.todoList).toBeDefined();

        // Log what actually happened for debugging
        console.log('Workflow completed with:', {
          documentationCreated: result.result.documentationCreated,
          filesCreated: result.result.metadata?.filesCreated,
          toolsUsed: result.result.metadata?.toolsUsed,
          finished: result.result.finished,
        });

        // For now, we're just checking that the workflow runs without errors
        // The mock sandbox doesn't actually create files, but the agent should attempt to
      }
    }, 90000); // Increase timeout to 90 seconds

    it('should handle pre-populated todo list', async () => {
      testSandbox = await createIntegrationTestSandbox();
      const context = createContextWithTodos(testSandbox.sandbox);

      const input = createTestWorkflowInput({
        message: TEST_MESSAGES.completePartial,
        context,
      });

      const result = await runWorkflowWithTracing(input, {
        testType: 'todo-completion',
      });

      expect(result.status).toBe('success');
      if (result.status === 'success') {
        expect(result.result.todos).toBeDefined();
        expect(result.result.todoList).toBeDefined();
      }
    });

    it('should generate clarification questions when needed', async () => {
      testSandbox = await createIntegrationTestSandbox();

      // Add files with unclear business logic
      await addFilesToSandbox(
        testSandbox.sandbox,
        createFilesWithMissingDocs(),
        testSandbox.projectPath
      );

      const context = createTestContext({
        sandbox: testSandbox.sandbox,
      });

      const input = createTestWorkflowInput({
        message: TEST_MESSAGES.askClarification,
        context,
      });

      const result = await runWorkflowWithTracing(input, {
        testType: 'clarification-needed',
      });

      const validation = validateWorkflowOutput(result);
      expect(validation.isValid).toBe(true);

      if (result.status === 'success' && result.result.clarificationNeeded) {
        expect(result.result.clarificationQuestion).toBeDefined();
        if (result.result.clarificationQuestion) {
          expect(result.result.clarificationQuestion.issue).toBeTruthy();
          expect(result.result.clarificationQuestion.context).toBeTruthy();
          expect(result.result.clarificationQuestion.clarificationQuestion).toBeTruthy();
        }
      }
    });
  });

  describe('error handling', () => {
    it('should handle malformed YAML files gracefully', async () => {
      testSandbox = await createIntegrationTestSandbox();

      // Add malformed YAML files
      await addFilesToSandbox(
        testSandbox.sandbox,
        createMalformedYamlFiles(),
        testSandbox.projectPath
      );

      const context = createTestContext({
        sandbox: testSandbox.sandbox,
      });

      const input = createTestWorkflowInput({
        message: TEST_MESSAGES.fixYaml,
        context,
      });

      const result = await runWorkflowWithTracing(input, {
        testType: 'error-handling',
        errorType: 'malformed-yaml',
      });

      // Should complete successfully
      expect(result.status).toBe('success');
      if (result.status === 'success') {
        expect(result.result).toBeDefined();
        console.log('Malformed YAML test completed with:', {
          documentationCreated: result.result.documentationCreated,
          clarificationNeeded: result.result.clarificationNeeded,
          toolsUsed: result.result.metadata?.toolsUsed,
        });
      }
    });

    it('should handle missing sandbox gracefully', async () => {
      const context = createTestContext({
        sandbox: null as unknown as Sandbox, // Intentionally pass null
      });

      const input = createTestWorkflowInput({
        message: TEST_MESSAGES.documentAll,
        context,
      });

      const result = await runWorkflowWithTracing(input, {
        testType: 'error-handling',
        errorType: 'missing-sandbox',
      });

      // Workflow should fail gracefully
      expect(result.status).toBe('failed');
    });
  });

  describe('complex scenarios', () => {
    it('should handle large project with multiple domains', async () => {
      testSandbox = await createIntegrationTestSandbox({
        projectOptions: {
          includeDocumentation: false,
        },
      });

      // Add complex project structure
      await addFilesToSandbox(
        testSandbox.sandbox,
        createComplexProjectStructure(),
        testSandbox.projectPath
      );

      const context = createTestContext({
        sandbox: testSandbox.sandbox,
      });

      const input = createTestWorkflowInput({
        message: TEST_MESSAGES.documentAll,
        context,
      });

      const result = await runWorkflowWithTracing(input, {
        testType: 'complex-project',
        projectComplexity: 'high',
      });

      expect(result.status).toBe('success');
      if (result.status === 'success') {
        expect(result.result).toBeDefined();
        console.log('Complex project test completed with:', {
          documentationCreated: result.result.documentationCreated,
          filesCreated: result.result.metadata?.filesCreated,
          toolsUsed: result.result.metadata?.toolsUsed,
        });
      }
    }, 30000); // Increase timeout for complex test

    it('should resume partially completed documentation', async () => {
      testSandbox = await createIntegrationTestSandbox();
      const context = createPartiallyCompletedContext(testSandbox.sandbox);

      const input = createTestWorkflowInput({
        message: TEST_MESSAGES.completePartial,
        context,
      });

      const result = await runWorkflowWithTracing(input, {
        testType: 'resume-partial',
      });

      expect(result.status).toBe('success');
      if (result.status === 'success') {
        expect(result.result).toBeDefined();
        console.log('Resume partial test completed with:', {
          finished: result.result.finished,
          filesCreated: result.result.metadata?.filesCreated,
          toolsUsed: result.result.metadata?.toolsUsed,
        });
      }
    });
  });

  describe('workflow structure', () => {
    it('should have correct input and output schemas', () => {
      expect(docsAgentWorkflow.inputSchema).toBeDefined();
      expect(docsAgentWorkflow.outputSchema).toBeDefined();
    });

    it('should have all required steps', () => {
      const workflow = docsAgentWorkflow as any;
      // The workflow has a steps object with the step definitions
      const stepKeys = Object.keys(workflow.steps);
      expect(stepKeys).toHaveLength(3);
      expect(stepKeys).toContain('initialize-context');
      expect(stepKeys).toContain('create-docs-todos');
      expect(stepKeys).toContain('docs-agent');
    });
  });

  describe('integration with real sandbox', () => {
    it.skip('should work with actual Daytona sandbox', async () => {
      // This test requires actual Daytona setup
      // Skip in CI, run locally with proper credentials

      testSandbox = await createIntegrationTestSandbox({
        projectOptions: {
          projectName: 'production_analytics',
          companyName: 'RealCo',
        },
      });

      const context = createTestContext({
        sandbox: testSandbox.sandbox,
      });

      const input = createTestWorkflowInput({
        message: 'Document the entire dbt project with detailed explanations',
        context,
      });

      const result = await runWorkflowWithTracing(input, {
        testType: 'real-sandbox-integration',
        environment: 'production-like',
      });

      expect(result.status).toBe('success');
      if (result.status === 'success') {
        expect(result.result.documentationCreated).toBe(true);
      }

      // Verify files were actually created in sandbox
      const files = await (testSandbox.sandbox.fs as any).listDirectory(testSandbox.projectPath);
      expect(files).toContain('README.md');
    });
  });
});

// Performance benchmark tests
describe('docs-agent-workflow performance', () => {
  let testSandbox: TestSandboxResult | null = null;
  let braintrustLogger: BraintrustLogger<true> | null = null;

  // Initialize Braintrust logger before each test
  beforeEach(() => {
    if (process.env.BRAINTRUST_KEY) {
      braintrustLogger = initLogger({
        apiKey: process.env.BRAINTRUST_KEY,
        projectName: 'DOCS-AGENT',
      });
    }
  });

  afterEach(async () => {
    if (testSandbox) {
      await testSandbox.cleanup();
      testSandbox = null;
    }
    if (braintrustLogger) {
      await braintrustLogger.flush();
      braintrustLogger = null;
    }
  });

  /**
   * Helper to run workflow with Braintrust tracing
   */
  async function runWorkflowWithTracing(input: unknown, metadata: Record<string, unknown> = {}) {
    if (!braintrustLogger) {
      // Run without tracing if no Braintrust key
      const run = docsAgentWorkflow.createRun();
      return await run.start({ inputData: input as any });
    }

    return await wrapTraced(
      async () => {
        currentSpan().log({
          metadata: {
            testName: expect.getState().currentTestName,
            ...metadata,
          },
        });

        const run = docsAgentWorkflow.createRun();
        return await run.start({ inputData: input as any });
      },
      {
        name: 'Docs Agent Workflow Test',
      }
    )();
  }

  it('should complete basic documentation within reasonable time', async () => {
    const startTime = Date.now();

    testSandbox = await createIntegrationTestSandbox();
    const context = createTestContext({ sandbox: testSandbox.sandbox });
    const input = createTestWorkflowInput({
      message: TEST_MESSAGES.documentSpecific,
      context,
    });

    await runWorkflowWithTracing(input, {
      testType: 'performance',
      benchmark: true,
    });

    const duration = Date.now() - startTime;
    expect(duration).toBeLessThan(10000); // Should complete within 10 seconds
  });
});
@@ -0,0 +1,91 @@
import { describe, expect, it } from 'vitest';
import docsAgentWorkflow from './docs-agent-workflow';
import {
  TEST_MESSAGES,
  createTestContext,
  createTestWorkflowInput,
} from './test-helpers/context-helpers';
import { createMockSandbox } from './test-helpers/mock-sandbox';

describe('docs-agent-workflow with mock sandbox', () => {
  describe('workflow structure', () => {
    it('should have correct input and output schemas', () => {
      expect(docsAgentWorkflow.inputSchema).toBeDefined();
      expect(docsAgentWorkflow.outputSchema).toBeDefined();
    });

    it('should have all required steps', () => {
      const workflow = docsAgentWorkflow as any;
      // The workflow has a steps object with the step definitions
      const stepKeys = Object.keys(workflow.steps);
      expect(stepKeys).toHaveLength(3);
      expect(stepKeys).toContain('initialize-context');
      expect(stepKeys).toContain('create-docs-todos');
      expect(stepKeys).toContain('docs-agent');
    });
  });

  describe('test helpers', () => {
    it('should create mock sandbox successfully', () => {
      const mockSandbox = createMockSandbox();
      expect(mockSandbox).toBeDefined();
      expect(mockSandbox.id).toContain('mock-sandbox-');
      expect(mockSandbox.fs).toBeDefined();
    });

    it('should create valid test context', () => {
      const mockSandbox = createMockSandbox();
      const context = createTestContext({ sandbox: mockSandbox });

      expect(context).toBeDefined();
      expect(context.sandbox).toBe(mockSandbox);
      expect(context.dataSourceId).toMatch(
        /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/
      );
      expect(context.todoList).toBe('');
      expect(context.clarificationQuestions).toEqual([]);
    });

    it('should create valid workflow input', () => {
      const mockSandbox = createMockSandbox();
      const context = createTestContext({ sandbox: mockSandbox });
      const input = createTestWorkflowInput({
        message: TEST_MESSAGES.documentAll,
        context,
      });

      expect(input).toBeDefined();
      expect(input.message).toBe(TEST_MESSAGES.documentAll);
      expect(input.organizationId).toBe('test-org-123');
      expect(input.context).toBe(context);
    });
  });

  describe('mock sandbox functionality', () => {
    it('should upload and read files', async () => {
      const mockSandbox = createMockSandbox();

      // Upload a file
      await mockSandbox.fs.uploadFile('Test content', 'test.txt');

      // Read the file
      const content = await (mockSandbox.fs as any).readFile('test.txt');
      expect(content).toBe('Test content');
    });

    it('should list directory contents', async () => {
      const mockSandbox = createMockSandbox();

      // Upload multiple files
      await mockSandbox.fs.uploadFile('Content 1', 'dir/file1.txt');
      await mockSandbox.fs.uploadFile('Content 2', 'dir/file2.txt');
      await mockSandbox.fs.uploadFile('Content 3', 'other/file3.txt');

      // List directory
      const files = await (mockSandbox.fs as any).listDirectory('dir');
      expect(files).toHaveLength(2);
      expect(files).toContain('file1.txt');
      expect(files).toContain('file2.txt');
    });
  });
});
@@ -0,0 +1,175 @@
# Docs Agent Test Helpers

This directory contains test helpers for the docs agent workflow, including utilities for creating mock dbt projects, managing sandboxes, and running tests with Braintrust integration.

## Overview

The test helpers provide:

1. **Mock dbt Project Generator** - Creates realistic dbt projects for testing
2. **Sandbox Management** - Handles creation, file uploads, and cleanup of Daytona sandboxes
3. **Context Builders** - Creates valid `DocsAgentContext` objects for testing
4. **Braintrust Integration** - Wraps test execution with observability

## Usage

### Basic Test Setup

```typescript
import { createTestSandbox, createTestContext, createTestWorkflowInput } from './test-helpers';
import docsAgentWorkflow from './docs-agent-workflow';

// Create a test sandbox with mock dbt project
const testSandbox = await createTestSandbox({
  projectOptions: {
    projectName: 'analytics',
    companyName: 'TestCo',
    includeDocumentation: false,
  }
});

// Create context and input
const context = createTestContext({
  sandbox: testSandbox.sandbox,
});

const input = createTestWorkflowInput({
  message: 'Document all models in this dbt project',
  context,
});

// Run workflow
const result = await docsAgentWorkflow.createRun().start({ inputData: input });

// Cleanup
await testSandbox.cleanup();
```

### Mock dbt Project Options

The `generateMockDbtProject` function creates a complete dbt project with:

- **Staging models** - Stripe data transformations
- **Mart models** - Business metrics (MRR, revenue)
- **Schema documentation** - YAML files with descriptions
- **Tests** - Data quality checks
- **Macros** - Custom dbt macros
- **Configuration** - dbt_project.yml, packages.yml

### Test Scenarios

#### 1. Basic Documentation
```typescript
const testSandbox = await createTestSandbox({
  projectOptions: {
    includeDocumentation: false, // Start without docs
  }
});
```

#### 2. Missing Documentation
```typescript
await addFilesToSandbox(
  testSandbox.sandbox,
  createFilesWithMissingDocs(),
  testSandbox.projectPath
);
```

#### 3. Malformed YAML
```typescript
await addFilesToSandbox(
  testSandbox.sandbox,
  createMalformedYamlFiles(),
  testSandbox.projectPath
);
```

#### 4. Complex Project
```typescript
await addFilesToSandbox(
  testSandbox.sandbox,
  createComplexProjectStructure(),
  testSandbox.projectPath
);
```

### Context Variations

```typescript
// Context with pre-populated todos
const context = createContextWithTodos(sandbox);

// Context with clarification questions
const context = createContextWithClarifications(sandbox);

// Partially completed context
const context = createPartiallyCompletedContext(sandbox);
```

### Running Tests

```bash
# Run all workflow tests
bun test docs-agent-workflow.test.ts

# Run specific test
bun test docs-agent-workflow.test.ts -t "should successfully document"

# Run example
tsx packages/ai/src/workflows/docs-agent/test-helpers/run-example.ts
```

## File Structure

```
test-helpers/
├── mock-dbt-project.ts    # dbt project generator
├── sandbox-helpers.ts     # Sandbox management utilities
├── context-helpers.ts     # Context builders and validators
├── run-example.ts         # Example runner script
├── index.ts               # Exports
└── README.md              # This file
```

## Key Functions

### Mock Project Generation
- `generateMockDbtProject()` - Creates complete dbt project
- `generateProjectVariations()` - Returns different project configurations

### Sandbox Management
- `createTestSandbox()` - Creates sandbox with mock project
- `addFilesToSandbox()` - Adds additional files
- `createLocalTestProject()` - Creates local temp directory (for non-sandbox testing)

### Context Helpers
- `createTestContext()` - Creates basic context
- `createTestWorkflowInput()` - Creates workflow input
- `validateWorkflowOutput()` - Validates workflow results

### Test Data
- `createMalformedYamlFiles()` - Invalid YAML for error testing
- `createFilesWithMissingDocs()` - Models without documentation
- `createComplexProjectStructure()` - Multi-domain project

## Braintrust Integration

Tests automatically integrate with Braintrust when `BRAINTRUST_KEY` is set:

```typescript
const result = await runWorkflowWithTracing(input, {
  testType: 'basic-documentation',
  projectType: 'simple-dbt',
});
```

Traces appear in the `DOCS-AGENT` project in Braintrust.

## Best Practices

1. **Always cleanup sandboxes** - Use the cleanup function in afterEach hooks (a sketch follows below)
2. **Use descriptive test metadata** - Pass meaningful metadata to Braintrust
3. **Test edge cases** - Include malformed files, missing docs, complex structures
4. **Validate outputs** - Use the validation helper to check workflow results
5. **Mock realistically** - The mock dbt project should resemble real projects
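As referenced in best practice 1 above, a minimal sketch of the cleanup pattern in a vitest suite, using the helpers exported from this directory:

```typescript
import { afterEach, describe, it } from 'vitest';
import { type TestSandboxResult, createTestSandbox } from './test-helpers';

describe('example suite', () => {
  let testSandbox: TestSandboxResult | null = null;

  // Release the sandbox even when the test body throws.
  afterEach(async () => {
    if (testSandbox) {
      await testSandbox.cleanup();
      testSandbox = null;
    }
  });

  it('uses a sandbox', async () => {
    testSandbox = await createTestSandbox();
    // ...exercise the workflow against testSandbox.sandbox
  });
});
```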
@@ -0,0 +1,176 @@
import type { Sandbox } from '@buster/sandbox';
import type { DocsAgentContext } from '../../../context/docs-agent-context';

export interface CreateTestContextOptions {
  sandbox: Sandbox;
  dataSourceId?: string;
  todoList?: string[];
  clarificationQuestions?: Array<{
    issue: string;
    context: string;
    clarificationQuestion: string;
  }>;
}

/**
 * Creates a test DocsAgentContext with sensible defaults
 */
export function createTestContext(options: CreateTestContextOptions): DocsAgentContext {
  const {
    sandbox,
    dataSourceId = '550e8400-e29b-41d4-a716-446655440000', // Valid UUID v4
    todoList = [],
    clarificationQuestions = [],
  } = options;

  return {
    sandbox,
    todoList: todoList.join('\n'),
    clarificationQuestions,
    dataSourceId,
  };
}

/**
 * Creates a context with pre-populated todos for testing
 */
export function createContextWithTodos(sandbox: Sandbox): DocsAgentContext {
  return createTestContext({
    sandbox,
    todoList: [
      'Document the staging models in models/staging/stripe/',
      'Add descriptions to all columns in fct_mrr model',
      'Create README for the finance mart',
      'Update main project README with setup instructions',
    ],
  });
}

/**
 * Creates a context with clarification questions
 */
export function createContextWithClarifications(sandbox: Sandbox): DocsAgentContext {
  return createTestContext({
    sandbox,
    clarificationQuestions: [
      {
        issue: 'Missing column documentation',
        context: 'The stg_stripe__customers model has columns without descriptions',
        clarificationQuestion:
          'What does the "delinquent" column represent in the customers table?',
      },
      {
        issue: 'Unclear business logic',
        context: 'The MRR calculation in fct_mrr uses complex logic',
        clarificationQuestion:
          'Should MRR include customers in trial status or only active paying customers?',
      },
    ],
  });
}

/**
 * Creates a context simulating a partially completed workflow
 */
export function createPartiallyCompletedContext(sandbox: Sandbox): DocsAgentContext {
  return createTestContext({
    sandbox,
    todoList: [
      'Document the staging models in models/staging/stripe/', // Completed
      'Add descriptions to all columns in fct_mrr model',
      'Create README for the finance mart', // Completed
      'Update main project README with setup instructions',
    ],
  });
}

/**
 * Creates test inputs for the workflow
 */
export interface CreateTestInputOptions {
  message?: string;
  organizationId?: string;
  context: DocsAgentContext;
}

export function createTestWorkflowInput(options: CreateTestInputOptions) {
  const {
    message = 'Please document all the models in this dbt project',
    organizationId = 'test-org-123',
    context,
  } = options;

  return {
    message,
    organizationId,
    context,
  };
}

/**
 * Common test messages for different scenarios
 */
export const TEST_MESSAGES = {
  documentAll:
    'Please document all the models in this dbt project. The project files are in the dbt_project directory. First, use grepSearch to find all .sql and .yml files. Use path "dbt_project" and pattern "\\.sql$|\\.yml$" with recursive=true.',
  documentSpecific:
    'Please create a simple documentation file for the fct_mrr model. Just add a basic schema.yml file in the dbt_project/models/marts/finance/ directory with a description for the model.',
  updateReadme: 'Update the README files to include setup instructions',
  addTests: 'Add schema tests for all staging models',
  fixYaml: 'Fix any malformed YAML files in the project',
  askClarification: 'Document the project but I need clarification on business logic',
  completePartial: 'Continue documenting from where we left off',
};

/**
 * Helper to validate workflow output
 */
export function validateWorkflowOutput(output: unknown): {
  isValid: boolean;
  errors: string[];
} {
  const errors: string[] = [];

  // Type guard to check if output is an object
  if (!output || typeof output !== 'object') {
    errors.push('Output must be an object');
    return { isValid: false, errors };
  }

  const outputObj = output as Record<string, unknown>;

  // Check for required fields based on the output type
  if (outputObj.clarificationNeeded) {
    if (!outputObj.clarificationQuestion) {
      errors.push('clarificationQuestion is required when clarificationNeeded is true');
    }
    const clarificationQuestion = outputObj.clarificationQuestion as Record<string, unknown>;
    if (clarificationQuestion) {
      if (!clarificationQuestion.issue) {
        errors.push('clarificationQuestion.issue is required');
      }
      if (!clarificationQuestion.context) {
        errors.push('clarificationQuestion.context is required');
      }
      if (!clarificationQuestion.clarificationQuestion) {
        errors.push('clarificationQuestion.clarificationQuestion is required');
      }
    }
  }

  if (outputObj.documentationCreated) {
    const metadata = outputObj.metadata as Record<string, unknown>;
    if (metadata && typeof metadata.filesCreated !== 'number') {
      errors.push('metadata.filesCreated should be a number when documentation is created');
    }
  }

  if (outputObj.todos && !Array.isArray(outputObj.todos)) {
    errors.push('todos should be an array');
  }

  return {
    isValid: errors.length === 0,
    errors,
  };
}
@@ -0,0 +1,5 @@
// Export all test helpers
export * from './mock-dbt-project';
export * from './sandbox-helpers';
export * from './context-helpers';
export * from './mock-sandbox';
(File diff suppressed because it is too large.)
@@ -0,0 +1,378 @@
import type { Sandbox } from '@buster/sandbox';

/**
 * Creates a mock sandbox for testing without requiring Daytona API
 */
export function createMockSandbox(): Sandbox {
  const mockId = `mock-sandbox-${Date.now()}`;

  const fileStorage = new Map<string, string | Buffer>();

  const mockSandbox: Sandbox = {
    id: mockId,

    fs: {
      uploadFile: async (content: Buffer | string, path: string) => {
        // Preserve full path structure
        fileStorage.set(path, content);
        return Promise.resolve();
      },

      uploadFiles: async (files: Array<{ source: Buffer; destination: string }>) => {
        for (const file of files) {
          // Preserve full path structure
          fileStorage.set(file.destination, file.source);
        }
        return Promise.resolve();
      },

      createFolder: async (_path: string, _permissions?: string) => {
        // Mock folder creation - folders are implicit in our file storage
        return Promise.resolve();
      },

      readFile: async (path: string) => {
        const content = fileStorage.get(path);
        if (!content) {
          throw new Error(`File not found: ${path}`);
        }
        return content;
      },

      listDirectory: async (path: string) => {
        const normalizedPath = path.endsWith('/') ? path : `${path}/`;
        const files = new Set<string>();

        for (const filePath of fileStorage.keys()) {
          // Check if file is in the requested directory
          if (filePath.startsWith(normalizedPath)) {
            // Get the relative path from the directory
            const relativePath = filePath.slice(normalizedPath.length);
            // Get only the immediate child (file or directory)
            const parts = relativePath.split('/');
            if (parts[0]) {
              files.add(parts[0]);
            }
          }
        }

        return Array.from(files);
      },

      deleteFile: async (path: string) => {
        fileStorage.delete(path);
        return Promise.resolve();
      },

      exists: async (path: string) => {
        // Check if it's a file
        if (fileStorage.has(path)) {
          return true;
        }
        // Check if it's a directory (has files with this prefix)
        const normalizedPath = path.endsWith('/') ? path : `${path}/`;
        for (const filePath of fileStorage.keys()) {
          if (filePath.startsWith(normalizedPath)) {
            return true;
          }
        }
        return false;
      },

      stat: async (path: string) => {
        const exists = fileStorage.has(path);
        if (!exists) {
          throw new Error(`File not found: ${path}`);
        }
        return {
          isFile: true,
          isDirectory: false,
          size: (fileStorage.get(path) as Buffer | string)?.length || 0,
          mtime: new Date(),
        };
      },
    },

    exec: async (command: string) => {
      // Mock command execution with special handling for typescript execution
      if (command.includes('node') && command.includes('.ts')) {
        // This is likely a TypeScript execution from runTypescript
        // Extract the code and execute it
        try {
          // For grep search simulation
          if (command.includes('grep-search')) {
            const searchResults = [];
            for (const [filePath, content] of Array.from(fileStorage.entries())) {
              if (filePath.endsWith('.sql') || filePath.endsWith('.yml')) {
                searchResults.push({
                  success: true,
                  path: filePath,
                  pattern: '.*',
                  matches: [
                    {
                      file: filePath,
                      lineNumber: 1,
                      content: content.toString().split('\n')[0] || '',
                    },
                  ],
                  matchCount: 1,
                });
              }
            }
            return {
              stdout: JSON.stringify(searchResults),
              stderr: '',
              exitCode: 0,
            };
          }

          // For read files simulation
          if (command.includes('read-files')) {
            const results = [];
            // Parse which files are being requested - this is a simplified mock
            for (const [filePath, content] of Array.from(fileStorage.entries())) {
              results.push({
                success: true,
                filePath: filePath,
                content: content.toString(),
                truncated: false,
              });
            }
            return {
              stdout: JSON.stringify(results),
              stderr: '',
              exitCode: 0,
            };
          }
        } catch (error) {
          return {
            stdout: '',
            stderr: error instanceof Error ? error.message : 'Mock execution error',
            exitCode: 1,
          };
        }
      }

      return {
        stdout: `Mock execution of: ${command}`,
        stderr: '',
        exitCode: 0,
      };
    },

    close: async () => {
      // Mock cleanup
      fileStorage.clear();
      return Promise.resolve();
    },

    // Additional mock methods as needed
    process: {
      run: async (command: string) => {
        return {
          stdout: `Mock run: ${command}`,
          stderr: '',
          exitCode: 0,
        };
      },
      codeRun: async (code: string, _options?: Record<string, unknown>, _timeout?: number) => {
        // Mock TypeScript code execution
        try {
          // Handle ls files operations
          if (code.includes('lsFilesConcurrently') || code.includes('ls ')) {
            const results = [];

            // Extract paths from the code
            const pathsMatch = code.match(/const paths = (\[.*?\]);/s);
            if (pathsMatch) {
              const paths = JSON.parse(pathsMatch[1]);

              for (const requestedPath of paths) {
                const normalizedPath = requestedPath.endsWith('/')
                  ? requestedPath.slice(0, -1)
                  : requestedPath;
                const entries: Array<{ name: string; type: string; size?: string }> = [];

                // Check if path exists as a directory
                let isDirectory = false;
                const dirPath = normalizedPath.endsWith('/')
                  ? normalizedPath
                  : `${normalizedPath}/`;

                for (const filePath of fileStorage.keys()) {
                  if (filePath.startsWith(dirPath) || filePath === normalizedPath) {
                    isDirectory = true;
                    break;
                  }
                }

                if (!isDirectory && !fileStorage.has(normalizedPath)) {
                  results.push({
                    success: false,
                    path: requestedPath,
                    error: 'Path not found',
                  });
                  continue;
                }

                // List files in directory
                for (const [filePath, content] of Array.from(fileStorage.entries())) {
                  if (filePath.startsWith(dirPath) && filePath !== normalizedPath) {
                    const relativePath = filePath.slice(dirPath.length);
                    const parts = relativePath.split('/');
                    if (parts[0] && !entries.find((e) => e.name === parts[0])) {
                      const entry: { name: string; type: string; size?: string } = {
                        name: parts[0],
                        type: parts.length > 1 ? 'directory' : 'file',
                      };
                      if (parts.length === 1) {
                        entry.size = content.toString().length.toString();
                      }
                      entries.push(entry);
                    }
                  } else if (filePath === normalizedPath) {
                    // It's a file
                    entries.push({
                      name: filePath.split('/').pop() || filePath,
                      type: 'file',
                      size: content.toString().length.toString(),
                    });
                  }
                }

                results.push({
                  success: true,
                  path: requestedPath,
                  entries: entries.length > 0 ? entries : undefined,
                });
              }
            }

            return {
              result: JSON.stringify(results),
              stderr: '',
              exitCode: 0,
            };
          }

          // Simple pattern matching for grep search code
          if (code.includes('executeGrepSearch') || code.includes('grep')) {
            const searchResults = [];

            // Check if searching in dbt_project directory
            const pathMatch = code.match(/path:\s*["']([^"']+)["']/);
            const searchPath = pathMatch ? pathMatch[1] : '.';

            for (const [filePath, content] of Array.from(fileStorage.entries())) {
              // Only include files that match the search path
              if (searchPath === '.' || (searchPath && filePath.startsWith(searchPath))) {
                if (filePath.endsWith('.sql') || filePath.endsWith('.yml')) {
                  searchResults.push({
                    success: true,
                    path: searchPath,
                    pattern: '.*',
                    matches: [
                      {
                        file: filePath,
                        lineNumber: 1,
                        content: content.toString().split('\n')[0] || '',
                      },
                    ],
                    matchCount: 1,
                  });
                }
              }
            }

            return {
              result: JSON.stringify(searchResults),
              stderr: '',
              exitCode: 0,
            };
          }

          // Simple pattern matching for read files code
          if (code.includes('readFile') || code.includes('readFiles')) {
            const results = [];

            // Try to extract file paths from the code
            const filePathMatches = code.matchAll(/["']([^"']+\.(sql|yml|md))["']/g);
            const requestedFiles = Array.from(filePathMatches).map((match) => match[1]);

            if (requestedFiles.length > 0) {
              // Return specific files requested
              for (const requestedFile of requestedFiles) {
                if (requestedFile) {
                  const content = fileStorage.get(requestedFile);
                  if (content) {
                    results.push({
                      success: true,
                      filePath: requestedFile,
                      content: content.toString(),
                      truncated: false,
                    });
                  } else {
                    results.push({
                      success: false,
                      filePath: requestedFile,
                      error: `File not found: ${requestedFile}`,
                    });
                  }
                }
              }
            } else {
              // Return all files if no specific files requested
              for (const [filePath, content] of Array.from(fileStorage.entries())) {
                results.push({
                  success: true,
                  filePath: filePath,
                  content: content.toString(),
                  truncated: false,
                });
              }
            }

            return {
              result: JSON.stringify(results),
              stderr: '',
              exitCode: 0,
            };
          }

          // Simple pattern matching for create/edit files code
          if (
            code.includes('writeFile') ||
            code.includes('createFile') ||
            code.includes('fs.write')
          ) {
            // Simulate successful file creation
            return {
              result: JSON.stringify({
                success: true,
                message: 'Files created successfully',
                filesCreated: 1,
              }),
              stderr: '',
              exitCode: 0,
            };
          }

          // Default mock response
          return {
            result: 'Mock TypeScript execution completed',
            stderr: '',
            exitCode: 0,
          };
        } catch (error) {
          return {
            result: '',
            stderr: error instanceof Error ? error.message : 'Mock execution error',
            exitCode: 1,
          };
        }
      },
    },
  } as unknown as Sandbox;

  return mockSandbox;
}
@@ -0,0 +1,143 @@
#!/usr/bin/env tsx
/**
 * Example runner to demonstrate how to use the docs agent test helpers
 * Run this file with: tsx packages/ai/src/workflows/docs-agent/test-helpers/run-example.ts
 */

import { currentSpan, initLogger, wrapTraced } from 'braintrust';
import docsAgentWorkflow from '../docs-agent-workflow';
import { TEST_MESSAGES, createTestContext, createTestWorkflowInput } from './context-helpers';
import {
  addFilesToSandbox,
  createFilesWithMissingDocs,
  createIntegrationTestSandbox,
} from './sandbox-helpers';

async function runExample() {
  console.info('🚀 Starting docs agent example...\n');

  // Initialize Braintrust logger if key is available
  let braintrustLogger = null;
  if (process.env.BRAINTRUST_KEY) {
    braintrustLogger = initLogger({
      apiKey: process.env.BRAINTRUST_KEY,
      projectName: 'DOCS-AGENT',
    });
    console.info('✅ Braintrust logging enabled\n');
  } else {
    console.warn('⚠️ No BRAINTRUST_KEY found, running without logging\n');
  }

  let testSandbox: Awaited<ReturnType<typeof createIntegrationTestSandbox>> | null = null;

  try {
    // Step 1: Create a test sandbox with mock dbt project
    console.info('📦 Creating test sandbox with mock dbt project...');
    testSandbox = await createIntegrationTestSandbox({
      projectOptions: {
        projectName: 'example_analytics',
        companyName: 'ExampleCo',
        includeDocumentation: false, // Start without docs
        includeTests: true,
        includeMacros: true,
      },
    });
    console.info(`✅ Sandbox created: ${testSandbox.sandboxId}\n`);

    // Step 2: Add some files that need documentation
    console.info('📄 Adding files with missing documentation...');
    await addFilesToSandbox(
      testSandbox.sandbox,
      createFilesWithMissingDocs(),
      testSandbox.projectPath
    );
    console.info('✅ Additional files added\n');

    // Step 3: Create context and input
    const context = createTestContext({
      sandbox: testSandbox.sandbox,
    });

    const input = createTestWorkflowInput({
      message: TEST_MESSAGES.documentAll,
      organizationId: 'example-org-123',
      context,
    });

    // Step 4: Run the workflow
    console.info('🤖 Running docs agent workflow...');
    console.info(`Message: "${input.message}"\n`);

    const startTime = Date.now();

    const result = braintrustLogger
      ? await wrapTraced(
          async () => {
            currentSpan().log({
              metadata: {
                exampleRun: true,
                sandboxId: testSandbox?.sandboxId,
                projectName: 'example_analytics',
              },
            });

            const run = docsAgentWorkflow.createRun();
            return await run.start({ inputData: input });
          },
          { name: 'Docs Agent Example Run' }
        )()
      : await docsAgentWorkflow.createRun().start({ inputData: input });

    const duration = Date.now() - startTime;

    // Step 5: Display results
    console.info('\n📊 Workflow Results:');
    console.info(`✅ Completed in ${duration}ms`);
    console.info('\nOutput:');
    console.info(JSON.stringify(result, null, 2));

    if (result.status === 'success' && result.result.documentationCreated) {
      console.info(
        `\n✅ Documentation created! ${result.result.metadata?.filesCreated || 0} files written`
      );
      if (result.result.metadata?.toolsUsed) {
        console.info(`Tools used: ${result.result.metadata.toolsUsed.join(', ')}`);
      }
    }

    if (result.status === 'success' && result.result.clarificationNeeded) {
      console.info('\n❓ Clarification needed:');
      console.info(`Issue: ${result.result.clarificationQuestion?.issue}`);
      console.info(`Question: ${result.result.clarificationQuestion?.clarificationQuestion}`);
    }

    if (result.status === 'success' && result.result.todos) {
      console.info(`\n📝 Generated ${result.result.todos.length} todos`);
    }
  } catch (error) {
    console.error('\n❌ Error running example:', error);
    throw error;
  } finally {
    // Cleanup
    if (testSandbox) {
      console.info('\n🧹 Cleaning up sandbox...');
      await testSandbox.cleanup();
      console.info('✅ Cleanup complete');
    }

    if (braintrustLogger) {
      await braintrustLogger.flush();
    }
  }
}

// Run the example
runExample()
  .then(() => {
    console.info('\n🎉 Example completed successfully!');
    process.exit(0);
  })
  .catch((error) => {
    console.error('\n💥 Example failed:', error);
    process.exit(1);
  });
@ -0,0 +1,343 @@
|
|||
import { promises as fs } from 'node:fs';
import { tmpdir } from 'node:os';
import path from 'node:path';
import { type FileInput, type Sandbox, addFiles, createSandbox } from '@buster/sandbox';
import { type MockDbtProjectOptions, generateMockDbtProject } from './mock-dbt-project';
import { createMockSandbox } from './mock-sandbox';

export interface TestSandboxOptions {
  projectOptions?: MockDbtProjectOptions;
  additionalFiles?: FileInput[];
  baseDir?: string;
}

export interface TestSandboxResult {
  sandbox: Sandbox;
  sandboxId: string;
  projectPath: string;
  cleanup: () => Promise<void>;
}

/**
 * Creates a test sandbox with a mock dbt project (always uses the in-memory mock sandbox).
 * Use this for unit tests that don't need real sandbox functionality.
 */
export async function createTestSandbox(
  options: TestSandboxOptions = {}
): Promise<TestSandboxResult> {
  const { projectOptions = {}, additionalFiles = [], baseDir = 'dbt_project' } = options;

  // Always use the mock sandbox for unit tests
  const sandbox = createMockSandbox();
  const sandboxId = sandbox.id;

  // Generate mock project files
  const projectFiles = generateMockDbtProject(projectOptions);
  const allFiles = [...projectFiles, ...additionalFiles];

  // Upload files to the sandbox
  console.log(`[TestSandbox] Uploading ${allFiles.length} files to ${baseDir} directory`);
  console.log('[TestSandbox] Files being uploaded:', allFiles.map((f) => f.path).slice(0, 10));

  const uploadResult = await addFiles(sandbox, allFiles, {
    baseDestination: baseDir,
    overwrite: true,
  });

  if (!uploadResult.success) {
    throw new Error(
      `Failed to upload files to sandbox: ${JSON.stringify(uploadResult.failedFiles)}`
    );
  }

  console.log(`[TestSandbox] Successfully uploaded files to ${baseDir}`);
  console.log('[TestSandbox] Uploaded files count:', uploadResult.uploadedFiles.length);

  // Return sandbox info with a cleanup function
  return {
    sandbox,
    sandboxId,
    projectPath: baseDir,
    cleanup: async () => {
      // Mock sandbox cleanup: in-memory storage is simply dropped; no explicit close needed
    },
  };
}
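
// Usage sketch for createTestSandbox above (illustration only, not part of the commit;
// a vitest-style test is assumed):
//
//   it('uploads a mock dbt project into the mock sandbox', async () => {
//     const { sandbox, projectPath, cleanup } = await createTestSandbox({
//       projectOptions: { projectName: 'unit_fixture' },
//     });
//     try {
//       expect(projectPath).toBe('dbt_project');
//       expect(sandbox.id).toBeTruthy();
//     } finally {
//       await cleanup();
//     }
//   });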

/**
 * Creates a real sandbox with a mock dbt project for integration testing.
 * This uses an actual Daytona sandbox when DAYTONA_API_KEY is available,
 * otherwise it falls back to the mock sandbox.
 */
export async function createIntegrationTestSandbox(
  options: TestSandboxOptions = {}
): Promise<TestSandboxResult> {
  const { projectOptions = {}, additionalFiles = [], baseDir = 'dbt_project' } = options;

  let sandbox: Sandbox;
  try {
    // Try to create a real sandbox if Daytona is available
    sandbox = await createSandbox({ language: 'typescript' });
    console.info('Using real Daytona sandbox for integration test');
  } catch (_error) {
    // Fall back to the mock sandbox if Daytona is not available
    console.warn('Daytona not available, using mock sandbox for integration test');
    sandbox = createMockSandbox();
  }

  const sandboxId = sandbox.id;

  // Generate mock project files
  const projectFiles = generateMockDbtProject(projectOptions);
  const allFiles = [...projectFiles, ...additionalFiles];

  // Upload files to the sandbox
  console.log(`[TestSandbox] Uploading ${allFiles.length} files to ${baseDir} directory`);
  console.log('[TestSandbox] Files being uploaded:', allFiles.map((f) => f.path).slice(0, 10));

  const uploadResult = await addFiles(sandbox, allFiles, {
    baseDestination: baseDir,
    overwrite: true,
  });

  if (!uploadResult.success) {
    throw new Error(
      `Failed to upload files to sandbox: ${JSON.stringify(uploadResult.failedFiles)}`
    );
  }

  console.log(`[TestSandbox] Successfully uploaded files to ${baseDir}`);
  console.log('[TestSandbox] Uploaded files count:', uploadResult.uploadedFiles.length);

  // Return sandbox info with a cleanup function
  return {
    sandbox,
    sandboxId,
    projectPath: baseDir,
    cleanup: async () => {
      // Close the real sandbox if it exposes a close method
      if ('close' in sandbox && typeof sandbox.close === 'function') {
        try {
          await sandbox.close();
        } catch (error) {
          console.error('Error closing sandbox:', error);
        }
      }
    },
  };
}
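
// Usage sketch for createIntegrationTestSandbox above (illustration only, not part of the
// commit; gating on DAYTONA_API_KEY mirrors the helper's own fallback logic):
//
//   const itIfDaytona = process.env.DAYTONA_API_KEY ? it : it.skip;
//   itIfDaytona('runs against a real Daytona sandbox', async () => {
//     const { sandbox, cleanup } = await createIntegrationTestSandbox();
//     try {
//       expect(sandbox.id).toBeTruthy();
//     } finally {
//       await cleanup();
//     }
//   });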

/**
 * Creates a local temporary directory with a mock dbt project for testing.
 */
export async function createLocalTestProject(
  options: MockDbtProjectOptions = {}
): Promise<{ projectPath: string; cleanup: () => Promise<void> }> {
  const tempDir = path.join(tmpdir(), `dbt-test-${Date.now()}`);
  await fs.mkdir(tempDir, { recursive: true });

  const projectFiles = generateMockDbtProject(options);

  // Write all files to the temp directory
  for (const file of projectFiles) {
    const filePath = path.join(tempDir, file.path);
    const fileDir = path.dirname(filePath);

    await fs.mkdir(fileDir, { recursive: true });

    const content =
      typeof file.content === 'string' ? file.content : file.content?.toString() || '';

    await fs.writeFile(filePath, content, 'utf-8');
  }

  return {
    projectPath: tempDir,
    cleanup: async () => {
      try {
        await fs.rm(tempDir, { recursive: true, force: true });
      } catch (error) {
        console.error('Failed to cleanup temp directory:', error);
      }
    },
  };
}
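
// Usage sketch for createLocalTestProject above (illustration only, not part of the commit):
//
//   const { projectPath, cleanup } = await createLocalTestProject({ projectName: 'tmp_proj' });
//   try {
//     // The generated project is plain files on disk, e.g. dbt_project.yml
//     const yml = await fs.readFile(path.join(projectPath, 'dbt_project.yml'), 'utf-8');
//   } finally {
//     await cleanup();
//   }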

/**
 * Helper to add additional files to an existing sandbox.
 */
export async function addFilesToSandbox(
  sandbox: Sandbox,
  files: FileInput[],
  baseDir = 'dbt_project'
): Promise<void> {
  const uploadResult = await addFiles(sandbox, files, {
    baseDestination: baseDir,
    overwrite: true,
  });

  if (!uploadResult.success) {
    throw new Error(`Failed to add files to sandbox: ${JSON.stringify(uploadResult.failedFiles)}`);
  }
}
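
// Usage sketch for addFilesToSandbox above (illustration only, not part of the commit):
//
//   const { sandbox, cleanup } = await createTestSandbox();
//   await addFilesToSandbox(sandbox, createMalformedYamlFiles());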

/**
 * Helper to create malformed YAML files for error testing.
 */
export function createMalformedYamlFiles(): FileInput[] {
  return [
    {
      path: 'models/staging/malformed_schema.yml',
      content: `version: 2

models:
  - name: test_model
    description: "Test model with malformed YAML"
    columns:
      - name: id
        description: "Missing closing quote
        tests:
          - unique
      - name: invalid
        tests: [not_null
`,
    },
    {
      path: 'dbt_project.yml',
      content: `name: 'broken_project
version: '1.0.0'
invalid_indentation:
 - mixed tabs and spaces
   - this will break
`,
    },
  ];
}
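
// Usage sketch for createMalformedYamlFiles above (illustration only, not part of the commit;
// assumes the workflow is expected to surface parse errors rather than crash):
//
//   const { sandbox, cleanup } = await createTestSandbox({
//     additionalFiles: createMalformedYamlFiles(),
//   });
//   // ...run the docs agent against `sandbox` and assert the broken YAML is reported...
//   await cleanup();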

/**
 * Helper to create files with missing documentation.
 */
export function createFilesWithMissingDocs(): FileInput[] {
  return [
    {
      path: 'models/staging/undocumented/users.sql',
      content: `select
  id,
  email,
  created_at,
  updated_at,
  is_active
from {{ source('app', 'users') }}
`,
    },
    {
      path: 'models/staging/undocumented/orders.sql',
      content: `select
  id,
  user_id,
  order_date,
  total_amount,
  status
from {{ source('app', 'orders') }}
`,
    },
    {
      path: 'models/staging/undocumented/schema.yml',
      content: `version: 2

models:
  - name: users
    # Missing description and column documentation
    columns:
      - name: id
        tests:
          - unique
          - not_null
      - name: email
      - name: created_at

  - name: orders
    # Missing all documentation
`,
    },
  ];
}
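
// Usage sketch for createFilesWithMissingDocs above (illustration only, not part of the commit):
//
//   const { sandbox, cleanup } = await createTestSandbox({
//     additionalFiles: createFilesWithMissingDocs(),
//   });
//   // ...assert the agent drafts descriptions for the undocumented users/orders models...
//   await cleanup();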

/**
 * Helper to create a complex project structure for testing.
 */
export function createComplexProjectStructure(): FileInput[] {
  const files: FileInput[] = [];

  // Multiple data sources
  const dataSources = ['stripe', 'salesforce', 'postgres', 'snowplow'];

  for (const source of dataSources) {
    files.push({
      path: `models/staging/${source}/README.md`,
      content: `# ${source} Staging Models

This directory contains staging models for ${source} data.
`,
    });

    files.push({
      path: `models/staging/${source}/schema.yml`,
      content: `version: 2

sources:
  - name: ${source}
    database: raw
    schema: ${source}
    tables:
      - name: table1
      - name: table2
`,
    });
  }

  // Multiple business domains
  const domains = ['finance', 'marketing', 'operations', 'product'];

  for (const domain of domains) {
    files.push({
      path: `models/marts/${domain}/README.md`,
      content: `# ${domain} Data Mart

Business logic for ${domain} analytics.
`,
    });
  }

  // Analysis files
  files.push({
    path: 'analyses/customer_churn_analysis.sql',
    content: `-- Customer churn analysis
with churned_customers as (
  select * from {{ ref('dim_customers') }}
  where churned_at is not null
)
select * from churned_customers
`,
  });

  // Snapshots
  files.push({
    path: 'snapshots/customers_snapshot.sql',
    content: `{% snapshot customers_snapshot %}
{{
  config(
    target_schema='snapshots',
    unique_key='id',
    strategy='timestamp',
    updated_at='updated_at',
  )
}}
select * from {{ source('app', 'customers') }}
{% endsnapshot %}
`,
  });

  return files;
}
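
A sketch of how these generators might combine for a larger integration scenario (illustrative only; the repo's actual tests may wire this differently):

async function complexScenario() {
  const { sandbox, cleanup } = await createIntegrationTestSandbox({
    projectOptions: { projectName: 'complex_analytics' },
    additionalFiles: createComplexProjectStructure(),
  });
  try {
    // The sandbox now mirrors a project with staging sources, marts, analyses, and a snapshot
    console.info(`Sandbox ${sandbox.id} ready for the docs agent`);
  } finally {
    await cleanup();
  }
}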
@ -0,0 +1,90 @@
#!/usr/bin/env tsx
/**
 * Simple test to verify the test helpers work correctly.
 * Run with: tsx packages/ai/src/workflows/docs-agent/test-helpers/simple-test.ts
 */

import { addFiles } from '@buster/sandbox';
import { TEST_MESSAGES, createTestContext, createTestWorkflowInput } from './context-helpers';
import { generateMockDbtProject } from './mock-dbt-project';
import { createMockSandbox } from './mock-sandbox';
import { createFilesWithMissingDocs, createMalformedYamlFiles } from './sandbox-helpers';

async function runSimpleTest() {
  console.info('🧪 Running simple test of docs agent test helpers...\n');

  try {
    // Test 1: Mock sandbox creation
    console.info('1️⃣ Testing mock sandbox creation...');
    const mockSandbox = createMockSandbox();
    console.info(`✅ Created mock sandbox with ID: ${mockSandbox.id}\n`);

    // Test 2: Generate mock dbt project
    console.info('2️⃣ Testing mock dbt project generation...');
    const projectFiles = generateMockDbtProject({
      projectName: 'test_project',
      companyName: 'TestCo',
    });
    console.info(`✅ Generated ${projectFiles.length} project files\n`);

    // Test 3: Upload files to mock sandbox
    console.info('3️⃣ Testing file upload to mock sandbox...');
    const uploadResult = await addFiles(mockSandbox, projectFiles, {
      baseDestination: 'dbt_test',
    });
    console.info(`✅ Upload result: ${uploadResult.success ? 'Success' : 'Failed'}`);
    console.info(`   Files uploaded: ${uploadResult.uploadedFiles.length}\n`);

    // Test 4: Create test context
    console.info('4️⃣ Testing context creation...');
    const context = createTestContext({
      sandbox: mockSandbox,
      todoList: ['Document staging models', 'Update READMEs'],
    });
    console.info(`✅ Created context with ${context.todoList.length} todos\n`);

    // Test 5: Create workflow input
    console.info('5️⃣ Testing workflow input creation...');
    const input = createTestWorkflowInput({
      message: TEST_MESSAGES.documentAll,
      context,
    });
    console.info(`✅ Created workflow input with message: "${input.message}"\n`);

    // Test 6: Test file variations
    console.info('6️⃣ Testing file variations...');
    const malformedFiles = createMalformedYamlFiles();
    console.info(`   Created ${malformedFiles.length} malformed YAML files`);

    const missingDocsFiles = createFilesWithMissingDocs();
    console.info(`   Created ${missingDocsFiles.length} files with missing docs`);

    // Test 7: List files in mock sandbox
    console.info('\n7️⃣ Testing file listing in mock sandbox...');
    const files = await (
      mockSandbox.fs as unknown as { listDirectory: (path: string) => Promise<string[]> }
    ).listDirectory('dbt_test');
    console.info(`✅ Found ${files.length} files in sandbox`);
    console.info('   Sample files:', files.slice(0, 5).join(', '), '...\n');

    // Cleanup
    // The mock sandbox doesn't have a close method; its in-memory storage is simply dropped
    console.info('🧹 Cleaned up mock sandbox\n');

    console.info('✅ All tests passed successfully!');
  } catch (error) {
    console.error('❌ Test failed:', error);
    process.exit(1);
  }
}

// Run the test
runSimpleTest()
  .then(() => {
    console.info('\n🎉 Simple test completed!');
    process.exit(0);
  })
  .catch((error) => {
    console.error('\n💥 Test error:', error);
    process.exit(1);
  });
@ -145,7 +145,7 @@ export async function uploadMultipleFiles(
          : file.content
        : await fs.readFile(file.path);

      const destination = file.destination || path.basename(file.path);
      const destination = file.destination || file.path;
      const destPath = options?.baseDestination
        ? joinPaths(options.baseDestination, destination)
        : destination;
@ -322,7 +322,7 @@ export async function addFiles(
    return uploadSingleFile(
      sandbox,
      fileInput.path,
      fileInput.destination || path.basename(fileInput.path),
      fileInput.destination || fileInput.path,
      options
    );
  }
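
Both hunks make the same fix: the default destination changes from `path.basename(file.path)` to `file.path`, so uploads preserve each file's relative directory structure instead of flattening everything into the base directory (where nested files with the same basename would collide). A minimal illustration of the difference, using only Node's `path` module with an illustrative path:

import path from 'node:path';

const filePath = 'models/staging/stripe/schema.yml';

// Old default: basename flattens the tree, so every schema.yml lands in the same place
const flattened = path.basename(filePath); // 'schema.yml'

// New default: the relative path is kept, so the sandbox mirrors the project layout
const preserved = filePath; // 'models/staging/stripe/schema.yml'

console.info({ flattened, preserved });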