mirror of https://github.com/buster-so/buster.git

commit fc8ac85280: Merge branch 'staging' into devin/BUS-1465-1753085075

@@ -47,6 +47,7 @@ mockito = "1.2.0"
 mockall = "0.12.1"
 bb8-redis = "0.18.0"
 indexmap = { version = "2.2.6", features = ["serde"] }
+itertools = "0.14"
 once_cell = "1.20.2"
 rustls = { version = "0.23", features = ["ring"] }
 rustls-native-certs = "0.8"

@@ -2,4 +2,4 @@ pub mod message_user_clarifying_question;
 pub mod done;
 
 pub use message_user_clarifying_question::*;
-pub use done::*;
+pub use done::*;

@@ -37,6 +37,7 @@ semantic_layer = { path = "../semantic_layer" }
 
 # Add any handler-specific dependencies here
 dashmap = "5.5.3"
+itertools = { workspace = true }
 
 # Add stored_values dependency
 stored_values = { path = "../stored_values" }

@@ -11,6 +11,7 @@ use database::{
 };
 use diesel::{ExpressionMethods, JoinOnDsl, NullableExpressionMethods, QueryDsl, Queryable};
 use diesel_async::RunQueryDsl;
+use itertools::Itertools;
 use middleware::AuthenticatedUser;
 use sharing::{check_permission_access, compute_effective_permission};
 use tracing;

@@ -156,7 +157,10 @@ pub async fn get_collection_handler(
 name: p.name,
 avatar_url: p.avatar_url,
 })
-.collect::<Vec<BusterShareIndividual>>(),
+.collect::<Vec<BusterShareIndividual>>()
+.into_iter()
+.sorted_by(|a, b| a.email.to_lowercase().cmp(&b.email.to_lowercase()))
+.collect(),
 )
 }
 }

@@ -5,6 +5,7 @@ use chrono::{DateTime, Utc};
 use diesel::{BoolExpressionMethods, ExpressionMethods, JoinOnDsl, QueryDsl, Queryable, Selectable};
 use diesel_async::RunQueryDsl;
 use futures::future::join_all;
+use itertools::Itertools;
 use middleware::AuthenticatedUser;
 use serde_json::Value;
 use serde_yaml;

@@ -390,7 +391,10 @@ pub async fn get_dashboard_handler(
 name: p.name,
 avatar_url: p.avatar_url,
 })
-.collect::<Vec<BusterShareIndividual>>(),
+.collect::<Vec<BusterShareIndividual>>()
+.into_iter()
+.sorted_by(|a, b| a.email.to_lowercase().cmp(&b.email.to_lowercase()))
+.collect(),
 )
 }
 }

@@ -2,6 +2,7 @@ use anyhow::{anyhow, Result};
 use diesel::{BoolExpressionMethods, ExpressionMethods, JoinOnDsl, QueryDsl, Queryable};
 use diesel_async::RunQueryDsl;
 use futures::future::join;
+use itertools::Itertools;
 use middleware::AuthenticatedUser;
 use serde_yaml;
 use sharing::asset_access_checks::check_metric_collection_access;

@@ -464,7 +465,10 @@ pub async fn get_metric_handler(
 name: p.name,
 avatar_url: p.avatar_url,
 })
-.collect::<Vec<crate::metrics::types::BusterShareIndividual>>(),
+.collect::<Vec<crate::metrics::types::BusterShareIndividual>>()
+.into_iter()
+.sorted_by(|a, b| a.email.to_lowercase().cmp(&b.email.to_lowercase()))
+.collect(),
 )
 }
 }

@@ -28,7 +28,7 @@
 "@buster/typescript-config": "workspace:*",
 "@buster/vitest-config": "workspace:*",
 "@mastra/core": "catalog:",
-"@trigger.dev/sdk": "catalog:",
+"@trigger.dev/sdk": "4.0.0-v4-beta.24",
 "ai": "catalog:",
 "braintrust": "catalog:",
 "vitest": "catalog:",

@@ -36,6 +36,6 @@
 "drizzle-orm": "catalog:"
 },
 "devDependencies": {
-"@trigger.dev/build": "catalog:"
+"@trigger.dev/build": "4.0.0-v4-beta.24"
 }
 }

@@ -182,7 +182,7 @@ export const useShareCollection = () => {
 draft.individual_permissions = [
 ...params.map((p) => ({ ...p })),
 ...(draft.individual_permissions || [])
-];
+].sort((a, b) => a.email.localeCompare(b.email));
 });
 });
 },

@@ -208,7 +208,8 @@ export const useUnshareCollection = () => {
 if (!previousData) return previousData;
 return create(previousData, (draft: BusterCollection) => {
 draft.individual_permissions =
-draft.individual_permissions?.filter((t) => !variables.data.includes(t.email)) || [];
+(draft.individual_permissions?.filter((t) => !variables.data.includes(t.email)) || [])
+.sort((a, b) => a.email.localeCompare(b.email));
 });
 });
 },

@@ -231,11 +232,11 @@ export const useUpdateCollectionShare = () => {
 if (!previousData) return previousData;
 return create(previousData, (draft) => {
 draft.individual_permissions =
-draft.individual_permissions?.map((t) => {
+(draft.individual_permissions?.map((t) => {
 const found = params.users?.find((v) => v.email === t.email);
 if (found) return { ...t, ...found };
 return t;
-}) || [];
+}) || []).sort((a, b) => a.email.localeCompare(b.email));
 
 if (params.publicly_accessible !== undefined) {
 draft.publicly_accessible = params.publicly_accessible;

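The collection hooks above, and the dashboard and metric hooks that follow, now keep individual_permissions sorted by email after every optimistic update, analogous to the lowercase-email sort added in the Rust handlers. A standalone illustrative sketch of that sorting rule (the simplified permission shape here is an assumption, not the real BusterCollection type):

interface IndividualPermission {
  email: string;
  role?: string;
}

// localeCompare gives a locale-aware alphabetical order by email, the same rule
// the hooks apply inline after merging, filtering, or mapping permissions.
function sortPermissionsByEmail(permissions: IndividualPermission[]): IndividualPermission[] {
  return [...permissions].sort((a, b) => a.email.localeCompare(b.email));
}

// Example: newly shared users are prepended, then the whole list is sorted once.
const existing: IndividualPermission[] = [{ email: 'zoe@example.com' }];
const added: IndividualPermission[] = [{ email: 'amy@example.com' }];
console.log(sortPermissionsByEmail([...added, ...existing]));
// -> [ { email: 'amy@example.com' }, { email: 'zoe@example.com' } ]
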
@@ -368,7 +368,7 @@ export const useShareDashboard = () => {
 avatar_url: p.avatar_url || null
 })),
 ...(draft.individual_permissions || [])
-];
+].sort((a, b) => a.email.localeCompare(b.email));
 });
 });
 },

@@ -398,7 +398,8 @@ export const useUnshareDashboard = () => {
 if (!previousData) return previousData;
 return create(previousData, (draft) => {
 draft.individual_permissions =
-draft.individual_permissions?.filter((t) => !variables.data.includes(t.email)) || [];
+(draft.individual_permissions?.filter((t) => !variables.data.includes(t.email)) || [])
+.sort((a, b) => a.email.localeCompare(b.email));
 });
 });
 },

@@ -419,11 +420,11 @@ export const useUpdateDashboardShare = () => {
 if (!previousData) return previousData;
 return create(previousData, (draft) => {
 draft.individual_permissions =
-draft.individual_permissions?.map((t) => {
+(draft.individual_permissions?.map((t) => {
 const found = params.users?.find((v) => v.email === t.email);
 if (found) return { ...t, ...found };
 return t;
-}) || [];
+}) || []).sort((a, b) => a.email.localeCompare(b.email));
 
 if (params.publicly_accessible !== undefined) {
 draft.publicly_accessible = params.publicly_accessible;

@@ -233,7 +233,7 @@ export const useShareMetric = () => {
 avatar_url: p.avatar_url || null
 })),
 ...(draft.individual_permissions || [])
-];
+].sort((a, b) => a.email.localeCompare(b.email));
 });
 });
 },

@@ -264,7 +264,8 @@ export const useUnshareMetric = () => {
 if (!previousData) return previousData;
 return create(previousData, (draft: BusterMetric) => {
 draft.individual_permissions =
-draft.individual_permissions?.filter((t) => !variables.data.includes(t.email)) || [];
+(draft.individual_permissions?.filter((t) => !variables.data.includes(t.email)) || [])
+.sort((a, b) => a.email.localeCompare(b.email));
 });
 });
 },

@@ -297,11 +298,11 @@ export const useUpdateMetricShare = () => {
 if (!previousData) return previousData;
 return create(previousData, (draft: BusterMetric) => {
 draft.individual_permissions =
-draft.individual_permissions?.map((t) => {
+(draft.individual_permissions?.map((t) => {
 const found = variables.params.users?.find((v) => v.email === t.email);
 if (found) return { ...t, ...found };
 return t;
-}) || [];
+}) || []).sort((a, b) => a.email.localeCompare(b.email));
 
 if (variables.params.publicly_accessible !== undefined) {
 draft.publicly_accessible = variables.params.publicly_accessible;

@@ -0,0 +1,99 @@
+import { createTool } from '@mastra/core/tools';
+import { wrapTraced } from 'braintrust';
+import { z } from 'zod';
+
+// Input/Output schemas
+const idleInputSchema = z.object({
+  final_response: z
+    .string()
+    .min(1, 'Final response is required')
+    .describe(
+      "The final response message to the user. **MUST** be formatted in Markdown. Use bullet points or other appropriate Markdown formatting. Do not include headers. Do not use the '•' bullet character. Do not include markdown tables."
+    ),
+});
+
+export type IdleToolExecuteInput = z.infer<typeof idleInputSchema>;
+
+/**
+ * Optimistic parsing function for streaming idle tool arguments
+ * Extracts the final_response field as it's being built incrementally
+ */
+export function parseStreamingArgs(
+  accumulatedText: string
+): Partial<z.infer<typeof idleInputSchema>> | null {
+  // Validate input type
+  if (typeof accumulatedText !== 'string') {
+    throw new Error(`parseStreamingArgs expects string input, got ${typeof accumulatedText}`);
+  }
+
+  try {
+    // First try to parse as complete JSON
+    const parsed = JSON.parse(accumulatedText);
+    return {
+      final_response: parsed.final_response || undefined,
+    };
+  } catch (error) {
+    // Only catch JSON parse errors - let other errors bubble up
+    if (error instanceof SyntaxError) {
+      // JSON parsing failed - try regex extraction for partial content
+      // Handle both complete and incomplete strings, accounting for escaped quotes
+      const match = accumulatedText.match(/"final_response"\s*:\s*"((?:[^"\\]|\\.)*)"/);
+      if (match && match[1] !== undefined) {
+        // Unescape the string
+        const unescaped = match[1].replace(/\\"/g, '"').replace(/\\\\/g, '\\');
+        return {
+          final_response: unescaped,
+        };
+      }
+
+      // Try to extract partial string that's still being built (incomplete quote)
+      const partialMatch = accumulatedText.match(/"final_response"\s*:\s*"((?:[^"\\]|\\.*)*)/);
+      if (partialMatch && partialMatch[1] !== undefined) {
+        // Unescape the partial string
+        const unescaped = partialMatch[1].replace(/\\"/g, '"').replace(/\\\\/g, '\\');
+        return {
+          final_response: unescaped,
+        };
+      }
+
+      return null;
+    }
+    // Unexpected error - re-throw with context
+    throw new Error(
+      `Unexpected error in parseStreamingArgs: ${error instanceof Error ? error.message : 'Unknown error'}`
+    );
+  }
+}
+
+const idleOutputSchema = z.object({
+  success: z.boolean().describe('Whether the operation was successful'),
+});
+
+type IdleOutput = z.infer<typeof idleOutputSchema>;
+
+async function processIdle(_input: IdleToolExecuteInput): Promise<IdleOutput> {
+  return {
+    success: true,
+  };
+}
+
+const executeIdle = wrapTraced(
+  async (input: IdleToolExecuteInput): Promise<z.infer<typeof idleOutputSchema>> => {
+    return await processIdle(input);
+  },
+  { name: 'idle-tool' }
+);
+
+// Export the tool
+export const idleTool = createTool({
+  id: 'idle',
+  description:
+    "Marks all remaining unfinished tasks as complete, sends a final response to the user, and enters an idle state. Use this when current work is finished but the agent should remain available for future tasks. This must be in markdown format and not use the '•' bullet character.",
+  inputSchema: idleInputSchema,
+  outputSchema: idleOutputSchema,
+  execute: async ({ context }) => {
+    return await executeIdle(context as IdleToolExecuteInput);
+  },
+});
+
+export default idleTool;

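As a quick illustration of how the streaming parser above behaves, this standalone sketch feeds it a partial and then a complete tool-call payload. The './idle-tool' import path is an assumption about where the new file lives, not something stated in the diff.

import { parseStreamingArgs } from './idle-tool';

// Mid-stream chunk: the string value has no closing quote yet, so JSON.parse throws
// and the partial-string regex fallback returns the text accumulated so far.
console.log(parseStreamingArgs('{"final_response": "Working on'));
// -> { final_response: 'Working on' }

// Complete payload: JSON.parse succeeds and the full value comes back.
console.log(parseStreamingArgs('{"final_response": "All done."}'));
// -> { final_response: 'All done.' }
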
@@ -0,0 +1,144 @@
+import { spawn } from 'node:child_process';
+
+export interface BashCommandParams {
+  command: string;
+  description?: string | undefined;
+  timeout?: number | undefined;
+}
+
+export interface BashExecuteResult {
+  command: string;
+  stdout: string;
+  stderr?: string | undefined;
+  exitCode: number;
+  success: boolean;
+  error?: string | undefined;
+}
+
+async function executeSingleBashCommand(
+  command: string,
+  timeout?: number
+): Promise<{
+  stdout: string;
+  stderr: string;
+  exitCode: number;
+}> {
+  return new Promise((resolve, reject) => {
+    const child = spawn('bash', ['-c', command], {
+      stdio: ['pipe', 'pipe', 'pipe'],
+    });
+
+    let stdout = '';
+    let stderr = '';
+    let timeoutId: NodeJS.Timeout | undefined;
+
+    if (timeout) {
+      timeoutId = setTimeout(() => {
+        child.kill('SIGTERM');
+        reject(new Error(`Command timed out after ${timeout}ms`));
+      }, timeout);
+    }
+
+    child.stdout?.on('data', (data) => {
+      stdout += data.toString();
+    });
+
+    child.stderr?.on('data', (data) => {
+      stderr += data.toString();
+    });
+
+    child.on('close', (code) => {
+      if (timeoutId) {
+        clearTimeout(timeoutId);
+      }
+      resolve({
+        stdout: stdout.trim(),
+        stderr: stderr.trim(),
+        exitCode: code || 0,
+      });
+    });
+
+    child.on('error', (error) => {
+      if (timeoutId) {
+        clearTimeout(timeoutId);
+      }
+      reject(error);
+    });
+  });
+}
+
+export async function executeBashCommandsSafely(
+  commands: BashCommandParams[]
+): Promise<BashExecuteResult[]> {
+  const results: BashExecuteResult[] = [];
+
+  for (const cmd of commands) {
+    try {
+      const result = await executeSingleBashCommand(cmd.command, cmd.timeout);
+
+      results.push({
+        command: cmd.command,
+        stdout: result.stdout,
+        stderr: result.stderr ? result.stderr : undefined,
+        exitCode: result.exitCode,
+        success: result.exitCode === 0,
+        error: result.exitCode !== 0 ? result.stderr || 'Command failed' : undefined,
+      });
+    } catch (error) {
+      results.push({
+        command: cmd.command,
+        stdout: '',
+        stderr: undefined,
+        exitCode: 1,
+        success: false,
+        error: error instanceof Error ? error.message : 'Unknown execution error',
+      });
+    }
+  }
+
+  return results;
+}
+
+export function generateBashExecuteCode(commands: BashCommandParams[]): string {
+  return `
+const { spawnSync } = require('child_process');
+
+function executeSingleBashCommand(command, timeout) {
+  try {
+    const options = {
+      shell: '/bin/bash',
+      encoding: 'utf8',
+      timeout: timeout || undefined,
+    };
+
+    const result = spawnSync('bash', ['-c', command], options);
+
+    return {
+      command,
+      stdout: result.stdout ? result.stdout.trim() : '',
+      stderr: result.stderr ? result.stderr.trim() : undefined,
+      exitCode: result.status !== null ? result.status : 1,
+      success: result.status === 0,
+      error: result.status !== 0 ? (result.stderr ? result.stderr.trim() : 'Command failed') : undefined,
+    };
+  } catch (error) {
+    return {
+      command,
+      stdout: '',
+      stderr: undefined,
+      exitCode: 1,
+      success: false,
+      error: error instanceof Error ? error.message : 'Unknown execution error',
+    };
+  }
+}
+
+function executeBashCommandsConcurrently(commands) {
+  return commands.map((cmd) => executeSingleBashCommand(cmd.command, cmd.timeout));
+}
+
+const commands = ${JSON.stringify(commands)};
+const results = executeBashCommandsConcurrently(commands);
+console.log(JSON.stringify(results));
+`.trim();
+}

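A minimal local driver for the helper above (an illustrative sketch; the './bash-execute-functions' import path is an assumption about the new file's location). It runs two commands without a sandbox and prints the captured results:

import { executeBashCommandsSafely } from './bash-execute-functions';

async function main() {
  const results = await executeBashCommandsSafely([
    { command: 'echo "hello"', description: 'print a greeting' },
    { command: 'exit 3', timeout: 5000 },
  ]);

  for (const r of results) {
    // success mirrors exitCode === 0; error carries stderr (or a fallback message) on failure.
    console.log(r.command, r.exitCode, r.success, r.stdout || r.error);
  }
}

main().catch(console.error);
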
@@ -0,0 +1,222 @@
+import { RuntimeContext } from '@mastra/core/runtime-context';
+import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
+import { z } from 'zod';
+import { type SandboxContext, SandboxContextKey } from '../../context/sandbox-context';
+import { bashExecute } from './bash-execute-tool';
+
+vi.mock('@buster/sandbox', () => ({
+  runTypescript: vi.fn(),
+}));
+
+vi.mock('./bash-execute-functions', () => ({
+  generateBashExecuteCode: vi.fn(),
+  executeBashCommandsSafely: vi.fn(),
+}));
+
+import { runTypescript } from '@buster/sandbox';
+import { executeBashCommandsSafely, generateBashExecuteCode } from './bash-execute-functions';
+
+const mockRunTypescript = vi.mocked(runTypescript);
+const mockGenerateBashExecuteCode = vi.mocked(generateBashExecuteCode);
+const mockExecuteBashCommandsSafely = vi.mocked(executeBashCommandsSafely);
+
+describe('bash-execute-tool', () => {
+  let runtimeContext: RuntimeContext<SandboxContext>;
+
+  beforeEach(() => {
+    vi.clearAllMocks();
+    runtimeContext = new RuntimeContext<SandboxContext>();
+  });
+
+  afterEach(() => {
+    vi.restoreAllMocks();
+  });
+
+  describe('bashExecute tool', () => {
+    it('should have correct tool configuration', () => {
+      expect(bashExecute.id).toBe('bash_execute');
+      expect(bashExecute.description).toContain('Executes bash commands');
+      expect(bashExecute.inputSchema).toBeDefined();
+      expect(bashExecute.outputSchema).toBeDefined();
+    });
+
+    it('should validate input schema correctly', () => {
+      const validInput = {
+        commands: [
+          { command: 'echo "hello"', description: 'test command' },
+          { command: 'ls -la', timeout: 5000 },
+        ],
+      };
+
+      expect(() => bashExecute.inputSchema.parse(validInput)).not.toThrow();
+    });
+
+    it('should execute with sandbox when available', async () => {
+      const mockSandbox = { process: { codeRun: vi.fn() } };
+      runtimeContext.set(SandboxContextKey.Sandbox, mockSandbox as any);
+
+      const input = {
+        commands: [{ command: 'echo "hello"' }],
+      };
+
+      const mockCode = 'generated typescript code';
+      const mockSandboxResult = {
+        result: JSON.stringify([
+          {
+            command: 'echo "hello"',
+            stdout: 'hello',
+            stderr: undefined,
+            exitCode: 0,
+            success: true,
+            error: undefined,
+          },
+        ]),
+        exitCode: 0,
+        stderr: '',
+      };
+
+      mockGenerateBashExecuteCode.mockReturnValue(mockCode);
+      mockRunTypescript.mockResolvedValue(mockSandboxResult);
+
+      const result = await bashExecute.execute({
+        context: input,
+        runtimeContext,
+      });
+
+      expect(mockGenerateBashExecuteCode).toHaveBeenCalledWith(input.commands);
+      expect(mockRunTypescript).toHaveBeenCalledWith(mockSandbox, mockCode);
+      expect(result.results).toHaveLength(1);
+      expect(result.results[0]).toEqual({
+        command: 'echo "hello"',
+        stdout: 'hello',
+        stderr: undefined,
+        exitCode: 0,
+        success: true,
+        error: undefined,
+      });
+    });
+
+    it('should fallback to local execution when sandbox not available', async () => {
+      const input = {
+        commands: [{ command: 'echo "hello"' }],
+      };
+
+      const mockLocalResults = [
+        {
+          command: 'echo "hello"',
+          stdout: 'hello',
+          stderr: undefined,
+          exitCode: 0,
+          success: true,
+          error: undefined,
+        },
+      ];
+
+      mockExecuteBashCommandsSafely.mockResolvedValue(mockLocalResults);
+
+      const result = await bashExecute.execute({
+        context: input,
+        runtimeContext,
+      });
+
+      expect(mockExecuteBashCommandsSafely).toHaveBeenCalledWith(input.commands);
+      expect(result.results).toEqual(mockLocalResults);
+    });
+
+    it('should handle sandbox execution errors', async () => {
+      const mockSandbox = { process: { codeRun: vi.fn() } };
+      runtimeContext.set(SandboxContextKey.Sandbox, mockSandbox as any);
+
+      const input = {
+        commands: [{ command: 'echo "hello"' }],
+      };
+
+      const mockCode = 'generated typescript code';
+      const mockSandboxResult = {
+        result: 'error output',
+        exitCode: 1,
+        stderr: 'Execution failed',
+      };
+
+      mockGenerateBashExecuteCode.mockReturnValue(mockCode);
+      mockRunTypescript.mockResolvedValue(mockSandboxResult);
+
+      const result = await bashExecute.execute({
+        context: input,
+        runtimeContext,
+      });
+
+      expect(result.results).toHaveLength(1);
+      expect(result.results[0]).toEqual({
+        command: 'echo "hello"',
+        stdout: '',
+        stderr: undefined,
+        exitCode: 1,
+        success: false,
+        error: 'Execution error: Sandbox execution failed: Execution failed',
+      });
+    });
+
+    it('should handle execution errors', async () => {
+      const input = {
+        commands: [{ command: 'echo "hello"' }],
+      };
+
+      mockExecuteBashCommandsSafely.mockRejectedValue(new Error('Execution failed'));
+
+      const result = await bashExecute.execute({
+        context: input,
+        runtimeContext,
+      });
+
+      expect(result.results).toHaveLength(1);
+      expect(result.results[0]?.success).toBe(false);
+      expect(result.results[0]?.error).toContain('Execution error');
+    });
+
+    it('should handle empty commands array', async () => {
+      const input = { commands: [] };
+
+      const result = await bashExecute.execute({
+        context: input,
+        runtimeContext,
+      });
+
+      expect(result.results).toHaveLength(0);
+    });
+
+    it('should handle JSON parse errors from sandbox', async () => {
+      const mockSandbox = { process: { codeRun: vi.fn() } };
+      runtimeContext.set(SandboxContextKey.Sandbox, mockSandbox as any);
+
+      const input = {
+        commands: [{ command: 'echo "hello"' }],
+      };
+
+      const mockCode = 'generated typescript code';
+      const mockSandboxResult = {
+        result: 'invalid json output',
+        exitCode: 0,
+        stderr: '',
+      };
+
+      mockGenerateBashExecuteCode.mockReturnValue(mockCode);
+      mockRunTypescript.mockResolvedValue(mockSandboxResult);
+
+      const result = await bashExecute.execute({
+        context: input,
+        runtimeContext,
+      });
+
+      expect(result.results).toHaveLength(1);
+      expect(result.results[0]).toEqual({
+        command: 'echo "hello"',
+        stdout: '',
+        stderr: undefined,
+        exitCode: 1,
+        success: false,
+        error: expect.stringContaining('Failed to parse sandbox output'),
+      });
+    });
+  });
+});

@@ -0,0 +1,113 @@
+import { runTypescript } from '@buster/sandbox';
+import type { RuntimeContext } from '@mastra/core/runtime-context';
+import { createTool } from '@mastra/core/tools';
+import { wrapTraced } from 'braintrust';
+import { z } from 'zod';
+import { type SandboxContext, SandboxContextKey } from '../../context/sandbox-context';
+
+const bashCommandSchema = z.object({
+  command: z.string().describe('The bash command to execute'),
+  description: z.string().optional().describe('Description of what this command does'),
+  timeout: z.number().optional().describe('Timeout in milliseconds'),
+});
+
+const inputSchema = z.object({
+  commands: z
+    .union([bashCommandSchema, z.array(bashCommandSchema)])
+    .describe('Single command or array of bash commands to execute'),
+});
+
+const outputSchema = z.object({
+  results: z.array(
+    z.object({
+      command: z.string(),
+      stdout: z.string(),
+      stderr: z.string().optional(),
+      exitCode: z.number(),
+      success: z.boolean(),
+      error: z.string().optional(),
+    })
+  ),
+});
+
+const executeBashCommands = wrapTraced(
+  async (
+    input: z.infer<typeof inputSchema>,
+    runtimeContext: RuntimeContext<SandboxContext>
+  ): Promise<z.infer<typeof outputSchema>> => {
+    const commands = Array.isArray(input.commands) ? input.commands : [input.commands];
+
+    if (!commands || commands.length === 0) {
+      return { results: [] };
+    }
+
+    try {
+      // Check if sandbox is available in runtime context
+      const sandbox = runtimeContext.get(SandboxContextKey.Sandbox);
+
+      if (sandbox) {
+        const { generateBashExecuteCode } = await import('./bash-execute-functions');
+        const code = generateBashExecuteCode(commands);
+        const result = await runTypescript(sandbox, code);
+
+        if (result.exitCode !== 0) {
+          console.error('Sandbox execution failed. Exit code:', result.exitCode);
+          console.error('Stderr:', result.stderr);
+          console.error('Stdout:', result.result);
+          throw new Error(`Sandbox execution failed: ${result.stderr || 'Unknown error'}`);
+        }
+
+        let bashResults: Array<{
+          command: string;
+          stdout: string;
+          stderr?: string;
+          exitCode: number;
+          success: boolean;
+          error?: string;
+        }>;
+        try {
+          bashResults = JSON.parse(result.result.trim());
+        } catch (parseError) {
+          console.error('Failed to parse sandbox output:', result.result);
+          throw new Error(
+            `Failed to parse sandbox output: ${parseError instanceof Error ? parseError.message : 'Unknown parse error'}`
+          );
+        }
+
+        return { results: bashResults };
+      }
+
+      const { executeBashCommandsSafely } = await import('./bash-execute-functions');
+      const bashResults = await executeBashCommandsSafely(commands);
+      return { results: bashResults };
+    } catch (error) {
+      return {
+        results: commands.map((cmd) => ({
+          command: cmd.command,
+          stdout: '',
+          stderr: undefined,
+          exitCode: 1,
+          success: false,
+          error: `Execution error: ${error instanceof Error ? error.message : 'Unknown error'}`,
+        })),
+      };
+    }
+  },
+  { name: 'bash-execute-tool' }
+);
+
+export const bashExecute = createTool({
+  id: 'bash_execute',
+  description: 'Executes bash commands and captures stdout, stderr, and exit codes',
+  inputSchema,
+  outputSchema,
+  execute: async ({
+    context,
+    runtimeContext,
+  }: {
+    context: z.infer<typeof inputSchema>;
+    runtimeContext: RuntimeContext<SandboxContext>;
+  }) => {
+    return await executeBashCommands(context, runtimeContext);
+  },
+});

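For context on how this tool is meant to be called, here is a small illustrative driver that mirrors the test file above: it builds a runtime context with no sandbox set, so execution falls back to the local helper, then invokes the tool directly. The relative import paths are assumptions about where the new files live.

import { RuntimeContext } from '@mastra/core/runtime-context';
import type { SandboxContext } from '../../context/sandbox-context';
import { bashExecute } from './bash-execute-tool';

async function run() {
  // No sandbox registered under SandboxContextKey.Sandbox, so the tool uses
  // executeBashCommandsSafely locally instead of generating sandbox code.
  const runtimeContext = new RuntimeContext<SandboxContext>();

  const { results } = await bashExecute.execute({
    context: { commands: [{ command: 'ls -la', description: 'list files' }] },
    runtimeContext,
  });

  for (const r of results) {
    console.log(`${r.command} -> exit ${r.exitCode}`, r.success ? r.stdout : r.error);
  }
}

run().catch(console.error);
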
@@ -0,0 +1 @@
+export { bashExecute } from './bash-execute-tool';

@@ -1,4 +1,5 @@
 export { doneTool } from './communication-tools/done-tool';
+export { idleTool } from './communication-tools/idle-tool';
 export { respondWithoutAnalysis } from './communication-tools/respond-without-analysis';
 export { submitThoughts } from './communication-tools/submit-thoughts-tool';
 export { messageUserClarifyingQuestion } from './communication-tools/message-user-clarifying-question';
@@ -13,4 +14,5 @@ export { editFiles } from './file-tools/edit-files-tool/edit-files-tool';
 export { readFiles } from './file-tools/read-files-tool/read-files-tool';
 export { createFiles } from './file-tools/create-files-tool/create-file-tool';
 export { grepSearch } from './file-tools/grep-search-tool/grep-search-tool';
+export { bashExecute } from './file-tools';
 export { deleteFiles } from './file-tools/delete-files-tool/delete-files-tool';

@@ -12,12 +12,9 @@ catalogs:
 '@supabase/supabase-js':
 specifier: ^2.50.0
 version: 2.50.2
-'@trigger.dev/build':
-specifier: ^4.0.0-v4-beta.23
-version: 4.0.0-v4-beta.23
 '@trigger.dev/sdk':
-specifier: ^4.0.0-v4-beta.23
-version: 4.0.0-v4-beta.23
+specifier: ^4.0.0-v4-beta.24
+version: 4.0.0-v4-beta.24
 ai:
 specifier: ^4.0.0
 version: 4.3.16

@@ -150,7 +147,7 @@ importers:
 version: 2.50.2
 '@trigger.dev/sdk':
 specifier: 'catalog:'
-version: 4.0.0-v4-beta.23(ai@4.3.16(react@18.3.1)(zod@3.25.1))(zod@3.25.1)
+version: 4.0.0-v4-beta.24(ai@4.3.16(react@18.3.1)(zod@3.25.1))(zod@3.25.1)
 ai:
 specifier: 'catalog:'
 version: 4.3.16(react@18.3.1)(zod@3.25.1)

@@ -212,8 +209,8 @@ importers:
 specifier: 'catalog:'
 version: 0.10.8(openapi-types@12.1.3)(react@18.3.1)(zod@3.25.1)
 '@trigger.dev/sdk':
-specifier: 'catalog:'
-version: 4.0.0-v4-beta.23(ai@4.3.16(react@18.3.1)(zod@3.25.1))(zod@3.25.1)
+specifier: 4.0.0-v4-beta.24
+version: 4.0.0-v4-beta.24(ai@4.3.16(react@18.3.1)(zod@3.25.1))(zod@3.25.1)
 ai:
 specifier: 'catalog:'
 version: 4.3.16(react@18.3.1)(zod@3.25.1)

@@ -231,8 +228,8 @@ importers:
 version: 3.25.1
 devDependencies:
 '@trigger.dev/build':
-specifier: 'catalog:'
-version: 4.0.0-v4-beta.23(typescript@5.8.3)
+specifier: 4.0.0-v4-beta.24
+version: 4.0.0-v4-beta.24(typescript@5.8.3)
 
 apps/web:
 dependencies:

@@ -5039,16 +5036,16 @@ packages:
 resolution: {integrity: sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A==}
 engines: {node: '>= 10'}
 
-'@trigger.dev/build@4.0.0-v4-beta.23':
-resolution: {integrity: sha512-azK1qRVIWmuu4a2iPpWspqAuJdlldxOaqLfZFbmU2gXXAgzM8xqlPc09MHm3aYbugCoXL60DIUM1D+huFNCSEQ==}
+'@trigger.dev/build@4.0.0-v4-beta.24':
+resolution: {integrity: sha512-XciG8nq40IZwHNTg9Evi9A1O4YmxtVKvU/ArYgW78EfjqApoxMZHs71p0r/g2eZCgucOLaOhzJlPjteiK4Li8Q==}
 engines: {node: '>=18.20.0'}
 
-'@trigger.dev/core@4.0.0-v4-beta.23':
-resolution: {integrity: sha512-n8XPKzotMAHtZTcChdAcddCKoDhKp5ZXDU2U3tnLbIL1LAtWxvKW56fzuFBmf6e69wb9rrFL+xyOsf5YIoH/rg==}
+'@trigger.dev/core@4.0.0-v4-beta.24':
+resolution: {integrity: sha512-qMPX0J9X0XxYJLHx9/bX1TE0eVw5i09fQyVn093JXB9nzcOoT8qHN9Aq7JCF/9TQOGe4vz5haMzuZECpNpgXSw==}
 engines: {node: '>=18.20.0'}
 
-'@trigger.dev/sdk@4.0.0-v4-beta.23':
-resolution: {integrity: sha512-evlvT/KzODjYT+ZrKFQxwPN2roIxKX1V96lJSB4c4+ecqPfY0INf/CdISCeVcGuGj2WZu1vlRV6jBdPm3NO0Iw==}
+'@trigger.dev/sdk@4.0.0-v4-beta.24':
+resolution: {integrity: sha512-VbpH0lpg40JNb++Xy3VVzGgPCRhFLcig+hKP6mj84BiX1+dU7y1/ohICS2bYIbjEDEcxIvEx1A0+KXLqIqkFKQ==}
 engines: {node: '>=18.20.0'}
 peerDependencies:
 ai: ^4.2.0

@@ -16865,9 +16862,9 @@ snapshots:
 
 '@tootallnate/once@2.0.0': {}
 
-'@trigger.dev/build@4.0.0-v4-beta.23(typescript@5.8.3)':
+'@trigger.dev/build@4.0.0-v4-beta.24(typescript@5.8.3)':
 dependencies:
-'@trigger.dev/core': 4.0.0-v4-beta.23
+'@trigger.dev/core': 4.0.0-v4-beta.24
 pkg-types: 1.3.1
 tinyglobby: 0.2.14
 tsconfck: 3.1.3(typescript@5.8.3)

@@ -16877,7 +16874,7 @@ snapshots:
 - typescript
 - utf-8-validate
 
-'@trigger.dev/core@4.0.0-v4-beta.23':
+'@trigger.dev/core@4.0.0-v4-beta.24':
 dependencies:
 '@bugsnag/cuid': 3.2.1
 '@electric-sql/client': 1.0.0-beta.1

@@ -16918,12 +16915,12 @@ snapshots:
 - supports-color
 - utf-8-validate
 
-'@trigger.dev/sdk@4.0.0-v4-beta.23(ai@4.3.16(react@18.3.1)(zod@3.25.1))(zod@3.25.1)':
+'@trigger.dev/sdk@4.0.0-v4-beta.24(ai@4.3.16(react@18.3.1)(zod@3.25.1))(zod@3.25.1)':
 dependencies:
 '@opentelemetry/api': 1.9.0
 '@opentelemetry/api-logs': 0.52.1
 '@opentelemetry/semantic-conventions': 1.25.1
-'@trigger.dev/core': 4.0.0-v4-beta.23
+'@trigger.dev/core': 4.0.0-v4-beta.24
 chalk: 5.4.1
 cronstrue: 2.59.0
 debug: 4.4.1

@@ -12,8 +12,8 @@ packages:
 catalog:
 "@mastra/core": "^0.10.8"
 "@supabase/supabase-js": "^2.50.0"
-"@trigger.dev/build": "^4.0.0-v4-beta.23"
-"@trigger.dev/sdk": "^4.0.0-v4-beta.23"
+"@trigger.dev/build": "^4.0.0-v4-beta.24"
+"@trigger.dev/sdk": "^4.0.0-v4-beta.24"
 ai: "^4.0.0"
 axios: "^1.10.0"
 "braintrust": "^0.0.209"