Start work on file streaming; move resend logic into a dedicated email library; tighten dataset permission checks

This commit is contained in:
dal 2025-04-18 12:21:25 -06:00
parent d4b093e9a8
commit 180a291bc8
No known key found for this signature in database
GPG Key ID: 16F4B0E1E9F61122
15 changed files with 290 additions and 183 deletions

View File

@ -10,6 +10,7 @@ members = [
"libs/sql_analyzer",
"libs/search",
"libs/dataset_security",
"libs/email",
]
resolver = "2"

View File

@ -1197,6 +1197,7 @@ pub struct ModifyFilesParams {
pub files: Vec<FileModification>,
}
/// Represents the output of a file modification tool call
#[derive(Debug, Serialize, Deserialize)]
pub struct ModifyFilesOutput {
pub message: String,

View File

@ -28,6 +28,7 @@ use super::{
common::{generate_deterministic_uuid, validate_metric_ids},
file_types::file::FileWithId,
FileModificationTool,
create_metrics::FailedFileCreation,
};
#[derive(Debug, Serialize, Deserialize, Clone)]
@ -46,6 +47,7 @@ pub struct CreateDashboardFilesOutput {
pub message: String,
pub duration: i64,
pub files: Vec<FileWithId>,
pub failed_files: Vec<FailedFileCreation>,
}
#[derive(Debug, Serialize, Deserialize)]
@ -150,7 +152,7 @@ impl ToolExecutor for CreateDashboardFilesTool {
dashboard_ymls.push(dashboard_yml);
}
Err(e) => {
failed_files.push((file.name, e));
failed_files.push(FailedFileCreation { name: file.name, error: e });
}
}
}
@ -261,10 +263,10 @@ impl ToolExecutor for CreateDashboardFilesTool {
}
Err(e) => {
failed_files.extend(dashboard_records.iter().map(|r| {
(
r.file_name.clone(),
format!("Failed to create dashboard file: {}", e),
)
FailedFileCreation {
name: r.file_name.clone(),
error: format!("Failed to create dashboard file: {}", e),
}
}));
}
}
@ -287,7 +289,7 @@ impl ToolExecutor for CreateDashboardFilesTool {
let failures: Vec<String> = failed_files
.iter()
.map(|(name, error)| format!("Failed to create '{}': {}", name, error))
.map(|failure| format!("Failed to create '{}': {}", failure.name, failure.error))
.collect();
if failures.len() == 1 {
@ -325,6 +327,7 @@ impl ToolExecutor for CreateDashboardFilesTool {
message,
duration,
files: created_files,
failed_files,
})
}

View File

@ -42,11 +42,18 @@ pub struct CreateMetricFilesParams {
pub files: Vec<MetricFileParams>,
}
#[derive(Debug, Serialize, Deserialize)]
/// A single file that could not be created, paired with the reason it failed.
/// Collected by the create-metric/create-dashboard tools and surfaced to the
/// caller in their output structs so failures are reported alongside successes.
pub struct FailedFileCreation {
// Name of the file that failed to be created.
pub name: String,
// Human-readable description of why creation failed.
pub error: String,
}
#[derive(Debug, Serialize, Deserialize)]
/// Result of a metric-file creation tool call: a summary message, elapsed
/// time, the files that were created, and any files that failed.
pub struct CreateMetricFilesOutput {
// Summary message describing successes and failures.
pub message: String,
// Elapsed execution time in milliseconds.
pub duration: i64,
// Metric files that were successfully created.
pub files: Vec<FileWithId>,
// Files that failed to be created, with per-file error details.
pub failed_files: Vec<FailedFileCreation>,
}
#[derive(Debug, Serialize, Deserialize)]
@ -120,7 +127,7 @@ impl ToolExecutor for CreateMetricFilesTool {
results_vec.push((message, results));
}
Err(e) => {
failed_files.push((file_name, e));
failed_files.push(FailedFileCreation { name: file_name, error: e.to_string() });
}
}
}
@ -195,10 +202,10 @@ impl ToolExecutor for CreateMetricFilesTool {
}
Err(e) => {
failed_files.extend(metric_records.iter().map(|r| {
(
r.file_name.clone(),
format!("Failed to create metric file: {}", e),
)
FailedFileCreation {
name: r.file_name.clone(),
error: format!("Failed to create metric file: {}", e),
}
}));
}
}
@ -218,7 +225,7 @@ impl ToolExecutor for CreateMetricFilesTool {
let failures: Vec<String> = failed_files
.iter()
.map(|(name, error)| format!("Failed to create '{}': {}.\n\nPlease recreate the metric from scratch rather than attempting to modify. This error could be due to:\n- Using a dataset that doesn't exist (please reevaluate the available datasets in the chat conversation)\n- Invalid configuration in the metric file\n- Special characters in the metric name or SQL query\n- Syntax errors in the SQL query", name, error))
.map(|failure| format!("Failed to create '{}': {}.\n\nPlease recreate the metric from scratch rather than attempting to modify. This error could be due to:\n- Using a dataset that doesn't exist (please reevaluate the available datasets in the chat conversation)\n- Invalid configuration in the metric file\n- Special characters in the metric name or SQL query\n- Syntax errors in the SQL query", failure.name, failure.error))
.collect();
if failures.len() == 1 {
@ -256,6 +263,7 @@ impl ToolExecutor for CreateMetricFilesTool {
message,
duration,
files: created_files,
failed_files,
})
}

View File

@ -322,12 +322,21 @@ impl ToolExecutor for ModifyDashboardFilesTool {
// Generate output
let duration = start_time.elapsed().as_millis() as i64;
// Construct message based on success/failure counts
let successes_count = batch.files.len();
let failures_count = batch.failed_updates.len();
let message = match (successes_count, failures_count) {
(s, 0) if s > 0 => format!("Successfully modified {} dashboard file{}.", s, if s == 1 { "" } else { "s" }),
(0, f) if f > 0 => format!("Failed to modify {} dashboard file{}.", f, if f == 1 { "" } else { "s" }),
(s, f) if s > 0 && f > 0 => format!("Successfully modified {} dashboard file{}, {} failed.", s, if s == 1 { "" } else { "s" }, f),
_ => "No dashboard files were processed.".to_string(),
};
let mut output = ModifyFilesOutput {
message: format!(
"Modified {} dashboard files and created new versions. {} failures.",
batch.files.len(),
batch.failed_updates.len()
),
// Use the dynamically generated message
message,
duration,
files: Vec::new(),
failed_files: Vec::new(),

View File

@ -386,12 +386,18 @@ impl ToolExecutor for ModifyMetricFilesTool {
}
// Construct final output
let successes_count = batch.files.len();
let failures_count = batch.failed_updates.len();
let message = match (successes_count, failures_count) {
(s, 0) if s > 0 => format!("Successfully modified {} metric file{}.", s, if s == 1 { "" } else { "s" }),
(0, f) if f > 0 => format!("Failed to modify {} metric file{}.", f, if f == 1 { "" } else { "s" }),
(s, f) if s > 0 && f > 0 => format!("Successfully modified {} metric file{}, {} failed.", s, if s == 1 { "" } else { "s" }, f),
_ => "No metric files were processed.".to_string(),
};
let mut output = ModifyFilesOutput {
message: format!(
"Modified {} metric files and created new versions. {} failures.",
batch.files.len(),
batch.failed_updates.len()
),
message,
duration,
files: Vec::new(),
failed_files: Vec::new(),

View File

@ -257,21 +257,28 @@ where
pub async fn has_dataset_access(user_id: &Uuid, dataset_id: &Uuid) -> Result<bool> {
let mut conn = get_pg_pool().get().await.context("DB Error")?; // Get initial connection
// --- Check if Dataset exists and get Organization ID ---
let dataset_org = datasets::table
// --- Check if Dataset exists and get Organization ID and deleted status ---
let dataset_info = datasets::table
.filter(datasets::id.eq(dataset_id))
.filter(datasets::deleted_at.is_null())
.select(datasets::organization_id)
.first::<Uuid>(&mut conn)
// Remove the deleted_at filter here to check status later
.select((datasets::organization_id, datasets::deleted_at))
.first::<(Uuid, Option<DateTime<Utc>>)>(&mut conn)
.await;
let organization_id = match dataset_org {
Ok(org_id) => org_id,
Err(diesel::NotFound) => return Ok(false), // Dataset doesn't exist or is deleted
Err(e) => return Err(e).context("Failed to check dataset existence"),
let (organization_id, deleted_at_status) = match dataset_info {
Ok((org_id, deleted_at)) => (org_id, deleted_at),
Err(diesel::NotFound) => return Ok(false), // Dataset doesn't exist
Err(e) => return Err(e).context("Failed to fetch dataset info"),
};
// --- Check Admin/Querier Access ---
// --- Universal Check: If dataset is deleted, NO ONE has access ---
if deleted_at_status.is_some() {
return Ok(false);
}
// --- Dataset is NOT deleted, proceed with access checks ---
// Check Admin/Querier Access
let admin_access = users_to_organizations::table
.filter(users_to_organizations::user_id.eq(user_id))
.filter(users_to_organizations::organization_id.eq(organization_id))
@ -287,15 +294,18 @@ pub async fn has_dataset_access(user_id: &Uuid, dataset_id: &Uuid) -> Result<boo
| UserOrganizationRole::DataAdmin
| UserOrganizationRole::Querier
) {
// Admins/Queriers have access to non-deleted datasets in their org
return Ok(true);
}
} else if !matches!(admin_access, Err(diesel::NotFound)) {
// Propagate unexpected errors
// Explicitly convert diesel::Error to anyhow::Error
// Propagate unexpected errors from role check
return Err(anyhow::Error::from(admin_access.err().unwrap()))
.context("Error checking admin access");
}
// --- If not Admin/Querier, proceed with detailed permission checks ---
// (No need to check deleted_at again here)
// Drop initial connection before spawning tasks
drop(conn);

34
api/libs/email/Cargo.toml Normal file
View File

@ -0,0 +1,34 @@
[package]
name = "email"
version = "0.1.0"
edition = "2021"
# Dependencies should be inherited from workspace
[dependencies]
anyhow = { workspace = true }
chrono = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
tokio = { workspace = true }
tracing = { workspace = true }
uuid = { workspace = true }
diesel = { workspace = true }
diesel-async = { workspace = true }
# Added dependencies from moved resend logic
resend-rs = { workspace = true }
lazy_static = { workspace = true }
html-escape = { workspace = true }
# Add other workspace dependencies as needed (e.g., related to email sending like reqwest or specific email crates)
# reqwest = { workspace = true, features = ["json"] }
# Development dependencies
[dev-dependencies]
tokio-test = { workspace = true }
# Added test dependency
dotenv = { workspace = true }
# Add other workspace dev dependencies as needed
# Feature flags
[features]
default = []
# Define library-specific features here

View File

@ -217,4 +217,4 @@
</div>
</center>
</body>
</html>
</html>

20
api/libs/email/src/lib.rs Normal file
View File

@ -0,0 +1,20 @@
// //! Email library documentation
// //!
// //! This library contains logic related to sending emails, including invites and notifications, using Resend.
pub use anyhow::{Result, Error};
pub mod resend;
// // pub mod models; // Consider moving structs like CollectionInvite etc. here if they grow complex
// // pub mod utils;
// // mod errors;
// Re-exports public API from the resend module
pub use resend::{send_email, EmailType, CollectionInvite, DashboardInvite, ThreadInvite, InviteToBuster};
// // Example placeholder for where the resend logic might go
// pub async fn resend_email(/* parameters */) -> Result<()> {
// // Implementation to be moved here
// tracing::info!("Resend email logic placeholder");
// Ok(())
// }

View File

@ -6,11 +6,13 @@ use html_escape::encode_text as escape_html;
use resend_rs::{types::CreateEmailBaseOptions, Resend};
lazy_static::lazy_static! {
// TODO: Consider injecting these via a config struct instead of static env vars
static ref RESEND_API_KEY: String = env::var("RESEND_API_KEY").expect("RESEND_API_KEY must be set");
static ref RESEND_CLIENT: Resend = Resend::new(&RESEND_API_KEY);
static ref BUSTER_URL: String = env::var("BUSTER_URL").expect("BUSTER_URL must be set");
}
#[derive(Debug, Clone)] // Added derives for potential broader use
pub struct CollectionInvite {
pub collection_name: String,
pub collection_id: Uuid,
@ -18,6 +20,7 @@ pub struct CollectionInvite {
pub new_user: bool,
}
#[derive(Debug, Clone)]
pub struct DashboardInvite {
pub dashboard_name: String,
pub dashboard_id: Uuid,
@ -25,6 +28,7 @@ pub struct DashboardInvite {
pub new_user: bool,
}
#[derive(Debug, Clone)]
pub struct ThreadInvite {
pub thread_name: String,
pub thread_id: Uuid,
@ -32,11 +36,13 @@ pub struct ThreadInvite {
pub new_user: bool,
}
#[derive(Debug, Clone)]
/// Parameters for an email inviting a user to join Buster itself
/// (as opposed to sharing a specific collection/dashboard/metric).
pub struct InviteToBuster {
// Display name of the person sending the invite.
pub inviter_name: String,
// Name of the organization the recipient is being invited to.
pub organization_name: String,
}
#[derive(Debug, Clone)] // Added derives
pub enum EmailType {
CollectionInvite(CollectionInvite),
DashboardInvite(DashboardInvite),
@ -51,6 +57,7 @@ struct EmailParams {
button_text: &'static str,
}
// Adjusted path for include_str!
const EMAIL_TEMPLATE: &'static str = include_str!("email_template.html");
pub async fn send_email(to_addresses: HashSet<String>, email_type: EmailType) -> Result<()> {
@ -74,16 +81,20 @@ pub async fn send_email(to_addresses: HashSet<String>, email_type: EmailType) ->
let from = "Buster <buster@mail.buster.so>";
// Consider error handling or collecting results if sending individual emails fails
for to_address in to_addresses {
let email =
CreateEmailBaseOptions::new(from, vec![to_address], email_params.subject.clone())
CreateEmailBaseOptions::new(from, vec![to_address.clone()], email_params.subject.clone())
.with_html(&email_html);
// Cloning client and email for the spawned task
let client = RESEND_CLIENT.clone();
tokio::spawn(async move {
match RESEND_CLIENT.emails.send(email).await {
match client.emails.send(email).await {
Ok(_) => (),
Err(e) => {
tracing::error!("Error sending email: {e}");
// Use structured logging
tracing::error!(error = %e, email_recipient = %to_address, "Error sending email");
}
}
});
@ -93,16 +104,16 @@ pub async fn send_email(to_addresses: HashSet<String>, email_type: EmailType) ->
}
fn create_collection_invite_params(collection_invite: CollectionInvite) -> EmailParams {
let email_params = match collection_invite.new_user {
match collection_invite.new_user {
true => EmailParams {
subject: format!(
"{invitee_name} has shared {collection_name} with you",
invitee_name = collection_invite.inviter_name,
"{inviter_name} has shared {collection_name} with you",
inviter_name = collection_invite.inviter_name,
collection_name = collection_invite.collection_name
),
message: format!(
"{invitee_name} has shared {collection_name} with you. To view this collection, please create an account.",
invitee_name = collection_invite.inviter_name,
"{inviter_name} has shared {collection_name} with you. To view this collection, please create an account.",
inviter_name = collection_invite.inviter_name,
collection_name = collection_invite.collection_name
),
button_link: format!(
@ -114,13 +125,13 @@ fn create_collection_invite_params(collection_invite: CollectionInvite) -> Email
},
false => EmailParams {
subject: format!(
"{invitee_name} has shared {collection_name} with you",
invitee_name = collection_invite.inviter_name,
"{inviter_name} has shared {collection_name} with you",
inviter_name = collection_invite.inviter_name,
collection_name = collection_invite.collection_name
),
message: format!(
"{invitee_name} has shared {collection_name} with you",
invitee_name = collection_invite.inviter_name,
"{inviter_name} has shared {collection_name} with you",
inviter_name = collection_invite.inviter_name,
collection_name = collection_invite.collection_name
),
button_link: format!(
@ -130,13 +141,11 @@ fn create_collection_invite_params(collection_invite: CollectionInvite) -> Email
),
button_text: "View Collection",
},
};
email_params
}
}
fn create_dashboard_invite_params(dashboard_invite: DashboardInvite) -> EmailParams {
let email_params = match dashboard_invite.new_user {
match dashboard_invite.new_user {
true => EmailParams {
subject: format!(
"{inviter_name} has invited you to {dashboard_name}",
@ -173,13 +182,11 @@ fn create_dashboard_invite_params(dashboard_invite: DashboardInvite) -> EmailPar
),
button_text: "View Dashboard",
},
};
email_params
}
}
fn create_thread_invite_params(thread_invite: ThreadInvite) -> EmailParams {
let email_params = match thread_invite.new_user {
match thread_invite.new_user {
true => EmailParams {
subject: format!(
"{inviter_name} has invited you to view the metric: {thread_name}",
@ -216,9 +223,7 @@ fn create_thread_invite_params(thread_invite: ThreadInvite) -> EmailParams {
),
button_text: "View Metric",
},
};
email_params
}
}
fn create_invite_to_buster_params(invite_to_buster: InviteToBuster) -> EmailParams {
@ -241,52 +246,4 @@ fn create_invite_to_buster_params(invite_to_buster: InviteToBuster) -> EmailPara
}
}
#[cfg(test)]
mod tests {
use super::*;
use dotenv::dotenv;
#[tokio::test]
async fn test_send_email_to_existing_users() {
dotenv().ok();
let to_addresses = HashSet::from([
"dallin@buster.so".to_string(),
]);
let email_type = EmailType::CollectionInvite(CollectionInvite {
collection_name: "Test Collection <script>alert('xss')</script>".to_string(),
collection_id: Uuid::new_v4(),
inviter_name: "Dallin Bentley <b>test</b>".to_string(),
new_user: false,
});
match send_email(to_addresses, email_type).await {
Ok(_) => assert!(true),
Err(e) => {
println!("Error sending email: {e}");
assert!(false)
}
}
}
#[tokio::test]
async fn test_send_email_to_new_users() {
dotenv().ok();
let to_addresses = HashSet::from([
"dallin@buster.so".to_string(),
]);
let email_type = EmailType::CollectionInvite(CollectionInvite {
collection_name: "Test Collection".to_string(),
collection_id: Uuid::new_v4(),
inviter_name: "Dallin Bentley".to_string(),
new_user: true,
});
match send_email(to_addresses, email_type).await {
Ok(_) => assert!(true),
Err(e) => {
println!("Error sending email: {e}");
assert!(false)
}
}
}
}
// Tests are moved to libs/email/tests/resend_tests.rs

View File

@ -1,3 +1,4 @@
use agents::tools::file_tools::common::ModifyFilesOutput;
use dashmap::DashMap;
use middleware::AuthenticatedUser;
use std::collections::HashSet;
@ -7,8 +8,9 @@ use std::sync::Arc;
use agents::{
tools::{
file_tools::{
common::ModifyFilesOutput, create_dashboards::CreateDashboardFilesOutput,
create_metrics::CreateMetricFilesOutput, search_data_catalog::SearchDataCatalogOutput,
create_dashboards::CreateDashboardFilesOutput,
create_metrics::{CreateMetricFilesOutput}, // Alias to avoid name clash
search_data_catalog::SearchDataCatalogOutput,
},
// Remove the old import
// planning_tools::CreatePlanOutput,
@ -1580,33 +1582,48 @@ fn tool_create_plan(id: String, content: String, elapsed_duration: Duration) ->
// Update tool_create_metrics to require ID and accept duration
fn tool_create_metrics(id: String, content: String, delta_duration: Duration) -> Result<Vec<BusterReasoningMessage>> {
// Parse the CreateMetricFilesOutput from content
// Parse the actual CreateMetricFilesOutput from content
let create_metrics_result = match serde_json::from_str::<CreateMetricFilesOutput>(&content) {
Ok(result) => result,
Err(e) => {
println!("Failed to parse CreateMetricFilesOutput: {:?}", e);
// Return an error reasoning message as a File type
// Return a generic failure message if parsing the whole result fails
return Ok(vec![BusterReasoningMessage::File(BusterReasoningFile {
id,
message_type: "files".to_string(),
title: "Failed to Create Metrics".to_string(),
title: "Failed to process metric creation results".to_string(),
secondary_title: format!("Error: {}", e),
status: "failed".to_string(), // Set status to failed
status: "failed".to_string(),
file_ids: vec![],
files: HashMap::new(),
})]);
}
};
// Remove internal duration calculation
// let duration = (create_metrics_result.duration as f64 / 1000.0 * 10.0).round() / 10.0;
let files_count = create_metrics_result.files.len();
// Use the lengths of the actual fields from the parsed output
let successes_count = create_metrics_result.files.len();
let failures_count = create_metrics_result.failed_files.len();
// Create a map of files
let title = match (successes_count, failures_count) {
(s, 0) if s > 0 => format!("Created {} metric{}", s, if s == 1 { "" } else { "s" }),
(0, f) if f > 0 => format!("{} metric{} failed", f, if f == 1 { "" } else { "s" }),
(s, f) if s > 0 && f > 0 => format!("Created {} metric{}, {} failed", s, if s == 1 { "" } else { "s" }, f),
// Should not happen if parsing succeeded, but handle gracefully
_ => "Processed metric creation".to_string(),
};
let status = if successes_count == 0 && failures_count > 0 {
"failed".to_string()
} else {
"completed".to_string() // Mark completed even if some failed
};
// Create a map of successfully created files
let mut files_map = std::collections::HashMap::new();
let mut file_ids = Vec::new();
// Process each file
// Process each successful file from the actual output
for file in create_metrics_result.files {
let file_id = file.id.to_string();
file_ids.push(file_id.clone());
@ -1615,8 +1632,8 @@ fn tool_create_metrics(id: String, content: String, delta_duration: Duration) ->
id: file_id.clone(),
file_type: "metric".to_string(),
file_name: file.name.clone(),
version_number: 1,
status: "completed".to_string(),
version_number: 1, // Assuming version 1 for new files
status: "completed".to_string(), // Status for this specific file
file: BusterFileContent {
text: Some(file.yml_content),
text_chunk: None,
@ -1628,47 +1645,63 @@ fn tool_create_metrics(id: String, content: String, delta_duration: Duration) ->
files_map.insert(file_id, buster_file);
}
// Create the BusterReasoningFile using elapsed_duration
let buster_file = BusterReasoningMessage::File(BusterReasoningFile {
// Create the BusterReasoningFile using delta_duration and the new title/status
let buster_file_message = BusterReasoningMessage::File(BusterReasoningFile {
id,
message_type: "files".to_string(),
title: format!("Created {} metric files", files_count),
title,
secondary_title: format!("{} seconds", delta_duration.as_secs()), // Use delta_duration
status: "completed".to_string(),
file_ids,
files: files_map,
status, // Use calculated status
file_ids, // Only IDs of successful files
files: files_map, // Only details of successful files
});
Ok(vec![buster_file])
Ok(vec![buster_file_message])
}
// Update tool_modify_metrics to require ID and accept duration
fn tool_modify_metrics(id: String, content: String, delta_duration: Duration) -> Result<Vec<BusterReasoningMessage>> {
// Parse the ModifyFilesOutput from content
// Parse the actual ModifyMetricsFilesOutput from content
let modify_metrics_result = match serde_json::from_str::<ModifyFilesOutput>(&content) {
Ok(result) => result,
Err(e) => {
tracing::error!("Failed to parse ModifyFilesOutput: {:?}", e);
// Return an error reasoning message as a File type
tracing::error!("Failed to parse ModifyMetricsFilesOutput: {:?}", e);
// Return a generic failure message if parsing the whole result fails
return Ok(vec![BusterReasoningMessage::File(BusterReasoningFile {
id,
message_type: "files".to_string(),
title: "Failed to Modify Metrics".to_string(),
title: "Failed to process metric modification results".to_string(),
secondary_title: format!("Error: {}", e),
status: "failed".to_string(), // Set status to failed
status: "failed".to_string(),
file_ids: vec![],
files: HashMap::new(),
})]);
}
};
let files_count = modify_metrics_result.files.len();
// Use the lengths of the actual fields from the parsed output
let successes_count = modify_metrics_result.files.len();
let failures_count = modify_metrics_result.failed_files.len();
// Create a map of files
let title = match (successes_count, failures_count) {
(s, 0) if s > 0 => format!("Modified {} metric{}", s, if s == 1 { "" } else { "s" }),
(0, f) if f > 0 => format!("{} metric modification{} failed", f, if f == 1 { "" } else { "s" }),
(s, f) if s > 0 && f > 0 => format!("Modified {} metric{}, {} failed", s, if s == 1 { "" } else { "s" }, f),
_ => "Processed metric modification".to_string(),
};
let status = if successes_count == 0 && failures_count > 0 {
"failed".to_string()
} else {
"completed".to_string() // Mark completed even if some failed
};
// Create a map of successfully modified files
let mut files_map = std::collections::HashMap::new();
let mut file_ids = Vec::new();
// Process each file
// Process each successful file from the actual output
for file in modify_metrics_result.files {
let file_id = file.id.to_string();
file_ids.push(file_id.clone());
@ -1676,9 +1709,9 @@ fn tool_modify_metrics(id: String, content: String, delta_duration: Duration) ->
let buster_file = BusterFile {
id: file_id.clone(),
file_type: "metric".to_string(),
file_name: file.name.clone(),
version_number: file.version_number,
status: "completed".to_string(),
file_name: file.name.clone(),
version_number: file.version_number, // Use version from result
status: "completed".to_string(), // Status for this specific file
file: BusterFileContent {
text: Some(file.yml_content),
text_chunk: None,
@ -1690,50 +1723,63 @@ fn tool_modify_metrics(id: String, content: String, delta_duration: Duration) ->
files_map.insert(file_id, buster_file);
}
// Create the BusterReasoningFile using elapsed_duration
let buster_file = BusterReasoningMessage::File(BusterReasoningFile {
// Create the BusterReasoningFile using delta_duration and the new title/status
let buster_file_message = BusterReasoningMessage::File(BusterReasoningFile {
id,
message_type: "files".to_string(),
title: format!("Modified {} metric file{}", files_count, if files_count == 1 { "" } else { "s" }),
title,
secondary_title: format!("{} seconds", delta_duration.as_secs()),
status: "completed".to_string(),
file_ids,
files: files_map,
status, // Use calculated status
file_ids, // Only IDs of successful files
files: files_map, // Only details of successful files
});
Ok(vec![buster_file])
Ok(vec![buster_file_message])
}
// Update tool_create_dashboards to require ID and accept duration
fn tool_create_dashboards(id: String, content: String, delta_duration: Duration) -> Result<Vec<BusterReasoningMessage>> {
// Parse the CreateDashboardFilesOutput from content
// Parse the actual CreateDashboardFilesOutput from content
let create_dashboards_result =
match serde_json::from_str::<CreateDashboardFilesOutput>(&content) {
Ok(result) => result,
Err(e) => {
println!("Failed to parse CreateDashboardFilesOutput: {:?}", e);
// Return an error reasoning message as a File type
// Return a generic failure message if parsing the whole result fails
return Ok(vec![BusterReasoningMessage::File(BusterReasoningFile {
id,
message_type: "files".to_string(),
title: "Failed to Create Dashboards".to_string(),
title: "Failed to process dashboard creation results".to_string(),
secondary_title: format!("Error: {}", e),
status: "failed".to_string(), // Set status to failed
status: "failed".to_string(),
file_ids: vec![],
files: HashMap::new(),
})]);
}
};
// Remove internal duration calculation
// let duration = (create_dashboards_result.duration as f64 / 1000.0 * 10.0).round() / 10.0;
let files_count = create_dashboards_result.files.len();
// Use the lengths of the actual fields from the parsed output
let successes_count = create_dashboards_result.files.len();
let failures_count = create_dashboards_result.failed_files.len();
// Create a map of files
let title = match (successes_count, failures_count) {
(s, 0) if s > 0 => format!("Created {} dashboard{}", s, if s == 1 { "" } else { "s" }),
(0, f) if f > 0 => format!("{} dashboard{} failed", f, if f == 1 { "" } else { "s" }),
(s, f) if s > 0 && f > 0 => format!("Created {} dashboard{}, {} failed", s, if s == 1 { "" } else { "s" }, f),
_ => "Processed dashboard creation".to_string(),
};
let status = if successes_count == 0 && failures_count > 0 {
"failed".to_string()
} else {
"completed".to_string() // Mark completed even if some failed
};
// Create a map of successfully created files
let mut files_map = std::collections::HashMap::new();
let mut file_ids = Vec::new();
// Process each file
// Process each successful file from the actual output
for file in create_dashboards_result.files {
let file_id = file.id.to_string();
file_ids.push(file_id.clone());
@ -1742,8 +1788,8 @@ fn tool_create_dashboards(id: String, content: String, delta_duration: Duration)
id: file_id.clone(),
file_type: "dashboard".to_string(),
file_name: file.name.clone(),
version_number: 1,
status: "completed".to_string(),
version_number: 1, // Assuming version 1 for new files
status: "completed".to_string(), // Status for this specific file
file: BusterFileContent {
text: Some(file.yml_content),
text_chunk: None,
@ -1755,47 +1801,62 @@ fn tool_create_dashboards(id: String, content: String, delta_duration: Duration)
files_map.insert(file_id, buster_file);
}
// Create the BusterReasoningFile using elapsed_duration
let buster_file = BusterReasoningMessage::File(BusterReasoningFile {
// Create the BusterReasoningFile using delta_duration and the new title/status
let buster_file_message = BusterReasoningMessage::File(BusterReasoningFile {
id,
message_type: "files".to_string(),
title: format!("Created {} dashboard files", files_count),
title,
secondary_title: format!("{} seconds", delta_duration.as_secs()), // Use delta_duration
status: "completed".to_string(),
file_ids,
files: files_map,
status, // Use calculated status
file_ids, // Only IDs of successful files
files: files_map, // Only details of successful files
});
Ok(vec![buster_file])
Ok(vec![buster_file_message])
}
// Update tool_modify_dashboards to require ID and accept duration
fn tool_modify_dashboards(id: String, content: String, delta_duration: Duration) -> Result<Vec<BusterReasoningMessage>> {
// Parse the ModifyFilesOutput from content
// Parse the actual ModifyDashboardsFilesOutput from content
let modify_dashboards_result = match serde_json::from_str::<ModifyFilesOutput>(&content) {
Ok(result) => result,
Err(e) => {
tracing::error!("Failed to parse ModifyFilesOutput: {:?}", e);
// Return an error reasoning message as a File type
tracing::error!("Failed to parse ModifyDashboardsFilesOutput: {:?}", e);
// Return a generic failure message if parsing the whole result fails
return Ok(vec![BusterReasoningMessage::File(BusterReasoningFile {
id,
message_type: "files".to_string(),
title: "Failed to Modify Dashboards".to_string(),
title: "Failed to process dashboard modification results".to_string(),
secondary_title: format!("Error: {}", e),
status: "failed".to_string(), // Set status to failed
status: "failed".to_string(),
file_ids: vec![],
files: HashMap::new(),
})]);
}
};
let files_count = modify_dashboards_result.files.len();
// Use the lengths of the actual fields from the parsed output
let successes_count = modify_dashboards_result.files.len();
let failures_count = modify_dashboards_result.failed_files.len();
// Create a map of files
let title = match (successes_count, failures_count) {
(s, 0) if s > 0 => format!("Modified {} dashboard{}", s, if s == 1 { "" } else { "s" }),
(0, f) if f > 0 => format!("{} dashboard modification{} failed", f, if f == 1 { "" } else { "s" }),
(s, f) if s > 0 && f > 0 => format!("Modified {} dashboard{}, {} failed", s, if s == 1 { "" } else { "s" }, f),
_ => "Processed dashboard modification".to_string(),
};
let status = if successes_count == 0 && failures_count > 0 {
"failed".to_string()
} else {
"completed".to_string() // Mark completed even if some failed
};
// Create a map of successfully modified files
let mut files_map = std::collections::HashMap::new();
let mut file_ids = Vec::new();
// Process each file
// Process each successful file from the actual output
for file in modify_dashboards_result.files {
let file_id = file.id.to_string();
file_ids.push(file_id.clone());
@ -1804,8 +1865,8 @@ fn tool_modify_dashboards(id: String, content: String, delta_duration: Duration)
id: file_id.clone(),
file_type: "dashboard".to_string(),
file_name: file.name.clone(),
version_number: file.version_number,
status: "completed".to_string(),
version_number: file.version_number, // Use version from result
status: "completed".to_string(), // Status for this specific file
file: BusterFileContent {
text: Some(file.yml_content),
text_chunk: None,
@ -1817,18 +1878,18 @@ fn tool_modify_dashboards(id: String, content: String, delta_duration: Duration)
files_map.insert(file_id, buster_file);
}
// Create the BusterReasoningFile using elapsed_duration
let buster_file = BusterReasoningMessage::File(BusterReasoningFile {
// Create the BusterReasoningFile using delta_duration and the new title/status
let buster_file_message = BusterReasoningMessage::File(BusterReasoningFile {
id,
message_type: "files".to_string(),
title: format!("Modified {} dashboard file{}", files_count, if files_count == 1 { "" } else { "s" }),
title,
secondary_title: format!("{} seconds", delta_duration.as_secs()),
status: "completed".to_string(),
file_ids,
files: files_map,
status, // Use calculated status
file_ids, // Only IDs of successful files
files: files_map, // Only details of successful files
});
Ok(vec![buster_file])
Ok(vec![buster_file_message])
}
// Restore the original tool_data_catalog_search function

View File

@ -1 +0,0 @@
pub mod resend;

View File

@ -1 +0,0 @@
pub mod email;

View File

@ -1,4 +1,3 @@
pub mod clients;
pub mod security;
pub use agents::*;