diff --git a/api/Cargo.toml b/api/Cargo.toml index 2c1570d9e..295eb574b 100644 --- a/api/Cargo.toml +++ b/api/Cargo.toml @@ -10,6 +10,7 @@ members = [ "libs/sql_analyzer", "libs/search", "libs/dataset_security", + "libs/email", ] resolver = "2" diff --git a/api/libs/agents/src/tools/categories/file_tools/common.rs b/api/libs/agents/src/tools/categories/file_tools/common.rs index 76f7c9531..2241fe5a1 100644 --- a/api/libs/agents/src/tools/categories/file_tools/common.rs +++ b/api/libs/agents/src/tools/categories/file_tools/common.rs @@ -1197,6 +1197,7 @@ pub struct ModifyFilesParams { pub files: Vec, } +/// Represents the output of a file modification tool call #[derive(Debug, Serialize, Deserialize)] pub struct ModifyFilesOutput { pub message: String, diff --git a/api/libs/agents/src/tools/categories/file_tools/create_dashboards.rs b/api/libs/agents/src/tools/categories/file_tools/create_dashboards.rs index 407dc94d9..10653a38c 100644 --- a/api/libs/agents/src/tools/categories/file_tools/create_dashboards.rs +++ b/api/libs/agents/src/tools/categories/file_tools/create_dashboards.rs @@ -28,6 +28,7 @@ use super::{ common::{generate_deterministic_uuid, validate_metric_ids}, file_types::file::FileWithId, FileModificationTool, + create_metrics::FailedFileCreation, }; #[derive(Debug, Serialize, Deserialize, Clone)] @@ -46,6 +47,7 @@ pub struct CreateDashboardFilesOutput { pub message: String, pub duration: i64, pub files: Vec, + pub failed_files: Vec, } #[derive(Debug, Serialize, Deserialize)] @@ -150,7 +152,7 @@ impl ToolExecutor for CreateDashboardFilesTool { dashboard_ymls.push(dashboard_yml); } Err(e) => { - failed_files.push((file.name, e)); + failed_files.push(FailedFileCreation { name: file.name, error: e }); } } } @@ -261,10 +263,10 @@ impl ToolExecutor for CreateDashboardFilesTool { } Err(e) => { failed_files.extend(dashboard_records.iter().map(|r| { - ( - r.file_name.clone(), - format!("Failed to create dashboard file: {}", e), - ) + FailedFileCreation { + name: r.file_name.clone(), + error: format!("Failed to create dashboard file: {}", e), + } })); } } @@ -287,7 +289,7 @@ impl ToolExecutor for CreateDashboardFilesTool { let failures: Vec = failed_files .iter() - .map(|(name, error)| format!("Failed to create '{}': {}", name, error)) + .map(|failure| format!("Failed to create '{}': {}", failure.name, failure.error)) .collect(); if failures.len() == 1 { @@ -325,6 +327,7 @@ impl ToolExecutor for CreateDashboardFilesTool { message, duration, files: created_files, + failed_files, }) } diff --git a/api/libs/agents/src/tools/categories/file_tools/create_metrics.rs b/api/libs/agents/src/tools/categories/file_tools/create_metrics.rs index df2efe126..720ec2c7f 100644 --- a/api/libs/agents/src/tools/categories/file_tools/create_metrics.rs +++ b/api/libs/agents/src/tools/categories/file_tools/create_metrics.rs @@ -42,11 +42,18 @@ pub struct CreateMetricFilesParams { pub files: Vec, } +#[derive(Debug, Serialize, Deserialize)] +pub struct FailedFileCreation { + pub name: String, + pub error: String, +} + #[derive(Debug, Serialize, Deserialize)] pub struct CreateMetricFilesOutput { pub message: String, pub duration: i64, pub files: Vec, + pub failed_files: Vec, } #[derive(Debug, Serialize, Deserialize)] @@ -120,7 +127,7 @@ impl ToolExecutor for CreateMetricFilesTool { results_vec.push((message, results)); } Err(e) => { - failed_files.push((file_name, e)); + failed_files.push(FailedFileCreation { name: file_name, error: e.to_string() }); } } } @@ -195,10 +202,10 @@ impl ToolExecutor for 
CreateMetricFilesTool { } Err(e) => { failed_files.extend(metric_records.iter().map(|r| { - ( - r.file_name.clone(), - format!("Failed to create metric file: {}", e), - ) + FailedFileCreation { + name: r.file_name.clone(), + error: format!("Failed to create metric file: {}", e), + } })); } } @@ -218,7 +225,7 @@ impl ToolExecutor for CreateMetricFilesTool { let failures: Vec = failed_files .iter() - .map(|(name, error)| format!("Failed to create '{}': {}.\n\nPlease recreate the metric from scratch rather than attempting to modify. This error could be due to:\n- Using a dataset that doesn't exist (please reevaluate the available datasets in the chat conversation)\n- Invalid configuration in the metric file\n- Special characters in the metric name or SQL query\n- Syntax errors in the SQL query", name, error)) + .map(|failure| format!("Failed to create '{}': {}.\n\nPlease recreate the metric from scratch rather than attempting to modify. This error could be due to:\n- Using a dataset that doesn't exist (please reevaluate the available datasets in the chat conversation)\n- Invalid configuration in the metric file\n- Special characters in the metric name or SQL query\n- Syntax errors in the SQL query", failure.name, failure.error)) .collect(); if failures.len() == 1 { @@ -256,6 +263,7 @@ impl ToolExecutor for CreateMetricFilesTool { message, duration, files: created_files, + failed_files, }) } diff --git a/api/libs/agents/src/tools/categories/file_tools/modify_dashboards.rs b/api/libs/agents/src/tools/categories/file_tools/modify_dashboards.rs index 3bc35ede4..3487524a4 100644 --- a/api/libs/agents/src/tools/categories/file_tools/modify_dashboards.rs +++ b/api/libs/agents/src/tools/categories/file_tools/modify_dashboards.rs @@ -322,12 +322,21 @@ impl ToolExecutor for ModifyDashboardFilesTool { // Generate output let duration = start_time.elapsed().as_millis() as i64; + + // Construct message based on success/failure counts + let successes_count = batch.files.len(); + let failures_count = batch.failed_updates.len(); + + let message = match (successes_count, failures_count) { + (s, 0) if s > 0 => format!("Successfully modified {} dashboard file{}.", s, if s == 1 { "" } else { "s" }), + (0, f) if f > 0 => format!("Failed to modify {} dashboard file{}.", f, if f == 1 { "" } else { "s" }), + (s, f) if s > 0 && f > 0 => format!("Successfully modified {} dashboard file{}, {} failed.", s, if s == 1 { "" } else { "s" }, f), + _ => "No dashboard files were processed.".to_string(), + }; + let mut output = ModifyFilesOutput { - message: format!( - "Modified {} dashboard files and created new versions. 
{} failures.", - batch.files.len(), - batch.failed_updates.len() - ), + // Use the dynamically generated message + message, duration, files: Vec::new(), failed_files: Vec::new(), diff --git a/api/libs/agents/src/tools/categories/file_tools/modify_metrics.rs b/api/libs/agents/src/tools/categories/file_tools/modify_metrics.rs index 2a7adabfb..8f078a502 100644 --- a/api/libs/agents/src/tools/categories/file_tools/modify_metrics.rs +++ b/api/libs/agents/src/tools/categories/file_tools/modify_metrics.rs @@ -386,12 +386,18 @@ impl ToolExecutor for ModifyMetricFilesTool { } // Construct final output + let successes_count = batch.files.len(); + let failures_count = batch.failed_updates.len(); + + let message = match (successes_count, failures_count) { + (s, 0) if s > 0 => format!("Successfully modified {} metric file{}.", s, if s == 1 { "" } else { "s" }), + (0, f) if f > 0 => format!("Failed to modify {} metric file{}.", f, if f == 1 { "" } else { "s" }), + (s, f) if s > 0 && f > 0 => format!("Successfully modified {} metric file{}, {} failed.", s, if s == 1 { "" } else { "s" }, f), + _ => "No metric files were processed.".to_string(), + }; + let mut output = ModifyFilesOutput { - message: format!( - "Modified {} metric files and created new versions. {} failures.", - batch.files.len(), - batch.failed_updates.len() - ), + message, duration, files: Vec::new(), failed_files: Vec::new(), diff --git a/api/libs/dataset_security/src/lib.rs b/api/libs/dataset_security/src/lib.rs index a5079eefc..ee1ca89dc 100644 --- a/api/libs/dataset_security/src/lib.rs +++ b/api/libs/dataset_security/src/lib.rs @@ -257,21 +257,28 @@ where pub async fn has_dataset_access(user_id: &Uuid, dataset_id: &Uuid) -> Result { let mut conn = get_pg_pool().get().await.context("DB Error")?; // Get initial connection - // --- Check if Dataset exists and get Organization ID --- - let dataset_org = datasets::table + // --- Check if Dataset exists and get Organization ID and deleted status --- + let dataset_info = datasets::table .filter(datasets::id.eq(dataset_id)) - .filter(datasets::deleted_at.is_null()) - .select(datasets::organization_id) - .first::(&mut conn) + // Remove the deleted_at filter here to check status later + .select((datasets::organization_id, datasets::deleted_at)) + .first::<(Uuid, Option>)>(&mut conn) .await; - let organization_id = match dataset_org { - Ok(org_id) => org_id, - Err(diesel::NotFound) => return Ok(false), // Dataset doesn't exist or is deleted - Err(e) => return Err(e).context("Failed to check dataset existence"), + let (organization_id, deleted_at_status) = match dataset_info { + Ok((org_id, deleted_at)) => (org_id, deleted_at), + Err(diesel::NotFound) => return Ok(false), // Dataset doesn't exist + Err(e) => return Err(e).context("Failed to fetch dataset info"), }; - // --- Check Admin/Querier Access --- + // --- Universal Check: If dataset is deleted, NO ONE has access --- + if deleted_at_status.is_some() { + return Ok(false); + } + + // --- Dataset is NOT deleted, proceed with access checks --- + + // Check Admin/Querier Access let admin_access = users_to_organizations::table .filter(users_to_organizations::user_id.eq(user_id)) .filter(users_to_organizations::organization_id.eq(organization_id)) @@ -287,15 +294,18 @@ pub async fn has_dataset_access(user_id: &Uuid, dataset_id: &Uuid) -> Result - \ No newline at end of file + \ No newline at end of file diff --git a/api/libs/email/src/lib.rs b/api/libs/email/src/lib.rs new file mode 100644 index 000000000..48bbbde6b --- /dev/null +++ 
b/api/libs/email/src/lib.rs @@ -0,0 +1,20 @@ +// //! Email library documentation +// //! +// //! This library contains logic related to sending emails, including invites and notifications, using Resend. + +pub use anyhow::{Result, Error}; + +pub mod resend; +// // pub mod models; // Consider moving structs like CollectionInvite etc. here if they grow complex +// // pub mod utils; +// // mod errors; + +// Re-exports public API from the resend module +pub use resend::{send_email, EmailType, CollectionInvite, DashboardInvite, ThreadInvite, InviteToBuster}; + +// // Example placeholder for where the resend logic might go +// pub async fn resend_email(/* parameters */) -> Result<()> { +// // Implementation to be moved here +// tracing::info!("Resend email logic placeholder"); +// Ok(()) +// } \ No newline at end of file diff --git a/api/server/src/utils/clients/email/resend.rs b/api/libs/email/src/resend.rs similarity index 75% rename from api/server/src/utils/clients/email/resend.rs rename to api/libs/email/src/resend.rs index a5903c89e..b51fc20a5 100644 --- a/api/server/src/utils/clients/email/resend.rs +++ b/api/libs/email/src/resend.rs @@ -6,11 +6,13 @@ use html_escape::encode_text as escape_html; use resend_rs::{types::CreateEmailBaseOptions, Resend}; lazy_static::lazy_static! { + // TODO: Consider injecting these via a config struct instead of static env vars static ref RESEND_API_KEY: String = env::var("RESEND_API_KEY").expect("RESEND_API_KEY must be set"); static ref RESEND_CLIENT: Resend = Resend::new(&RESEND_API_KEY); static ref BUSTER_URL: String = env::var("BUSTER_URL").expect("BUSTER_URL must be set"); } +#[derive(Debug, Clone)] // Added derives for potential broader use pub struct CollectionInvite { pub collection_name: String, pub collection_id: Uuid, @@ -18,6 +20,7 @@ pub struct CollectionInvite { pub new_user: bool, } +#[derive(Debug, Clone)] pub struct DashboardInvite { pub dashboard_name: String, pub dashboard_id: Uuid, @@ -25,6 +28,7 @@ pub struct DashboardInvite { pub new_user: bool, } +#[derive(Debug, Clone)] pub struct ThreadInvite { pub thread_name: String, pub thread_id: Uuid, @@ -32,11 +36,13 @@ pub struct ThreadInvite { pub new_user: bool, } +#[derive(Debug, Clone)] pub struct InviteToBuster { pub inviter_name: String, pub organization_name: String, } +#[derive(Debug, Clone)] // Added derives pub enum EmailType { CollectionInvite(CollectionInvite), DashboardInvite(DashboardInvite), @@ -51,6 +57,7 @@ struct EmailParams { button_text: &'static str, } +// Adjusted path for include_str! 
const EMAIL_TEMPLATE: &'static str = include_str!("email_template.html"); pub async fn send_email(to_addresses: HashSet, email_type: EmailType) -> Result<()> { @@ -74,16 +81,20 @@ pub async fn send_email(to_addresses: HashSet, email_type: EmailType) -> let from = "Buster "; + // Consider error handling or collecting results if sending individual emails fails for to_address in to_addresses { let email = - CreateEmailBaseOptions::new(from, vec![to_address], email_params.subject.clone()) + CreateEmailBaseOptions::new(from, vec![to_address.clone()], email_params.subject.clone()) .with_html(&email_html); + // Cloning client and email for the spawned task + let client = RESEND_CLIENT.clone(); tokio::spawn(async move { - match RESEND_CLIENT.emails.send(email).await { + match client.emails.send(email).await { Ok(_) => (), Err(e) => { - tracing::error!("Error sending email: {e}"); + // Use structured logging + tracing::error!(error = %e, email_recipient = %to_address, "Error sending email"); } } }); @@ -93,16 +104,16 @@ pub async fn send_email(to_addresses: HashSet, email_type: EmailType) -> } fn create_collection_invite_params(collection_invite: CollectionInvite) -> EmailParams { - let email_params = match collection_invite.new_user { + match collection_invite.new_user { true => EmailParams { subject: format!( - "{invitee_name} has shared {collection_name} with you", - invitee_name = collection_invite.inviter_name, + "{inviter_name} has shared {collection_name} with you", + inviter_name = collection_invite.inviter_name, collection_name = collection_invite.collection_name ), message: format!( - "{invitee_name} has shared {collection_name} with you. To view this collection, please create an account.", - invitee_name = collection_invite.inviter_name, + "{inviter_name} has shared {collection_name} with you. 
To view this collection, please create an account.", + inviter_name = collection_invite.inviter_name, collection_name = collection_invite.collection_name ), button_link: format!( @@ -114,13 +125,13 @@ fn create_collection_invite_params(collection_invite: CollectionInvite) -> Email }, false => EmailParams { subject: format!( - "{invitee_name} has shared {collection_name} with you", - invitee_name = collection_invite.inviter_name, + "{inviter_name} has shared {collection_name} with you", + inviter_name = collection_invite.inviter_name, collection_name = collection_invite.collection_name ), message: format!( - "{invitee_name} has shared {collection_name} with you", - invitee_name = collection_invite.inviter_name, + "{inviter_name} has shared {collection_name} with you", + inviter_name = collection_invite.inviter_name, collection_name = collection_invite.collection_name ), button_link: format!( @@ -130,13 +141,11 @@ fn create_collection_invite_params(collection_invite: CollectionInvite) -> Email ), button_text: "View Collection", }, - }; - - email_params + } } fn create_dashboard_invite_params(dashboard_invite: DashboardInvite) -> EmailParams { - let email_params = match dashboard_invite.new_user { + match dashboard_invite.new_user { true => EmailParams { subject: format!( "{inviter_name} has invited you to {dashboard_name}", @@ -173,13 +182,11 @@ fn create_dashboard_invite_params(dashboard_invite: DashboardInvite) -> EmailPar ), button_text: "View Dashboard", }, - }; - - email_params + } } fn create_thread_invite_params(thread_invite: ThreadInvite) -> EmailParams { - let email_params = match thread_invite.new_user { + match thread_invite.new_user { true => EmailParams { subject: format!( "{inviter_name} has invited you to view the metric: {thread_name}", @@ -216,9 +223,7 @@ fn create_thread_invite_params(thread_invite: ThreadInvite) -> EmailParams { ), button_text: "View Metric", }, - }; - - email_params + } } fn create_invite_to_buster_params(invite_to_buster: InviteToBuster) -> EmailParams { @@ -241,52 +246,4 @@ fn create_invite_to_buster_params(invite_to_buster: InviteToBuster) -> EmailPara } } -#[cfg(test)] -mod tests { - use super::*; - use dotenv::dotenv; - - #[tokio::test] - async fn test_send_email_to_existing_users() { - dotenv().ok(); - let to_addresses = HashSet::from([ - "dallin@buster.so".to_string(), - ]); - let email_type = EmailType::CollectionInvite(CollectionInvite { - collection_name: "Test Collection ".to_string(), - collection_id: Uuid::new_v4(), - inviter_name: "Dallin Bentley test".to_string(), - new_user: false, - }); - - match send_email(to_addresses, email_type).await { - Ok(_) => assert!(true), - Err(e) => { - println!("Error sending email: {e}"); - assert!(false) - } - } - } - - #[tokio::test] - async fn test_send_email_to_new_users() { - dotenv().ok(); - let to_addresses = HashSet::from([ - "dallin@buster.so".to_string(), - ]); - let email_type = EmailType::CollectionInvite(CollectionInvite { - collection_name: "Test Collection".to_string(), - collection_id: Uuid::new_v4(), - inviter_name: "Dallin Bentley".to_string(), - new_user: true, - }); - - match send_email(to_addresses, email_type).await { - Ok(_) => assert!(true), - Err(e) => { - println!("Error sending email: {e}"); - assert!(false) - } - } - } -} +// Tests are moved to libs/email/tests/resend_tests.rs \ No newline at end of file diff --git a/api/libs/handlers/src/chats/post_chat_handler.rs b/api/libs/handlers/src/chats/post_chat_handler.rs index 76cf6ec06..c9c59c353 100644 --- 
a/api/libs/handlers/src/chats/post_chat_handler.rs +++ b/api/libs/handlers/src/chats/post_chat_handler.rs @@ -1,3 +1,4 @@ +use agents::tools::file_tools::common::ModifyFilesOutput; use dashmap::DashMap; use middleware::AuthenticatedUser; use std::collections::HashSet; @@ -7,8 +8,9 @@ use std::sync::Arc; use agents::{ tools::{ file_tools::{ - common::ModifyFilesOutput, create_dashboards::CreateDashboardFilesOutput, - create_metrics::CreateMetricFilesOutput, search_data_catalog::SearchDataCatalogOutput, + create_dashboards::CreateDashboardFilesOutput, + create_metrics::{CreateMetricFilesOutput}, // Alias to avoid name clash + search_data_catalog::SearchDataCatalogOutput, }, // Remove the old import // planning_tools::CreatePlanOutput, @@ -1580,33 +1582,48 @@ fn tool_create_plan(id: String, content: String, elapsed_duration: Duration) -> // Update tool_create_metrics to require ID and accept duration fn tool_create_metrics(id: String, content: String, delta_duration: Duration) -> Result> { - // Parse the CreateMetricFilesOutput from content + // Parse the actual CreateMetricFilesOutput from content let create_metrics_result = match serde_json::from_str::(&content) { Ok(result) => result, Err(e) => { println!("Failed to parse CreateMetricFilesOutput: {:?}", e); - // Return an error reasoning message as a File type + // Return a generic failure message if parsing the whole result fails return Ok(vec![BusterReasoningMessage::File(BusterReasoningFile { id, message_type: "files".to_string(), - title: "Failed to Create Metrics".to_string(), + title: "Failed to process metric creation results".to_string(), secondary_title: format!("Error: {}", e), - status: "failed".to_string(), // Set status to failed + status: "failed".to_string(), file_ids: vec![], files: HashMap::new(), })]); } }; - // Remove internal duration calculation - // let duration = (create_metrics_result.duration as f64 / 1000.0 * 10.0).round() / 10.0; - let files_count = create_metrics_result.files.len(); + // Use the lengths of the actual fields from the parsed output + let successes_count = create_metrics_result.files.len(); + let failures_count = create_metrics_result.failed_files.len(); - // Create a map of files + let title = match (successes_count, failures_count) { + (s, 0) if s > 0 => format!("Created {} metric{}", s, if s == 1 { "" } else { "s" }), + (0, f) if f > 0 => format!("{} metric{} failed", f, if f == 1 { "" } else { "s" }), + (s, f) if s > 0 && f > 0 => format!("Created {} metric{}, {} failed", s, if s == 1 { "" } else { "s" }, f), + // Should not happen if parsing succeeded, but handle gracefully + _ => "Processed metric creation".to_string(), + }; + + let status = if successes_count == 0 && failures_count > 0 { + "failed".to_string() + } else { + "completed".to_string() // Mark completed even if some failed + }; + + + // Create a map of successfully created files let mut files_map = std::collections::HashMap::new(); let mut file_ids = Vec::new(); - // Process each file + // Process each successful file from the actual output for file in create_metrics_result.files { let file_id = file.id.to_string(); file_ids.push(file_id.clone()); @@ -1615,8 +1632,8 @@ fn tool_create_metrics(id: String, content: String, delta_duration: Duration) -> id: file_id.clone(), file_type: "metric".to_string(), file_name: file.name.clone(), - version_number: 1, - status: "completed".to_string(), + version_number: 1, // Assuming version 1 for new files + status: "completed".to_string(), // Status for this specific file file: 
BusterFileContent { text: Some(file.yml_content), text_chunk: None, @@ -1628,47 +1645,63 @@ fn tool_create_metrics(id: String, content: String, delta_duration: Duration) -> files_map.insert(file_id, buster_file); } - // Create the BusterReasoningFile using elapsed_duration - let buster_file = BusterReasoningMessage::File(BusterReasoningFile { + // Create the BusterReasoningFile using delta_duration and the new title/status + let buster_file_message = BusterReasoningMessage::File(BusterReasoningFile { id, message_type: "files".to_string(), - title: format!("Created {} metric files", files_count), + title, secondary_title: format!("{} seconds", delta_duration.as_secs()), // Use delta_duration - status: "completed".to_string(), - file_ids, - files: files_map, + status, // Use calculated status + file_ids, // Only IDs of successful files + files: files_map, // Only details of successful files }); - Ok(vec![buster_file]) + Ok(vec![buster_file_message]) } // Update tool_modify_metrics to require ID and accept duration fn tool_modify_metrics(id: String, content: String, delta_duration: Duration) -> Result> { - // Parse the ModifyFilesOutput from content + // Parse the actual ModifyMetricsFilesOutput from content let modify_metrics_result = match serde_json::from_str::(&content) { Ok(result) => result, Err(e) => { - tracing::error!("Failed to parse ModifyFilesOutput: {:?}", e); - // Return an error reasoning message as a File type + tracing::error!("Failed to parse ModifyMetricsFilesOutput: {:?}", e); + // Return a generic failure message if parsing the whole result fails return Ok(vec![BusterReasoningMessage::File(BusterReasoningFile { id, message_type: "files".to_string(), - title: "Failed to Modify Metrics".to_string(), + title: "Failed to process metric modification results".to_string(), secondary_title: format!("Error: {}", e), - status: "failed".to_string(), // Set status to failed + status: "failed".to_string(), file_ids: vec![], files: HashMap::new(), })]); } }; - let files_count = modify_metrics_result.files.len(); + // Use the lengths of the actual fields from the parsed output + let successes_count = modify_metrics_result.files.len(); + let failures_count = modify_metrics_result.failed_files.len(); - // Create a map of files + let title = match (successes_count, failures_count) { + (s, 0) if s > 0 => format!("Modified {} metric{}", s, if s == 1 { "" } else { "s" }), + (0, f) if f > 0 => format!("{} metric modification{} failed", f, if f == 1 { "" } else { "s" }), + (s, f) if s > 0 && f > 0 => format!("Modified {} metric{}, {} failed", s, if s == 1 { "" } else { "s" }, f), + _ => "Processed metric modification".to_string(), + }; + + let status = if successes_count == 0 && failures_count > 0 { + "failed".to_string() + } else { + "completed".to_string() // Mark completed even if some failed + }; + + + // Create a map of successfully modified files let mut files_map = std::collections::HashMap::new(); let mut file_ids = Vec::new(); - // Process each file + // Process each successful file from the actual output for file in modify_metrics_result.files { let file_id = file.id.to_string(); file_ids.push(file_id.clone()); @@ -1676,9 +1709,9 @@ fn tool_modify_metrics(id: String, content: String, delta_duration: Duration) -> let buster_file = BusterFile { id: file_id.clone(), file_type: "metric".to_string(), - file_name: file.name.clone(), - version_number: file.version_number, - status: "completed".to_string(), + file_name: file.name.clone(), + version_number: file.version_number, // Use version 
from result + status: "completed".to_string(), // Status for this specific file file: BusterFileContent { text: Some(file.yml_content), text_chunk: None, @@ -1690,50 +1723,63 @@ fn tool_modify_metrics(id: String, content: String, delta_duration: Duration) -> files_map.insert(file_id, buster_file); } - // Create the BusterReasoningFile using elapsed_duration - let buster_file = BusterReasoningMessage::File(BusterReasoningFile { + // Create the BusterReasoningFile using delta_duration and the new title/status + let buster_file_message = BusterReasoningMessage::File(BusterReasoningFile { id, message_type: "files".to_string(), - title: format!("Modified {} metric file{}", files_count, if files_count == 1 { "" } else { "s" }), + title, secondary_title: format!("{} seconds", delta_duration.as_secs()), - status: "completed".to_string(), - file_ids, - files: files_map, + status, // Use calculated status + file_ids, // Only IDs of successful files + files: files_map, // Only details of successful files }); - Ok(vec![buster_file]) + Ok(vec![buster_file_message]) } // Update tool_create_dashboards to require ID and accept duration fn tool_create_dashboards(id: String, content: String, delta_duration: Duration) -> Result> { - // Parse the CreateDashboardFilesOutput from content + // Parse the actual CreateDashboardFilesOutput from content let create_dashboards_result = match serde_json::from_str::(&content) { Ok(result) => result, Err(e) => { println!("Failed to parse CreateDashboardFilesOutput: {:?}", e); - // Return an error reasoning message as a File type + // Return a generic failure message if parsing the whole result fails return Ok(vec![BusterReasoningMessage::File(BusterReasoningFile { id, message_type: "files".to_string(), - title: "Failed to Create Dashboards".to_string(), + title: "Failed to process dashboard creation results".to_string(), secondary_title: format!("Error: {}", e), - status: "failed".to_string(), // Set status to failed + status: "failed".to_string(), file_ids: vec![], files: HashMap::new(), })]); } }; - // Remove internal duration calculation - // let duration = (create_dashboards_result.duration as f64 / 1000.0 * 10.0).round() / 10.0; - let files_count = create_dashboards_result.files.len(); + // Use the lengths of the actual fields from the parsed output + let successes_count = create_dashboards_result.files.len(); + let failures_count = create_dashboards_result.failed_files.len(); - // Create a map of files + let title = match (successes_count, failures_count) { + (s, 0) if s > 0 => format!("Created {} dashboard{}", s, if s == 1 { "" } else { "s" }), + (0, f) if f > 0 => format!("{} dashboard{} failed", f, if f == 1 { "" } else { "s" }), + (s, f) if s > 0 && f > 0 => format!("Created {} dashboard{}, {} failed", s, if s == 1 { "" } else { "s" }, f), + _ => "Processed dashboard creation".to_string(), + }; + + let status = if successes_count == 0 && failures_count > 0 { + "failed".to_string() + } else { + "completed".to_string() // Mark completed even if some failed + }; + + // Create a map of successfully created files let mut files_map = std::collections::HashMap::new(); let mut file_ids = Vec::new(); - // Process each file + // Process each successful file from the actual output for file in create_dashboards_result.files { let file_id = file.id.to_string(); file_ids.push(file_id.clone()); @@ -1742,8 +1788,8 @@ fn tool_create_dashboards(id: String, content: String, delta_duration: Duration) id: file_id.clone(), file_type: "dashboard".to_string(), file_name: 
file.name.clone(), - version_number: 1, - status: "completed".to_string(), + version_number: 1, // Assuming version 1 for new files + status: "completed".to_string(), // Status for this specific file file: BusterFileContent { text: Some(file.yml_content), text_chunk: None, @@ -1755,47 +1801,62 @@ fn tool_create_dashboards(id: String, content: String, delta_duration: Duration) files_map.insert(file_id, buster_file); } - // Create the BusterReasoningFile using elapsed_duration - let buster_file = BusterReasoningMessage::File(BusterReasoningFile { + // Create the BusterReasoningFile using delta_duration and the new title/status + let buster_file_message = BusterReasoningMessage::File(BusterReasoningFile { id, message_type: "files".to_string(), - title: format!("Created {} dashboard files", files_count), + title, secondary_title: format!("{} seconds", delta_duration.as_secs()), // Use delta_duration - status: "completed".to_string(), - file_ids, - files: files_map, + status, // Use calculated status + file_ids, // Only IDs of successful files + files: files_map, // Only details of successful files }); - Ok(vec![buster_file]) + Ok(vec![buster_file_message]) } // Update tool_modify_dashboards to require ID and accept duration fn tool_modify_dashboards(id: String, content: String, delta_duration: Duration) -> Result> { - // Parse the ModifyFilesOutput from content + // Parse the actual ModifyDashboardsFilesOutput from content let modify_dashboards_result = match serde_json::from_str::(&content) { Ok(result) => result, Err(e) => { - tracing::error!("Failed to parse ModifyFilesOutput: {:?}", e); - // Return an error reasoning message as a File type + tracing::error!("Failed to parse ModifyDashboardsFilesOutput: {:?}", e); + // Return a generic failure message if parsing the whole result fails return Ok(vec![BusterReasoningMessage::File(BusterReasoningFile { id, message_type: "files".to_string(), - title: "Failed to Modify Dashboards".to_string(), + title: "Failed to process dashboard modification results".to_string(), secondary_title: format!("Error: {}", e), - status: "failed".to_string(), // Set status to failed + status: "failed".to_string(), file_ids: vec![], files: HashMap::new(), })]); } }; - let files_count = modify_dashboards_result.files.len(); + // Use the lengths of the actual fields from the parsed output + let successes_count = modify_dashboards_result.files.len(); + let failures_count = modify_dashboards_result.failed_files.len(); - // Create a map of files + let title = match (successes_count, failures_count) { + (s, 0) if s > 0 => format!("Modified {} dashboard{}", s, if s == 1 { "" } else { "s" }), + (0, f) if f > 0 => format!("{} dashboard modification{} failed", f, if f == 1 { "" } else { "s" }), + (s, f) if s > 0 && f > 0 => format!("Modified {} dashboard{}, {} failed", s, if s == 1 { "" } else { "s" }, f), + _ => "Processed dashboard modification".to_string(), + }; + + let status = if successes_count == 0 && failures_count > 0 { + "failed".to_string() + } else { + "completed".to_string() // Mark completed even if some failed + }; + + // Create a map of successfully modified files let mut files_map = std::collections::HashMap::new(); let mut file_ids = Vec::new(); - // Process each file + // Process each successful file from the actual output for file in modify_dashboards_result.files { let file_id = file.id.to_string(); file_ids.push(file_id.clone()); @@ -1804,8 +1865,8 @@ fn tool_modify_dashboards(id: String, content: String, delta_duration: Duration) id: file_id.clone(), 
file_type: "dashboard".to_string(), file_name: file.name.clone(), - version_number: file.version_number, - status: "completed".to_string(), + version_number: file.version_number, // Use version from result + status: "completed".to_string(), // Status for this specific file file: BusterFileContent { text: Some(file.yml_content), text_chunk: None, @@ -1817,18 +1878,18 @@ fn tool_modify_dashboards(id: String, content: String, delta_duration: Duration) files_map.insert(file_id, buster_file); } - // Create the BusterReasoningFile using elapsed_duration - let buster_file = BusterReasoningMessage::File(BusterReasoningFile { + // Create the BusterReasoningFile using delta_duration and the new title/status + let buster_file_message = BusterReasoningMessage::File(BusterReasoningFile { id, message_type: "files".to_string(), - title: format!("Modified {} dashboard file{}", files_count, if files_count == 1 { "" } else { "s" }), + title, secondary_title: format!("{} seconds", delta_duration.as_secs()), - status: "completed".to_string(), - file_ids, - files: files_map, + status, // Use calculated status + file_ids, // Only IDs of successful files + files: files_map, // Only details of successful files }); - Ok(vec![buster_file]) + Ok(vec![buster_file_message]) } // Restore the original tool_data_catalog_search function diff --git a/api/server/src/utils/clients/email/mod.rs b/api/server/src/utils/clients/email/mod.rs deleted file mode 100644 index 37248f80c..000000000 --- a/api/server/src/utils/clients/email/mod.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod resend; diff --git a/api/server/src/utils/clients/mod.rs b/api/server/src/utils/clients/mod.rs deleted file mode 100644 index aa5f45d47..000000000 --- a/api/server/src/utils/clients/mod.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod email; diff --git a/api/server/src/utils/mod.rs b/api/server/src/utils/mod.rs index 12f063180..bb9d7c4d9 100644 --- a/api/server/src/utils/mod.rs +++ b/api/server/src/utils/mod.rs @@ -1,4 +1,3 @@ -pub mod clients; pub mod security; pub use agents::*;