Remove commented-out file modification tools and update file handling

This commit is contained in:
dal 2025-02-24 10:45:16 -07:00
parent 5c3f3acd6a
commit 4182d83339
No known key found for this signature in database
GPG Key ID: 16F4B0E1E9F61122
16 changed files with 350 additions and 937 deletions

View File

@ -8,9 +8,7 @@ use uuid::Uuid;
use crate::routes::ws::threads_and_messages::threads_router::ThreadEvent;
use litellm::{Message, MessageProgress, ToolCall};
use crate::utils::tools::file_tools::create_files::CreateFilesOutput;
use crate::utils::tools::file_tools::file_types::file::FileEnum;
use crate::utils::tools::file_tools::modify_files::ModifyFilesParams;
use crate::utils::tools::file_tools::open_files::OpenFilesOutput;
use crate::utils::tools::file_tools::search_data_catalog::SearchDataCatalogOutput;
use crate::utils::tools::file_tools::search_files::SearchFilesOutput;
@ -258,7 +256,7 @@ fn transform_tool_message(
"search_data_catalog" => tool_data_catalog_search(id, content, progress),
"stored_values_search" => tool_stored_values_search(id, content, progress),
"search_files" => tool_file_search(id, content, progress),
"create_files" => tool_create_file(id, content, progress),
// "create_files" => tool_create_file(id, content, progress),
"modify_files" => tool_modify_file(id, content, progress),
"open_files" => tool_open_files(id, content, progress),
"send_message_to_user" => tool_send_message_to_user(id, content, progress),
@ -293,7 +291,7 @@ fn transform_assistant_tool_message(
"stored_values_search" => assistant_stored_values_search(id, progress, initial),
"search_files" => assistant_file_search(id, progress, initial),
"create_files" => assistant_create_file(id, tool_calls, progress),
"modify_files" => assistant_modify_file(id, tool_calls, progress),
// "modify_files" => assistant_modify_file(id, tool_calls, progress),
"open_files" => assistant_open_files(id, progress, initial),
"send_message_to_user" => assistant_send_message_to_user(id, tool_calls, progress),
_ => Err(anyhow::anyhow!("Unsupported tool name")),
@ -794,102 +792,102 @@ fn process_assistant_create_file(tool_call: &ToolCall) -> Result<Vec<BusterThrea
}
}
fn assistant_modify_file(
id: Option<String>,
tool_calls: Vec<ToolCall>,
progress: Option<MessageProgress>,
) -> Result<Vec<BusterThreadMessage>> {
if let Some(progress) = progress {
match progress {
MessageProgress::InProgress => {
// Try to parse the tool call arguments to get file metadata
if let Some(tool_call) = tool_calls.first() {
if let Ok(params) =
serde_json::from_str::<ModifyFilesParams>(&tool_call.function.arguments)
{
if let Some(file) = params.files.first() {
return Ok(vec![BusterThreadMessage::Thought(BusterThought {
id: id.unwrap_or_else(|| Uuid::new_v4().to_string()),
thought_type: "thought".to_string(),
thought_title: format!(
"Modifying {} file '{}'...",
file.file_type, file.file_name
),
thought_secondary_title: "".to_string(),
thoughts: None,
status: "loading".to_string(),
})]);
}
}
}
// Fall back to generic message if we can't parse the metadata
let id = id.unwrap_or_else(|| Uuid::new_v4().to_string());
// fn assistant_modify_file(
// id: Option<String>,
// tool_calls: Vec<ToolCall>,
// progress: Option<MessageProgress>,
// ) -> Result<Vec<BusterThreadMessage>> {
// if let Some(progress) = progress {
// match progress {
// MessageProgress::InProgress => {
// // Try to parse the tool call arguments to get file metadata
// if let Some(tool_call) = tool_calls.first() {
// if let Ok(params) =
// serde_json::from_str::<ModifyFilesParams>(&tool_call.function.arguments)
// {
// if let Some(file) = params.files.first() {
// return Ok(vec![BusterThreadMessage::Thought(BusterThought {
// id: id.unwrap_or_else(|| Uuid::new_v4().to_string()),
// thought_type: "thought".to_string(),
// thought_title: format!(
// "Modifying {} file '{}'...",
// file.file_type, file.file_name
// ),
// thought_secondary_title: "".to_string(),
// thoughts: None,
// status: "loading".to_string(),
// })]);
// }
// }
// }
// // Fall back to generic message if we can't parse the metadata
// let id = id.unwrap_or_else(|| Uuid::new_v4().to_string());
Ok(vec![BusterThreadMessage::Thought(BusterThought {
id,
thought_type: "thought".to_string(),
thought_title: "Modifying file...".to_string(),
thought_secondary_title: "".to_string(),
thoughts: None,
status: "loading".to_string(),
})])
}
_ => Err(anyhow::anyhow!(
"Assistant modify file only supports in progress."
)),
}
} else {
Err(anyhow::anyhow!("Assistant modify file requires progress."))
}
}
// Ok(vec![BusterThreadMessage::Thought(BusterThought {
// id,
// thought_type: "thought".to_string(),
// thought_title: "Modifying file...".to_string(),
// thought_secondary_title: "".to_string(),
// thoughts: None,
// status: "loading".to_string(),
// })])
// }
// _ => Err(anyhow::anyhow!(
// "Assistant modify file only supports in progress."
// )),
// }
// } else {
// Err(anyhow::anyhow!("Assistant modify file requires progress."))
// }
// }
fn tool_create_file(
id: Option<String>,
content: String,
progress: Option<MessageProgress>,
) -> Result<Vec<BusterThreadMessage>> {
if let Some(progress) = progress {
match progress {
MessageProgress::Complete => {
// Parse the content to get file information using CreateFilesOutput
let create_files_result = match serde_json::from_str::<CreateFilesOutput>(&content)
{
Ok(result) => result,
Err(_) => return Ok(vec![]), // Silently ignore parsing errors
};
let mut messages = Vec::new();
// fn tool_create_file(
// id: Option<String>,
// content: String,
// progress: Option<MessageProgress>,
// ) -> Result<Vec<BusterThreadMessage>> {
// if let Some(progress) = progress {
// match progress {
// MessageProgress::Complete => {
// // Parse the content to get file information using CreateFilesOutput
// let create_files_result = match serde_json::from_str::<CreateFilesOutput>(&content)
// {
// Ok(result) => result,
// Err(_) => return Ok(vec![]), // Silently ignore parsing errors
// };
// let mut messages = Vec::new();
for file in create_files_result.files {
let (name, file_type, content) = (file.name, file.file_type, file.yml_content);
// for file in create_files_result.files {
// let (name, file_type, content) = (file.name, file.file_type, file.yml_content);
let mut current_lines = Vec::new();
for (i, line) in content.lines().enumerate() {
current_lines.push(BusterFileLine {
line_number: i + 1,
text: line.to_string(),
});
}
// let mut current_lines = Vec::new();
// for (i, line) in content.lines().enumerate() {
// current_lines.push(BusterFileLine {
// line_number: i + 1,
// text: line.to_string(),
// });
// }
messages.push(BusterThreadMessage::File(BusterFileMessage {
id: name.clone(),
message_type: "file".to_string(),
file_type,
file_name: name,
version_number: 1,
version_id: Uuid::new_v4().to_string(),
status: "completed".to_string(),
file: Some(current_lines),
}));
}
// messages.push(BusterThreadMessage::File(BusterFileMessage {
// id: name.clone(),
// message_type: "file".to_string(),
// file_type,
// file_name: name,
// version_number: 1,
// version_id: Uuid::new_v4().to_string(),
// status: "completed".to_string(),
// file: Some(current_lines),
// }));
// }
Ok(messages)
}
_ => Err(anyhow::anyhow!("Tool create file only supports complete.")),
}
} else {
Err(anyhow::anyhow!("Tool create file requires progress."))
}
}
// Ok(messages)
// }
// _ => Err(anyhow::anyhow!("Tool create file only supports complete.")),
// }
// } else {
// Err(anyhow::anyhow!("Tool create file requires progress."))
// }
// }
fn tool_modify_file(
id: Option<String>,

View File

@ -383,6 +383,7 @@ impl Agent {
user_id: thread.user_id.to_string(),
session_id: thread.id.to_string(),
}),
store: Some(true),
..Default::default()
};
@ -442,7 +443,9 @@ impl Agent {
);
// Broadcast the tool message as soon as we receive it
self.get_stream_sender().await.send(Ok(tool_message.clone()))?;
self.get_stream_sender()
.await
.send(Ok(tool_message.clone()))?;
// Update thread with tool response
self.update_current_thread(tool_message.clone()).await?;
@ -474,7 +477,12 @@ impl Agent {
}
/// Get a reference to the tools map
pub async fn get_tools(&self) -> tokio::sync::RwLockReadGuard<'_, HashMap<String, Box<dyn ToolExecutor<Output = Value, Params = Value> + Send + Sync>>> {
pub async fn get_tools(
&self,
) -> tokio::sync::RwLockReadGuard<
'_,
HashMap<String, Box<dyn ToolExecutor<Output = Value, Params = Value> + Send + Sync>>,
> {
self.tools.read().await
}
}
@ -577,7 +585,12 @@ mod tests {
}
impl WeatherTool {
async fn send_progress(&self, content: String, tool_id: String, progress: MessageProgress) -> Result<()> {
async fn send_progress(
&self,
content: String,
tool_id: String,
progress: MessageProgress,
) -> Result<()> {
let message = Message::tool(
None,
content,
@ -596,7 +609,12 @@ mod tests {
type Params = Value;
async fn execute(&self, params: Self::Params) -> Result<Self::Output> {
self.send_progress("Fetching weather data...".to_string(), "123".to_string(), MessageProgress::InProgress).await?;
self.send_progress(
"Fetching weather data...".to_string(),
"123".to_string(),
MessageProgress::InProgress,
)
.await?;
// Simulate a delay
tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
@ -606,7 +624,12 @@ mod tests {
"unit": "fahrenheit"
});
self.send_progress(serde_json::to_string(&result)?, "123".to_string(), MessageProgress::Complete).await?;
self.send_progress(
serde_json::to_string(&result)?,
"123".to_string(),
MessageProgress::Complete,
)
.await?;
Ok(result)
}

View File

@ -9,8 +9,7 @@ use crate::utils::{
tools::{
agents_as_tools::dashboard_agent_tool::DashboardAgentOutput,
file_tools::{
CreateDashboardFilesTool, CreateMetricFilesTool, ModifyDashboardFilesTool,
ModifyMetricFilesTool,
CreateDashboardFilesTool, CreateMetricFilesTool, FilterDashboardFilesTool, ModifyDashboardFilesTool, ModifyMetricFilesTool
},
IntoValueTool, ToolExecutor,
},
@ -30,6 +29,7 @@ impl DashboardAgent {
let modify_dashboard_tool = ModifyDashboardFilesTool::new(Arc::clone(&self.agent));
let create_metric_tool = CreateMetricFilesTool::new(Arc::clone(&self.agent));
let modify_metric_tool = ModifyMetricFilesTool::new(Arc::clone(&self.agent));
let filter_dashboard_tool = FilterDashboardFilesTool::new(Arc::clone(&self.agent));
// Add tools to the agent
self.agent
@ -56,6 +56,12 @@ impl DashboardAgent {
modify_metric_tool.into_value_tool(),
)
.await;
self.agent
.add_tool(
filter_dashboard_tool.get_name(),
filter_dashboard_tool.into_value_tool(),
)
.await;
Ok(())
}
@ -123,4 +129,9 @@ Follow these detailed instructions to decide whether to call create a new dashbo
Your Overall Goal
Your objective is to ensure that the dashboards in the system remain relevant, unique, and up-to-date with the latest user requirements. Analyze the provided context carefully, then determine whether you need to create a new dashboard or modify an existing one (or create/modify metrics, or filter dashboards). Finally, invoke the correct tool using an array of YAML files formatted exactly as specified above, or the appropriate parameters for `filter_dashboard`.
### Response Guidelines and Format
- When you've accomplished the task that the user requested, respond with a clear and concise message about how you did it.
- Do not include yml in your response.
"##;

View File

@ -116,4 +116,8 @@ Once you determine that your analysis is adequate, you have finished your wo
- Use explicit ordering for custom buckets or categories.
---
Your objective is to thoroughly explore the data in order to address the user's request, gather all relevant metrics, spot potential relationships or insights, and present a comprehensive view rather than focusing on a single measure.
### Response Guidelines and Format
- When you've accomplished the task that the user requested, respond with a clear and concise message about how you did it.
- Do not include yml in your response.
"##;

View File

@ -1,6 +1,5 @@
use anyhow::Result;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::broadcast;
@ -11,7 +10,6 @@ use crate::utils::tools::file_tools::SendAssetsToUserTool;
use crate::utils::{
agent::{agent::AgentError, Agent, AgentExt, AgentThread},
tools::{
agents_as_tools::ExploratoryAgentTool,
file_tools::{SearchDataCatalogTool, SearchFilesTool},
IntoValueTool, ToolExecutor,
},
@ -51,7 +49,6 @@ impl ManagerAgent {
let search_files_tool = SearchFilesTool::new(Arc::clone(&self.agent));
let create_or_modify_metrics_tool = MetricAgentTool::new(Arc::clone(&self.agent));
let create_or_modify_dashboards_tool = DashboardAgentTool::new(Arc::clone(&self.agent));
let exploratory_agent_tool = ExploratoryAgentTool::new(Arc::clone(&self.agent));
// Add tools to the agent
self.agent
@ -78,12 +75,6 @@ impl ManagerAgent {
create_or_modify_dashboards_tool.into_value_tool(),
)
.await;
self.agent
.add_tool(
exploratory_agent_tool.get_name(),
exploratory_agent_tool.into_value_tool(),
)
.await;
if include_send_assets {
let send_assets_to_user = SendAssetsToUserTool::new(Arc::clone(&self.agent));
@ -141,7 +132,7 @@ impl ManagerAgent {
const MANAGER_AGENT_PROMPT: &str = r##"
### Role & Task
You are an expert analytics and data engineer who helps non-technical users get fast, accurate answers to their analytics questions. Your name is Buster.
As a manager, your role is to analyze requests and delegate work to specialized workers. You can either use tools directly or assign tasks to worker agents who are experts in their domains.
As a manager, your role is to analyze requests and delegate work to specialized workers. Take immediate action using available tools and workers - do not explain what you plan to do first.
### Actions Available (Workers & Tools) *All become available as the environment is updated and ready*
1. **search_data_catalog**
@ -153,35 +144,32 @@ As a manager, your role is to analyze requests and delegate work to specialized
- Delegate metric creation/updates to this specialized worker
- For single visualizations or small sets of related charts
- The worker handles SQL writing and visualization configuration
- Let the worker handle the details while you manage the process
- Use this for most visualization requests unless a full dashboard is needed
3. **dashboard_worker**
- Delegate dashboard creation/updates to this specialized worker
- For full dashboards with multiple charts
- Only use when multiple metrics need to be organized into a cohesive dashboard view
- For creating new dashboards or updating existing ones with multiple related visualizations
- Use metric_worker instead if only creating/updating individual charts
- The worker handles SQL and visualization configuration
- Trust the worker to handle dashboard-specific details
4. **exploratory_worker**
- Delegate deep-dive investigations to this analysis expert
- Worker can run multiple SQL queries and analyze results
- Only use when broad exploration is needed
- Skip for simple metric requests that can go directly to metric_worker
5. **search_files**
4. **search_files**
- Only use when user explicitly asks to search through files
- For finding previously created content
- Do not use unless specifically requested
6. **send_assets_to_user**
5. **send_assets_to_user**
- Use after workers complete their metric/dashboard tasks
- Specifies which assets to show the user
- Skip if no assets were created/modified
### Response Guidelines and Format
- When you've accomplished the task that the user requested, respond with a clear and concise message about how you did it.
- Do not include yml in your response.
### Key Guidelines
- You are a manager - delegate work to specialized workers when possible
- Take immediate action - do not explain your plan first
- Search data catalog first unless you have context
- Don't ask clarifying questions - make reasonable assumptions
- Workers handle the SQL and visualization details
- Only respond after completing the requested tasks
- Supported charts: tables, line, bar, histogram, pie/donut, metric cards, scatter plots
- Respond with clear, concise explanations of what was delegated and accomplished
- Keep final responses clear and concise, focusing on what was accomplished
"##;

View File

@ -102,4 +102,9 @@ Follow these detailed instructions to decide whether to call create a new metric
Your Overall Goal
Your objective is to ensure that the metrics in the system remain relevant, unique, and up-to-date with the latest user requirements. Analyze the provided context carefully, then determine whether you need to create a new metric or modify an existing one. Finally, invoke the correct tool — either bulk_create_metric or bulk_modify_metric — using an array of YAML files formatted exactly as specified above.
### Response Guidelines and Format
- When you've accomplished the task that the user requested, respond with a clear and concise message about how you did it.
- Do not include yml in your response.
"##;

View File

@ -91,7 +91,7 @@ impl ToolExecutor for DashboardAgentTool {
fn get_schema(&self) -> Value {
serde_json::json!({
"name": self.get_name(),
"description": "Use to create or update entire dashboards, including creating and configuring the underlying metrics and visualizations. This tool can handle both creating new metrics and organizing them into a cohesive dashboard layout. Ideal for when you need to create multiple related metrics and arrange them meaningfully on a single page.",
"description": "Specifically designed for creating or updating dashboards that require multiple related metrics. This tool excels at understanding complex dashboard requirements and automatically determining which metrics to create and how to organize them. Only use when you need to work with multiple metrics that should be displayed together on a single dashboard page.",
"strict": true,
"parameters": {
"type": "object",
@ -101,7 +101,7 @@ impl ToolExecutor for DashboardAgentTool {
"properties": {
"ticket_description": {
"type": "string",
"description": "A brief description for the action. This should be written as a command (e.g., 'Create a dashboard showing daily sales metrics...', 'Update the performance dashboard to include CPU metrics...'). The description should clearly state both the dashboard requirements and any metrics that need to be created. Copy the user's request exactly without adding instructions, thoughts, or assumptions."
"description": "The high-level requirements for what needs to be monitored or analyzed via the dashboard. Focus on describing the business or technical needs (e.g., 'We need to monitor our application's overall health including memory usage, CPU, and error rates', or 'Create a sales overview dashboard that shows our daily revenue, top products, and regional performance'). The dashboard worker will determine the specific metrics needed and their optimal arrangement."
}
},
"additionalProperties": false

View File

@ -3,8 +3,8 @@ use async_trait::async_trait;
use litellm::Message as AgentMessage;
use serde::Deserialize;
use serde_json::Value;
use tokio::sync::broadcast;
use std::sync::Arc;
use tokio::sync::broadcast;
use uuid::Uuid;
use crate::utils::{
@ -84,7 +84,7 @@ impl ToolExecutor for MetricAgentTool {
fn get_schema(&self) -> Value {
serde_json::json!({
"name": self.get_name(),
"description": "Use to create or update individual metrics, charts, or tables. This is suitable for a single chart/visualization (or several individual metrics) that does not require building an entire dashboard. This tool is most effective for direct metric requests or specific questions that can be answered with a single metric. It is less suitable for ambiguous or complex multi-part requests.",
"description": "Use to create or update individual metrics or visualizations. This tool is most effective for direct, single-metric requests or when a user explicitly asks for a specific number of metrics (e.g., '2 metrics showing...' or '3 charts for...'). It is not suitable for ambiguous or complex multi-part requests.",
"strict": true,
"parameters": {
"type": "object",
@ -94,7 +94,7 @@ impl ToolExecutor for MetricAgentTool {
"properties": {
"ticket_description": {
"type": "string",
"description": "A brief description for the action. This should essentially be a ticket description that can be appended to a ticket. The ticket description should explain which parts of the user's request this action addresses. Copy the user's request exactly without adding instructions, thoughts, or assumptions. Write it as a command, e.g., 'Create a bar chart showing...', 'Add a metric for...', etc."
"description": "A brief description containing the general requirements for the metric(s). Focus on what needs to be measured or visualized. Write it as a command, e.g., 'Create a bar chart showing...', 'Generate 2 metrics that measure...', 'Add a metric for...', etc."
}
},
"additionalProperties": false
@ -114,7 +114,11 @@ async fn process_agent_output(
Ok(msg) => {
println!("Agent message: {:?}", msg);
match msg {
AgentMessage::Assistant { content: Some(content), tool_calls: None, .. } => {
AgentMessage::Assistant {
content: Some(content),
tool_calls: None,
..
} => {
// Return the collected output with the final message
return Ok(MetricAgentOutput {
message: content,
@ -126,7 +130,8 @@ async fn process_agent_output(
// Process tool output
if let Ok(output) = serde_json::from_str::<Value>(&content) {
// Collect files
if let Some(file_array) = output.get("files").and_then(|f| f.as_array()) {
if let Some(file_array) = output.get("files").and_then(|f| f.as_array())
{
files.extend(file_array.iter().cloned());
}
}

View File

@ -167,7 +167,9 @@ impl ToolExecutor for CreateDashboardFilesTool {
for (i, yml) in dashboard_ymls.into_iter().enumerate() {
created_files.push(FileWithId {
id: dashboard_records[i].id,
content: FileEnum::Dashboard(yml),
name: dashboard_records[i].name.clone(),
file_type: "dashboard".to_string(),
yml_content: serde_yaml::to_string(&yml).unwrap_or_default(),
});
}
}

View File

@ -1,363 +0,0 @@
use std::time::Instant;
use std::sync::Arc;
use anyhow::{anyhow, Result};
use async_trait::async_trait;
use chrono::Utc;
use diesel::insert_into;
use diesel_async::RunQueryDsl;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use tracing::debug;
use uuid::Uuid;
use crate::{
database_dep::{
enums::Verification,
lib::get_pg_pool,
models::{DashboardFile, MetricFile},
schema::{dashboard_files, metric_files},
},
utils::{agent::Agent, tools::ToolExecutor},
};
use super::{
common::{validate_metric_ids, validate_sql},
file_types::{dashboard_yml::DashboardYml, metric_yml::MetricYml},
FileModificationTool,
};
use litellm::ToolCall;
/// Input description of a single file to create, as supplied by the LLM tool call.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct FileParams {
/// File name without the `.yml` extension (the extension is appended later).
pub name: String,
/// File kind; only "metric" and "dashboard" are accepted (see the tool schema).
pub file_type: String,
/// Raw YAML content defining the metric or dashboard configuration.
pub yml_content: String,
}
/// Parameters for the `create_files` tool: the batch of files to create.
#[derive(Debug, Serialize, Deserialize)]
pub struct CreateFilesParams {
/// Files to create; the tool schema requires all entries share one file_type.
pub files: Vec<FileParams>,
}
/// Result of a `create_files` execution, returned to the agent.
#[derive(Debug, Serialize, Deserialize)]
pub struct CreateFilesOutput {
/// Human-readable summary of successes and failures.
pub message: String,
/// Wall-clock duration of the tool run, in milliseconds.
pub duration: i64,
/// The files that were successfully persisted.
pub files: Vec<CreateFilesFile>,
}
/// Echo of one successfully created file in the tool output.
#[derive(Debug, Serialize, Deserialize)]
pub struct CreateFilesFile {
/// File name without the `.yml` extension.
pub name: String,
/// "metric" or "dashboard".
pub file_type: String,
/// The validated YAML, re-serialized from the parsed model.
pub yml_content: String,
}
/// Tool that bulk-creates metric and dashboard YAML files in the database.
pub struct CreateFilesTool {
// Shared handle to the owning agent (used for messaging/state elsewhere).
agent: Arc<Agent>
}
impl CreateFilesTool {
/// Build the tool around a shared agent handle.
pub fn new(agent: Arc<Agent>) -> Self {
Self { agent }
}
}
// Marker trait: flags this tool as one that modifies files.
impl FileModificationTool for CreateFilesTool {}
/// Validate one metric file request and build its database record.
///
/// Parses the YAML into a `MetricYml`, requires `id` and a non-empty
/// `dataset_ids`, validates the SQL against the first dataset, and returns
/// the ready-to-insert `MetricFile` plus the parsed YAML.
/// Errors are returned as plain `String` messages so the caller can collect
/// them per-file instead of aborting the whole batch.
async fn process_metric_file(file: FileParams) -> Result<(MetricFile, MetricYml), String> {
debug!("Processing metric file creation: {}", file.name);
let metric_yml = MetricYml::new(file.yml_content.clone())
.map_err(|e| format!("Invalid YAML format: {}", e))?;
let metric_id = metric_yml.id.ok_or_else(|| {
"Missing required field 'id'".to_string()
})?;
// Check if dataset_ids is empty
if metric_yml.dataset_ids.is_empty() {
return Err("Missing required field 'dataset_ids'".to_string());
}
// Use the first dataset_id for SQL validation; other datasets are not checked here.
let dataset_id = metric_yml.dataset_ids[0];
debug!("Validating SQL using dataset_id: {}", dataset_id);
// Validate SQL with the selected dataset_id
if let Err(e) = validate_sql(&metric_yml.sql, &dataset_id).await {
return Err(format!("Invalid SQL query: {}", e));
}
let metric_file = MetricFile {
id: metric_id,
name: metric_yml.title.clone(),
// Extension is re-attached here; `file.name` arrives without it.
file_name: format!("{}.yml", file.name),
content: serde_json::to_value(metric_yml.clone())
.map_err(|e| format!("Failed to process metric: {}", e))?,
// NOTE(review): created_by and organization_id are fresh random UUIDs,
// not the real user/org — presumably placeholders; verify before use.
created_by: Uuid::new_v4(),
verification: Verification::NotRequested,
evaluation_obj: None,
evaluation_summary: None,
evaluation_score: None,
organization_id: Uuid::new_v4(),
created_at: Utc::now(),
updated_at: Utc::now(),
deleted_at: None,
};
Ok((metric_file, metric_yml))
}
/// Validate one dashboard file request and build its database record.
///
/// Parses the YAML into a `DashboardYml`, requires `id`, verifies that every
/// metric referenced by the dashboard rows exists, and returns the
/// ready-to-insert `DashboardFile` plus the parsed YAML.
/// Errors are returned as plain `String` messages so the caller can collect
/// them per-file instead of aborting the whole batch.
async fn process_dashboard_file(file: FileParams) -> Result<(DashboardFile, DashboardYml), String> {
debug!("Processing dashboard file creation: {}", file.name);
let dashboard_yml = DashboardYml::new(file.yml_content.clone())
.map_err(|e| format!("Invalid YAML format: {}", e))?;
let dashboard_id = dashboard_yml.id.ok_or_else(|| {
"Missing required field 'id'".to_string()
})?;
// Collect and validate metric IDs from rows
let metric_ids: Vec<Uuid> = dashboard_yml
.rows
.iter()
.flat_map(|row| row.items.iter())
.map(|item| item.id)
.collect();
if !metric_ids.is_empty() {
match validate_metric_ids(&metric_ids).await {
Ok(missing_ids) if !missing_ids.is_empty() => {
return Err(format!("Invalid metric references: {:?}", missing_ids));
}
Err(e) => {
return Err(format!("Failed to validate metrics: {}", e));
}
Ok(_) => (), // All metrics exist
}
}
let dashboard_file = DashboardFile {
id: dashboard_id,
name: dashboard_yml.name.clone(),
// Extension is re-attached here; `file.name` arrives without it.
file_name: format!("{}.yml", file.name),
content: serde_json::to_value(dashboard_yml.clone())
.map_err(|e| format!("Failed to process dashboard: {}", e))?,
filter: None,
// NOTE(review): organization_id and created_by are fresh random UUIDs,
// not the real user/org — presumably placeholders; verify before use.
organization_id: Uuid::new_v4(),
created_by: Uuid::new_v4(),
created_at: Utc::now(),
updated_at: Utc::now(),
deleted_at: None,
};
Ok((dashboard_file, dashboard_yml))
}
#[async_trait]
impl ToolExecutor for CreateFilesTool {
type Output = CreateFilesOutput;
type Params = CreateFilesParams;
/// Tool name the LLM uses to invoke this executor.
fn get_name(&self) -> String {
"create_files".to_string()
}
/// Always enabled.
async fn is_enabled(&self) -> bool {
true
}
/// Validate every requested file, then bulk-insert the valid ones.
///
/// Two passes: (1) per-file validation via `process_metric_file` /
/// `process_dashboard_file`, collecting failures instead of aborting;
/// (2) one bulk insert per file type. Per-file validation errors and
/// whole-insert DB errors are merged into a single summary message.
async fn execute(&self, params: Self::Params) -> Result<Self::Output> {
let start_time = Instant::now();
let files = params.files;
let mut created_files = vec![];
// (name, error) pairs for everything that could not be created.
let mut failed_files = vec![];
// Separate files by type and validate/prepare them
let mut metric_records = vec![];
let mut dashboard_records = vec![];
// Parsed YAMLs kept in lockstep with the record vecs (same index).
let mut metric_ymls = vec![];
let mut dashboard_ymls = vec![];
// First pass - validate and prepare all records
for file in files {
match file.file_type.as_str() {
"metric" => match process_metric_file(file.clone()).await {
Ok((metric_file, metric_yml)) => {
metric_records.push(metric_file);
metric_ymls.push(metric_yml);
}
Err(e) => {
failed_files.push((file.name, e));
}
},
"dashboard" => match process_dashboard_file(file.clone()).await {
Ok((dashboard_file, dashboard_yml)) => {
dashboard_records.push(dashboard_file);
dashboard_ymls.push(dashboard_yml);
}
Err(e) => {
failed_files.push((file.name, e));
}
},
_ => {
failed_files.push((
file.name,
format!(
"Invalid file type: {}. Currently only `metric` and `dashboard` types are supported.",
file.file_type
),
));
}
}
}
// Second pass - bulk insert records
let mut conn = match get_pg_pool().get().await {
Ok(conn) => conn,
Err(e) => return Err(anyhow!(e)),
};
// Insert metric files (single bulk statement; on DB error every
// metric in the batch is reported as failed).
if !metric_records.is_empty() {
match insert_into(metric_files::table)
.values(&metric_records)
.execute(&mut conn)
.await
{
Ok(_) => {
// Index i pairs each yml with its record (built in lockstep above).
for (i, yml) in metric_ymls.into_iter().enumerate() {
created_files.push(CreateFilesFile {
name: metric_records[i]
.file_name
.trim_end_matches(".yml")
.to_string(),
file_type: "metric".to_string(),
yml_content: serde_yaml::to_string(&yml).unwrap_or_default(),
});
}
}
Err(e) => {
failed_files.extend(metric_records.iter().map(|r| {
(
r.file_name.clone(),
format!("Failed to create metric file: {}", e),
)
}));
}
}
}
// Insert dashboard files (mirrors the metric insert above)
if !dashboard_records.is_empty() {
match insert_into(dashboard_files::table)
.values(&dashboard_records)
.execute(&mut conn)
.await
{
Ok(_) => {
for (i, yml) in dashboard_ymls.into_iter().enumerate() {
created_files.push(CreateFilesFile {
name: dashboard_records[i]
.file_name
.trim_end_matches(".yml")
.to_string(),
file_type: "dashboard".to_string(),
yml_content: serde_yaml::to_string(&yml).unwrap_or_default(),
});
}
}
Err(e) => {
failed_files.extend(dashboard_records.iter().map(|r| {
(
r.file_name.clone(),
format!("Failed to create dashboard file: {}", e),
)
}));
}
}
}
// Build the summary: pure success, or success count + failure details.
let message = if failed_files.is_empty() {
format!("Successfully created {} files.", created_files.len())
} else {
let success_msg = if !created_files.is_empty() {
format!("Successfully created {} files. ", created_files.len())
} else {
String::new()
};
let failures: Vec<String> = failed_files
.iter()
.map(|(name, error)| format!("Failed to create '{}': {}", name, error))
.collect();
if failures.len() == 1 {
// NOTE(review): trim() removes the separating space, so success and
// failure text concatenate without a space here — confirm intended.
format!("{}{}.", success_msg.trim(), failures[0])
} else {
format!(
"{}Failed to create {} files:\n{}",
success_msg,
failures.len(),
failures.join("\n")
)
}
};
let duration = start_time.elapsed().as_millis() as i64;
Ok(CreateFilesOutput {
message,
duration,
files: created_files,
})
}
/// OpenAI-style function schema advertised to the LLM for this tool.
fn get_schema(&self) -> Value {
serde_json::json!({
"name": "create_files",
"strict": true,
"parameters": {
"type": "object",
"required": ["files"],
"properties": {
"files": {
"type": "array",
"items": {
"type": "object",
"required": ["name", "file_type", "yml_content"],
"properties": {
"name": {
"type": "string",
"description": "The name of the file to be created. This should exclude the file extension. (i.e. '.yml')"
},
"file_type": {
"type": "string",
"enum": ["metric", "dashboard"],
"description": "The type of file to create. All files in a single request must be of the same type. Metrics and dashboards cannot be created in the same request."
},
"yml_content": {
"type": "string",
"description": "The YAML content defining the metric or dashboard configuration"
}
},
"additionalProperties": false
},
"description": "Array of files to create. All files in a single request must be of the same type (either all metrics or all dashboards). Metrics must be created in a separate request from dashboards since dashboards depend on existing metrics."
}
},
"additionalProperties": false
},
"description": "Creates **new** metric or dashboard files. Use this if no existing file can fulfill the user's needs. IMPORTANT: Metrics and dashboards must be created in separate requests - you cannot mix them in the same request. Create metrics first, then create dashboards that reference those metrics in a separate request.Guard Rail: Do not execute any file creation or modifications until a thorough data catalog search has been completed and reviewed."
})
}
}

View File

@ -27,8 +27,6 @@ use super::{
FileModificationTool,
};
use litellm::ToolCall;
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct MetricFileParams {
pub name: String,
@ -168,7 +166,9 @@ impl ToolExecutor for CreateMetricFilesTool {
for (i, yml) in metric_ymls.into_iter().enumerate() {
created_files.push(FileWithId {
id: metric_records[i].id,
content: FileEnum::Metric(yml),
name: metric_records[i].name.clone(),
file_type: "metric".to_string(),
yml_content: serde_yaml::to_string(&yml).unwrap_or_default(),
});
}
}

View File

@ -15,7 +15,9 @@ pub struct File {
#[derive(Debug, Serialize)]
pub struct FileWithId {
pub id: Uuid,
pub content: FileEnum,
pub name: String,
pub file_type: String,
pub yml_content: String,
}
#[derive(Debug, Serialize, Deserialize)]

View File

@ -1,17 +1,25 @@
use std::time::Instant;
use std::sync::Arc;
use std::collections::HashMap;
use std::time::Instant;
use anyhow::Result;
use async_trait::async_trait;
use chrono::Utc;
use diesel::{upsert::excluded, ExpressionMethods, QueryDsl};
use diesel_async::RunQueryDsl;
use serde_json::Value;
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use serde_json::Value;
use tracing::{debug, error, info, warn};
use uuid::Uuid;
use super::{
common::{validate_metric_ids, validate_sql},
file_types::{
dashboard_yml::DashboardYml,
file::{FileEnum, FileWithId},
metric_yml::MetricYml,
},
FileModificationTool,
};
use crate::{
database_dep::{
enums::Verification,
@ -19,46 +27,40 @@ use crate::{
models::{DashboardFile, MetricFile},
schema::{dashboard_files, metric_files},
},
utils::{
tools::ToolExecutor,
agent::Agent,
},
};
use super::{
file_types::{
dashboard_yml::DashboardYml,
file::FileEnum,
metric_yml::MetricYml,
},
FileModificationTool,
common::{validate_sql, validate_metric_ids},
utils::{agent::Agent, tools::ToolExecutor},
};
use litellm::ToolCall;
/// One contiguous line-range edit to apply to a YAML file's text.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct Modification {
    /// The new content to be inserted at the specified line numbers.
    /// Must follow the metric configuration YAML schema.
    pub new_content: String,
    /// Array of line numbers where modifications should be applied.
    /// Must contain exactly 2 numbers representing the start and end lines
    /// (inclusive; use [n, n] to replace a single line).
    pub line_numbers: Vec<i64>,
}
/// All edits requested for a single existing file, identified by UUID.
#[derive(Debug, Serialize, Deserialize)]
pub struct FileModification {
    /// UUID of the file to modify
    pub id: Uuid,
    /// Type of the file: "metric" or "dashboard"; any other value is
    /// rejected during execution and recorded as a failed modification.
    pub file_type: String,
    /// Name of the file to modify
    pub file_name: String,
    /// List of modifications to apply to the file
    pub modifications: Vec<Modification>,
}
/// Top-level parameter payload for the modify-files tool call,
/// deserialized from the LLM tool-call arguments.
#[derive(Debug, Serialize, Deserialize)]
pub struct ModifyFilesParams {
    /// List of files to modify with their corresponding modifications
    pub files: Vec<FileModification>,
}
#[derive(Debug, Serialize)]
struct ModificationResult {
file_id: Uuid,
file_type: String,
file_name: String,
success: bool,
original_lines: Vec<i64>,
@ -72,9 +74,7 @@ struct ModificationResult {
/// Working accumulator for one execute() run: successfully processed
/// database records paired (by index) with their parsed YML bodies,
/// plus failures collected as (file_name, error_message) pairs and
/// per-edit result records for reporting.
#[derive(Debug)]
struct FileModificationBatch {
    metric_files: Vec<MetricFile>,
    metric_ymls: Vec<MetricYml>,
    // NOTE(review): dashboard_* fields appear on removed diff lines in this
    // rendering — confirm whether the final struct still carries them.
    dashboard_files: Vec<DashboardFile>,
    dashboard_ymls: Vec<DashboardYml>,
    failed_modifications: Vec<(String, String)>,
    modification_results: Vec<ModificationResult>,
}
@ -91,7 +91,7 @@ impl LineAdjustment {
fn new(original_start: i64, original_end: i64, new_length: i64) -> Self {
let original_length = original_end - original_start + 1;
let offset = new_length - original_length;
Self {
original_start,
original_end,
@ -157,7 +157,11 @@ fn expand_line_range(line_numbers: &[i64]) -> Vec<i64> {
(start..=end).collect()
}
fn apply_modifications_to_content(content: &str, modifications: &[Modification], file_name: &str) -> Result<String> {
fn apply_modifications_to_content(
content: &str,
modifications: &[Modification],
file_name: &str,
) -> Result<String> {
let mut lines: Vec<&str> = content.lines().collect();
let mut modified_lines = lines.clone();
let mut total_offset = 0;
@ -187,7 +191,7 @@ fn apply_modifications_to_content(content: &str, modifications: &[Modification],
// Expand range into sequential numbers for processing
let line_range = expand_line_range(&modification.line_numbers);
// Adjust line numbers based on previous modifications
let original_start = line_range[0] as usize - 1;
let original_end = line_range[line_range.len() - 1] as usize - 1;
@ -214,12 +218,8 @@ fn apply_modifications_to_content(content: &str, modifications: &[Modification],
// Apply the modification
let prefix = modified_lines[..adjusted_start].to_vec();
let suffix = modified_lines[adjusted_start + old_length..].to_vec();
modified_lines = [
prefix,
new_lines,
suffix,
].concat();
modified_lines = [prefix, new_lines, suffix].concat();
}
Ok(modified_lines.join("\n"))
@ -229,70 +229,60 @@ fn apply_modifications_to_content(content: &str, modifications: &[Modification],
pub struct ModifyFilesOutput {
message: String,
duration: i64,
files: Vec<FileEnum>,
files: Vec<FileWithId>,
}
pub struct ModifyFilesTool {
agent: Arc<Agent>
pub struct FilterDashboardFilesTool {
agent: Arc<Agent>,
}
impl ModifyFilesTool {
impl FilterDashboardFilesTool {
    /// Builds the tool with a shared handle to the agent, which is later
    /// consulted for state (e.g. availability flags) when the tool runs.
    pub fn new(agent: Arc<Agent>) -> Self {
        Self { agent }
    }
}
impl FileModificationTool for ModifyFilesTool {}
impl FileModificationTool for FilterDashboardFilesTool {}
#[async_trait]
impl ToolExecutor for ModifyFilesTool {
impl ToolExecutor for FilterDashboardFilesTool {
type Output = ModifyFilesOutput;
type Params = ModifyFilesParams;
fn get_name(&self) -> String {
"modify_files".to_string()
"filter_dashboard_files".to_string()
}
async fn is_enabled(&self) -> bool {
true
match (
self.agent.get_state_value("metrics_available").await,
self.agent.get_state_value("dashboards_available").await,
) {
(Some(_), Some(_)) => true,
(None, None) => false,
_ => false,
}
}
async fn execute(&self, params: Self::Params) -> Result<Self::Output> {
let start_time = Instant::now();
debug!("Starting file modification execution");
info!("Processing {} files for modification", params.files.len());
// Initialize batch processing structures
let mut batch = FileModificationBatch {
metric_files: Vec::new(),
dashboard_files: Vec::new(),
metric_ymls: Vec::new(),
dashboard_ymls: Vec::new(),
failed_modifications: Vec::new(),
modification_results: Vec::new(),
};
// Group files by type and fetch existing records
let mut metric_ids = Vec::new();
let mut dashboard_ids = Vec::new();
let mut file_map = std::collections::HashMap::new();
for file in &params.files {
match file.file_type.as_str() {
"metric" => metric_ids.push(file.id),
"dashboard" => dashboard_ids.push(file.id),
_ => {
batch.failed_modifications.push((
file.file_name.clone(),
format!("Invalid file type: {}", file.file_type),
));
continue;
}
}
file_map.insert(file.id, file);
}
// Collect file IDs and create map
let metric_ids: Vec<Uuid> = params.files.iter().map(|f| f.id).collect();
let file_map: std::collections::HashMap<_, _> =
params.files.iter().map(|f| (f.id, f)).collect();
// Get database connection
let mut conn = match get_pg_pool().get().await {
@ -319,17 +309,22 @@ impl ToolExecutor for ModifyFilesTool {
Ok(files) => {
for file in files {
if let Some(modifications) = file_map.get(&file.id) {
match process_metric_file(file, modifications, start_time.elapsed().as_millis() as i64).await {
match process_metric_file(
file,
modifications,
start_time.elapsed().as_millis() as i64,
)
.await
{
Ok((metric_file, metric_yml, results)) => {
batch.metric_files.push(metric_file);
batch.metric_ymls.push(metric_yml);
batch.modification_results.extend(results);
}
Err(e) => {
batch.failed_modifications.push((
modifications.file_name.clone(),
e.to_string(),
));
batch
.failed_modifications
.push((modifications.file_name.clone(), e.to_string()));
}
}
}
@ -346,45 +341,6 @@ impl ToolExecutor for ModifyFilesTool {
}
}
// Fetch dashboard files
if !dashboard_ids.is_empty() {
use crate::database_dep::schema::dashboard_files::dsl::*;
match dashboard_files
.filter(id.eq_any(dashboard_ids))
.filter(deleted_at.is_null())
.load::<DashboardFile>(&mut conn)
.await
{
Ok(files) => {
for file in files {
if let Some(modifications) = file_map.get(&file.id) {
match process_dashboard_file(file, modifications, start_time.elapsed().as_millis() as i64).await {
Ok((dashboard_file, dashboard_yml, results)) => {
batch.dashboard_files.push(dashboard_file);
batch.dashboard_ymls.push(dashboard_yml);
batch.modification_results.extend(results);
}
Err(e) => {
batch.failed_modifications.push((
modifications.file_name.clone(),
e.to_string(),
));
}
}
}
}
}
Err(e) => {
let duration = start_time.elapsed().as_millis() as i64;
return Ok(ModifyFilesOutput {
message: format!("Failed to fetch dashboard files: {}", e),
files: Vec::new(),
duration,
});
}
}
}
// Process results and generate output message
let duration = start_time.elapsed().as_millis() as i64;
let mut output = ModifyFilesOutput {
@ -409,7 +365,16 @@ impl ToolExecutor for ModifyFilesTool {
.await
{
Ok(_) => {
output.files.extend(batch.metric_ymls.into_iter().map(FileEnum::Metric));
output.files.extend(
batch.metric_files.iter().zip(batch.metric_ymls.iter()).map(
|(file, yml)| FileWithId {
id: file.id,
name: file.name.clone(),
file_type: "metric".to_string(),
yml_content: serde_yaml::to_string(&yml).unwrap_or_default(),
},
),
);
}
Err(e) => {
batch.failed_modifications.push((
@ -420,34 +385,6 @@ impl ToolExecutor for ModifyFilesTool {
}
}
// Update dashboard files in database
if !batch.dashboard_files.is_empty() {
use diesel::insert_into;
match insert_into(dashboard_files::table)
.values(&batch.dashboard_files)
.on_conflict(dashboard_files::id)
.do_update()
.set((
dashboard_files::content.eq(excluded(dashboard_files::content)),
dashboard_files::updated_at.eq(Utc::now()),
))
.execute(&mut conn)
.await
{
Ok(_) => {
output
.files
.extend(batch.dashboard_ymls.into_iter().map(FileEnum::Dashboard));
}
Err(e) => {
batch.failed_modifications.push((
"dashboard_files".to_string(),
format!("Failed to update dashboard files: {}", e),
));
}
}
}
// Generate final message
if batch.failed_modifications.is_empty() {
output.message = format!("Successfully modified {} files.", output.files.len());
@ -477,62 +414,71 @@ impl ToolExecutor for ModifyFilesTool {
fn get_schema(&self) -> Value {
serde_json::json!({
"name": "modify_files",
"name": self.get_name(),
"description": "Modifies metric files within a dashboard to apply filtering conditions",
"strict": true,
"parameters": {
"type": "object",
"required": ["files"],
"required": [
"dashboard_id",
"files"
],
"properties": {
"dashboard_id": {
"type": "string",
"description": "UUID of the dashboard whose metrics should be filtered"
},
"files": {
"type": "array",
"description": "List of metric files within the dashboard to modify with filtering conditions",
"items": {
"type": "object",
"required": ["id", "file_type", "file_name", "modifications"],
"required": [
"id",
"file_name",
"modifications"
],
"properties": {
"id": {
"type": "string",
"description": "The UUID of the file to modify"
},
"file_type": {
"type": "string",
"enum": ["metric", "dashboard"],
"description": "The type of file to modify"
"description": "UUID of the metric file to modify"
},
"file_name": {
"type": "string",
"description": "The name of the file being modified"
"description": "Name of the metric file to modify"
},
"modifications": {
"type": "array",
"description": "List of filtering modifications to apply to the metric",
"items": {
"type": "object",
"required": ["new_content", "line_numbers"],
"required": [
"new_content",
"line_numbers"
],
"properties": {
"new_content": {
"type": "string",
"description": "The new content that will replace the existing lines. If continuous line changes are made, then you should keep them together."
"description": "The new content containing the filtering conditions (e.g., time range filters)"
},
"line_numbers": {
"type": "array",
"description": "Array of line numbers where filter modifications should be applied",
"items": {
"type": "number"
},
"description": "Array containing exactly 2 numbers [start,end] specifying the range of lines to replace. For example, [1,5] replaces lines 1 through 5. For a single line, use [n,n] (e.g., [3,3] for line 3)."
"type": "integer"
}
}
},
"additionalProperties": false
},
"description": "List of modifications to be made to the file."
}
}
},
"additionalProperties": false
},
"description": "Array of files to modify with their modifications."
}
}
},
"additionalProperties": false
},
"description": "Makes multiple line-level modifications to one or more existing YAML files in a single call. Line numbers are specified as [start,end] ranges. If you need to update SQL, chart config, or other sections within a file, use this.Guard Rail: Do not execute any file creation or modifications until a thorough data catalog search has been completed and reviewed."
}
})
}
}
@ -547,9 +493,9 @@ async fn process_metric_file(
file_name = %modification.file_name,
"Processing metric file modifications"
);
let mut results = Vec::new();
// Parse existing content
let current_yml: MetricYml = match serde_json::from_value(file.content.clone()) {
Ok(yml) => yml,
@ -563,7 +509,6 @@ async fn process_metric_file(
);
results.push(ModificationResult {
file_id: file.id,
file_type: "metric".to_string(),
file_name: modification.file_name.clone(),
success: false,
original_lines: vec![],
@ -576,7 +521,7 @@ async fn process_metric_file(
return Err(anyhow::anyhow!(error));
}
};
// Convert to YAML string for line modifications
let current_content = match serde_yaml::to_string(&current_yml) {
Ok(content) => content,
@ -590,7 +535,6 @@ async fn process_metric_file(
);
results.push(ModificationResult {
file_id: file.id,
file_type: "metric".to_string(),
file_name: modification.file_name.clone(),
success: false,
original_lines: vec![],
@ -603,18 +547,22 @@ async fn process_metric_file(
return Err(anyhow::anyhow!(error));
}
};
// Track original line numbers before modifications
let mut original_lines = Vec::new();
let mut adjusted_lines = Vec::new();
// Apply modifications
for m in &modification.modifications {
original_lines.extend(m.line_numbers.clone());
}
// Apply modifications and track results
match apply_modifications_to_content(&current_content, &modification.modifications, &modification.file_name) {
match apply_modifications_to_content(
&current_content,
&modification.modifications,
&modification.file_name,
) {
Ok(modified_content) => {
// Create and validate new YML object
match MetricYml::new(modified_content) {
@ -638,7 +586,6 @@ async fn process_metric_file(
);
results.push(ModificationResult {
file_id: file.id,
file_type: "metric".to_string(),
file_name: modification.file_name.clone(),
success: false,
original_lines: original_lines.clone(),
@ -651,16 +598,15 @@ async fn process_metric_file(
return Err(anyhow::anyhow!(error));
}
}
// Update file record
file.content = serde_json::to_value(&new_yml)?;
file.updated_at = Utc::now();
file.verification = Verification::NotRequested;
// Track successful modification
results.push(ModificationResult {
file_id: file.id,
file_type: "metric".to_string(),
file_name: modification.file_name.clone(),
success: true,
original_lines: original_lines.clone(),
@ -670,7 +616,7 @@ async fn process_metric_file(
timestamp: Utc::now(),
duration,
});
Ok((file, new_yml, results))
}
Err(e) => {
@ -683,7 +629,6 @@ async fn process_metric_file(
);
results.push(ModificationResult {
file_id: file.id,
file_type: "metric".to_string(),
file_name: modification.file_name.clone(),
success: false,
original_lines,
@ -707,222 +652,6 @@ async fn process_metric_file(
);
results.push(ModificationResult {
file_id: file.id,
file_type: "metric".to_string(),
file_name: modification.file_name.clone(),
success: false,
original_lines,
adjusted_lines: vec![],
error: Some(error.clone()),
modification_type: "modification".to_string(),
timestamp: Utc::now(),
duration,
});
Err(anyhow::anyhow!(error))
}
}
}
async fn process_dashboard_file(
mut file: DashboardFile,
modification: &FileModification,
duration: i64,
) -> Result<(DashboardFile, DashboardYml, Vec<ModificationResult>)> {
debug!(
file_id = %file.id,
file_name = %modification.file_name,
"Processing dashboard file modifications"
);
let mut results = Vec::new();
// Parse existing content
let current_yml: DashboardYml = match serde_json::from_value(file.content.clone()) {
Ok(yml) => yml,
Err(e) => {
let error = format!("Failed to parse existing dashboard YAML: {}", e);
error!(
file_id = %file.id,
file_name = %modification.file_name,
error = %error,
"YAML parsing error"
);
results.push(ModificationResult {
file_id: file.id,
file_type: "dashboard".to_string(),
file_name: modification.file_name.clone(),
success: false,
original_lines: vec![],
adjusted_lines: vec![],
error: Some(error.clone()),
modification_type: "parsing".to_string(),
timestamp: Utc::now(),
duration,
});
return Err(anyhow::anyhow!(error));
}
};
// Convert to YAML string for line modifications
let current_content = match serde_yaml::to_string(&current_yml) {
Ok(content) => content,
Err(e) => {
let error = format!("Failed to serialize dashboard YAML: {}", e);
error!(
file_id = %file.id,
file_name = %modification.file_name,
error = %error,
"YAML serialization error"
);
results.push(ModificationResult {
file_id: file.id,
file_type: "dashboard".to_string(),
file_name: modification.file_name.clone(),
success: false,
original_lines: vec![],
adjusted_lines: vec![],
error: Some(error.clone()),
modification_type: "serialization".to_string(),
timestamp: Utc::now(),
duration,
});
return Err(anyhow::anyhow!(error));
}
};
// Track original line numbers before modifications
let mut original_lines = Vec::new();
let mut adjusted_lines = Vec::new();
// Apply modifications
for m in &modification.modifications {
original_lines.extend(m.line_numbers.clone());
}
// Apply modifications and track results
match apply_modifications_to_content(&current_content, &modification.modifications, &modification.file_name) {
Ok(modified_content) => {
// Create and validate new YML object
match DashboardYml::new(modified_content) {
Ok(new_yml) => {
debug!(
file_id = %file.id,
file_name = %modification.file_name,
"Successfully modified and validated dashboard file"
);
// Collect all metric IDs from rows
let metric_ids: Vec<Uuid> = new_yml.rows
.iter()
.flat_map(|row| row.items.iter())
.map(|item| item.id)
.collect();
// Validate metric IDs if any exist
if !metric_ids.is_empty() {
match validate_metric_ids(&metric_ids).await {
Ok(missing_ids) if !missing_ids.is_empty() => {
let error = format!("Referenced metrics not found: {:?}", missing_ids);
error!(
file_id = %file.id,
file_name = %modification.file_name,
error = %error,
"Metric validation error"
);
results.push(ModificationResult {
file_id: file.id,
file_type: "dashboard".to_string(),
file_name: modification.file_name.clone(),
success: false,
original_lines: original_lines.clone(),
adjusted_lines: adjusted_lines.clone(),
error: Some(error.clone()),
modification_type: "metric_validation".to_string(),
timestamp: Utc::now(),
duration,
});
return Err(anyhow::anyhow!(error));
},
Err(e) => {
let error = format!("Failed to validate metric IDs: {}", e);
error!(
file_id = %file.id,
file_name = %modification.file_name,
error = %error,
"Metric validation error"
);
results.push(ModificationResult {
file_id: file.id,
file_type: "dashboard".to_string(),
file_name: modification.file_name.clone(),
success: false,
original_lines: original_lines.clone(),
adjusted_lines: adjusted_lines.clone(),
error: Some(error.clone()),
modification_type: "metric_validation".to_string(),
timestamp: Utc::now(),
duration,
});
return Err(anyhow::anyhow!(error));
},
Ok(_) => (), // All metrics exist
}
}
// Update file record
file.content = serde_json::to_value(&new_yml)?;
file.updated_at = Utc::now();
// Track successful modification
results.push(ModificationResult {
file_id: file.id,
file_type: "dashboard".to_string(),
file_name: modification.file_name.clone(),
success: true,
original_lines: original_lines.clone(),
adjusted_lines: adjusted_lines.clone(),
error: None,
modification_type: "content".to_string(),
timestamp: Utc::now(),
duration,
});
Ok((file, new_yml, results))
}
Err(e) => {
let error = format!("Failed to validate modified YAML: {}", e);
error!(
file_id = %file.id,
file_name = %modification.file_name,
error = %error,
"YAML validation error"
);
results.push(ModificationResult {
file_id: file.id,
file_type: "dashboard".to_string(),
file_name: modification.file_name.clone(),
success: false,
original_lines,
adjusted_lines: vec![],
error: Some(error.clone()),
modification_type: "validation".to_string(),
timestamp: Utc::now(),
duration,
});
Err(anyhow::anyhow!(error))
}
}
}
Err(e) => {
let error = format!("Failed to apply modifications: {}", e);
error!(
file_id = %file.id,
file_name = %modification.file_name,
error = %error,
"Modification application error"
);
results.push(ModificationResult {
file_id: file.id,
file_type: "dashboard".to_string(),
file_name: modification.file_name.clone(),
success: false,
original_lines,
@ -939,6 +668,8 @@ async fn process_dashboard_file(
#[cfg(test)]
mod tests {
use std::collections::HashMap;
use super::*;
use chrono::Utc;
use serde_json::json;
@ -959,7 +690,9 @@ mod tests {
// Test invalid range (end < start)
let invalid_range_err = validate_line_numbers(&[5, 3]).unwrap_err();
assert!(invalid_range_err.to_string().contains("must be greater than or equal to"));
assert!(invalid_range_err
.to_string()
.contains("must be greater than or equal to"));
// Test starting below 1
let invalid_start_err = validate_line_numbers(&[0, 2]).unwrap_err();
@ -969,14 +702,17 @@ mod tests {
#[test]
fn test_apply_modifications_to_content() {
let original_content = "line1\nline2\nline3\nline4\nline5";
// Test single modification replacing two lines
let mods1 = vec![Modification {
new_content: "new line2\nnew line3".to_string(),
line_numbers: vec![2, 3], // Replace lines 2-3
}];
let result1 = apply_modifications_to_content(original_content, &mods1, "test.yml").unwrap();
assert_eq!(result1.trim_end(), "line1\nnew line2\nnew line3\nline4\nline5");
assert_eq!(
result1.trim_end(),
"line1\nnew line2\nnew line3\nline4\nline5"
);
// Test multiple non-overlapping modifications
let mods2 = vec![
@ -990,10 +726,14 @@ mod tests {
},
];
let result2 = apply_modifications_to_content(original_content, &mods2, "test.yml").unwrap();
assert_eq!(result2.trim_end(), "line1\nnew line2\nline3\nnew line4\nline5");
assert_eq!(
result2.trim_end(),
"line1\nnew line2\nline3\nnew line4\nline5"
);
// Test multiple modifications with line shifts
let content_with_more_lines = "line1\nline2\nline3\nline4\nline5\nline6\nline7\nline8\nline9\nline10";
let content_with_more_lines =
"line1\nline2\nline3\nline4\nline5\nline6\nline7\nline8\nline9\nline10";
let mods_with_shifts = vec![
Modification {
new_content: "new line2\nnew line2.1\nnew line2.2".to_string(),
@ -1008,7 +748,9 @@ mod tests {
line_numbers: vec![9, 9], // Replace line 9 with 2 lines (net +1 line)
},
];
let result_with_shifts = apply_modifications_to_content(content_with_more_lines, &mods_with_shifts, "test.yml").unwrap();
let result_with_shifts =
apply_modifications_to_content(content_with_more_lines, &mods_with_shifts, "test.yml")
.unwrap();
assert_eq!(
result_with_shifts.trim_end(),
"line1\nnew line2\nnew line2.1\nnew line2.2\nline4\nline5\nnew line6\nline8\nnew line9\nnew line9.1\nline10"
@ -1039,7 +781,8 @@ mod tests {
assert!(result4.unwrap_err().to_string().contains("out of bounds"));
// Test broader line ranges with sequential modifications
let content_with_many_lines = "line1\nline2\nline3\nline4\nline5\nline6\nline7\nline8\nline9\nline10\nline11\nline12";
let content_with_many_lines =
"line1\nline2\nline3\nline4\nline5\nline6\nline7\nline8\nline9\nline10\nline11\nline12";
let broad_range_mods = vec![
Modification {
new_content: "new block 1-6\nnew content".to_string(),
@ -1050,7 +793,9 @@ mod tests {
line_numbers: vec![9, 11], // Replace lines 9-11 with 3 lines (no net change)
},
];
let result_broad_range = apply_modifications_to_content(content_with_many_lines, &broad_range_mods, "test.yml").unwrap();
let result_broad_range =
apply_modifications_to_content(content_with_many_lines, &broad_range_mods, "test.yml")
.unwrap();
assert_eq!(
result_broad_range.trim_end(),
"new block 1-6\nnew content\nline7\nline8\nnew block 9-11\nextra line\nmore content\nline12"
@ -1067,16 +812,22 @@ mod tests {
line_numbers: vec![4, 8],
},
];
let result_overlapping = apply_modifications_to_content(content_with_many_lines, &overlapping_broad_mods, "test.yml");
let result_overlapping = apply_modifications_to_content(
content_with_many_lines,
&overlapping_broad_mods,
"test.yml",
);
assert!(result_overlapping.is_err());
assert!(result_overlapping.unwrap_err().to_string().contains("overlaps"));
assert!(result_overlapping
.unwrap_err()
.to_string()
.contains("overlaps"));
}
#[test]
fn test_modification_result_tracking() {
let result = ModificationResult {
file_id: Uuid::new_v4(),
file_type: "metric".to_string(),
file_name: "test.yml".to_string(),
success: true,
original_lines: vec![1, 2, 3],
@ -1089,7 +840,7 @@ mod tests {
// Test successful modification result
assert!(result.success);
assert_eq!(result.file_type, "metric");
assert_eq!(result.file_name, "test.yml");
assert_eq!(result.original_lines, vec![1, 2, 3]);
assert_eq!(result.adjusted_lines, vec![1, 2]);
assert!(result.error.is_none());
@ -1107,18 +858,19 @@ mod tests {
#[test]
fn test_tool_parameter_validation() {
let tool = ModifyFilesTool::new(Arc::new(Agent::new(
"o1".to_string(),
HashMap::new(),
Uuid::new_v4(),
Uuid::new_v4(),
)));
let tool = FilterDashboardFilesTool {
agent: Arc::new(Agent::new(
"o3-mini".to_string(),
HashMap::new(),
Uuid::new_v4(),
Uuid::new_v4(),
)),
};
// Test valid parameters
let valid_params = json!({
"files": [{
"id": Uuid::new_v4().to_string(),
"file_type": "metric",
"file_name": "test.yml",
"modifications": [{
"new_content": "test content",
@ -1130,32 +882,16 @@ mod tests {
let result = serde_json::from_str::<ModifyFilesParams>(&valid_args);
assert!(result.is_ok());
// Test invalid file type
let invalid_type_params = json!({
"files": [{
"id": Uuid::new_v4().to_string(),
"file_type": "invalid",
"file_name": "test.yml",
"modifications": [{
"new_content": "test content",
"line_numbers": [1, 2, 3]
}]
}]
});
let invalid_args = serde_json::to_string(&invalid_type_params).unwrap();
let result = serde_json::from_str::<ModifyFilesParams>(&invalid_args);
assert!(result.is_ok()); // Type validation happens during execution
// Test missing required fields
let missing_fields_params = json!({
"files": [{
"id": Uuid::new_v4().to_string(),
"file_type": "metric"
// missing file_name and modifications
"file_name": "test.yml"
// missing modifications
}]
});
let missing_args = serde_json::to_string(&missing_fields_params).unwrap();
let result = serde_json::from_str::<ModifyFilesParams>(&missing_args);
assert!(result.is_err());
}
}
}

View File

@ -1,26 +1,24 @@
pub mod common;
pub mod create_dashboard_files;
pub mod create_files;
pub mod create_metric_files;
pub mod file_types;
pub mod filter_dashboard_files;
pub mod modify_dashboard_files;
pub mod modify_files;
pub mod modify_metric_files;
pub mod open_files;
pub mod search_data_catalog;
pub mod search_files;
pub mod send_assets_to_user;
pub use create_files::CreateFilesTool;
pub use modify_files::ModifyFilesTool;
pub use create_dashboard_files::CreateDashboardFilesTool;
pub use create_metric_files::CreateMetricFilesTool;
pub use filter_dashboard_files::FilterDashboardFilesTool;
pub use modify_dashboard_files::ModifyDashboardFilesTool;
pub use modify_metric_files::ModifyMetricFilesTool;
pub use open_files::OpenFilesTool;
pub use search_data_catalog::SearchDataCatalogTool;
pub use search_files::SearchFilesTool;
pub use send_assets_to_user::SendAssetsToUserTool;
pub use create_dashboard_files::CreateDashboardFilesTool;
pub use create_metric_files::CreateMetricFilesTool;
pub use modify_dashboard_files::ModifyDashboardFilesTool;
pub use modify_metric_files::ModifyMetricFilesTool;
use crate::utils::tools::ToolExecutor;

View File

@ -349,7 +349,9 @@ impl ToolExecutor for ModifyDashboardFilesTool {
output.files.extend(
batch.dashboard_files.iter().zip(batch.dashboard_ymls.iter()).map(|(file, yml)| FileWithId {
id: file.id,
content: FileEnum::Dashboard(yml.clone()),
name: file.name.clone(),
file_type: "dashboard".to_string(),
yml_content: serde_yaml::to_string(&yml).unwrap_or_default(),
})
);
}

View File

@ -360,7 +360,9 @@ impl ToolExecutor for ModifyMetricFilesTool {
output.files.extend(
batch.metric_files.iter().zip(batch.metric_ymls.iter()).map(|(file, yml)| FileWithId {
id: file.id,
content: FileEnum::Metric(yml.clone()),
name: file.name.clone(),
file_type: "metric".to_string(),
yml_content: serde_yaml::to_string(&yml).unwrap_or_default(),
})
);
}