Add status filtering on metrics; restore public dashboard functionality

This commit is contained in:
dal 2025-04-23 06:10:54 -06:00
parent a1a1208568
commit 4d44a1766c
No known key found for this signature in database
GPG Key ID: 16F4B0E1E9F61122
8 changed files with 537 additions and 38 deletions

View File

@ -417,20 +417,18 @@ impl FromSql<sql_types::DatasetTypeEnum, Pg> for DatasetType {
}
#[derive(
Serialize,
Deserialize,
Debug,
Clone,
Copy,
PartialEq,
Eq,
diesel::AsExpression,
diesel::FromSqlRow,
Serialize,
Deserialize,
Clone,
Copy,
)]
#[diesel(sql_type = sql_types::VerificationEnum)]
#[serde(rename_all = "camelCase")]
#[derive(sqlx::Type)]
#[sqlx(type_name = "verification_enum", rename_all = "camelCase")]
pub enum Verification {
Verified,
Backlogged,
@ -473,7 +471,6 @@ impl FromSql<sql_types::VerificationEnum, Pg> for Verification {
Copy,
PartialEq,
Eq,
Hash,
diesel::AsExpression,
diesel::FromSqlRow,
)]

View File

@ -12,7 +12,7 @@ use tokio::task::JoinHandle;
use uuid::Uuid;
use crate::dashboards::types::{BusterShareIndividual, DashboardCollection};
use crate::metrics::get_metric_handler;
use crate::metrics::{get_metric_for_dashboard_handler, get_metric_handler};
use crate::metrics::{BusterMetric, Dataset, Version};
use database::enums::{AssetPermissionRole, AssetType, IdentityType, Verification};
use database::helpers::dashboard_files::fetch_dashboard_file_with_permission;
@ -249,12 +249,11 @@ pub async fn get_dashboard_handler(
// Fetch metrics concurrently using get_metric_handler
let mut metric_fetch_handles = Vec::new();
for metric_id in metric_ids {
let user_clone = user.clone(); // Clone user for the spawned task
// Spawn a task for each metric fetch.
// Pass None for version_number and password as the dashboard view uses the latest metric
// and access is primarily determined by dashboard permissions.
// Spawn a task for each metric fetch using the dashboard-specific handler.
// Pass only the metric_id and None for version_number.
let handle = tokio::spawn(async move {
get_metric_handler(&metric_id, &user_clone, None, None).await
// Call the new handler, no user or password needed
get_metric_for_dashboard_handler(&metric_id, None).await
});
metric_fetch_handles.push((metric_id, handle));
}

View File

@ -59,6 +59,7 @@ async fn process_single_update(
// Create an update request with just the verification status
let request = UpdateMetricRequest {
verification: Some(update.verification),
update_version: Some(false),
..UpdateMetricRequest::default()
};

View File

@ -1,10 +1,12 @@
use anyhow::{anyhow, Result};
use chrono::Utc;
use database::{
models::DashboardFile,
pool::get_pg_pool,
schema::metric_files,
types::{MetricYml, data_metadata::DataMetadata},
schema::{dashboard_files, metric_files, metric_files_to_dashboard_files},
types::{data_metadata::DataMetadata, MetricYml},
};
use diesel::{ExpressionMethods, QueryDsl};
use diesel::{BoolExpressionMethods, ExpressionMethods, JoinOnDsl, OptionalExtension, QueryDsl};
use diesel_async::RunQueryDsl;
use indexmap::IndexMap;
use middleware::AuthenticatedUser;
@ -14,7 +16,7 @@ use uuid::Uuid;
use query_engine::data_source_helpers;
use query_engine::data_types::DataType;
use crate::metrics::get_metric_handler;
use crate::metrics::{get_metric_for_dashboard_handler, get_metric_handler, BusterMetric};
/// Request structure for the get_metric_data handler
#[derive(Debug, Deserialize)]
@ -44,55 +46,164 @@ pub async fn get_metric_data_handler(
user.id
);
// Retrieve the metric definition based on version, if none, use latest.
let metric = get_metric_handler(
// --- Step 1: Try retrieving metric with standard permission checks ---
let metric_result = get_metric_handler(
&request.metric_id,
&user,
request.version_number,
request.password
).await?;
request.password.clone(), // Clone password for potential reuse/logging
)
.await;
let metric: BusterMetric = match metric_result {
Ok(metric) => {
tracing::debug!("Successfully retrieved metric via standard permissions.");
metric
}
Err(e) => {
// --- Step 2: Handle potential permission error ---
let error_string = e.to_string().to_lowercase();
let is_permission_error = error_string.contains("permission")
|| error_string.contains("expired")
|| error_string.contains("password");
if is_permission_error {
tracing::warn!(
"Initial metric access failed due to potential permission issue: {}. Checking public dashboard access.",
e
);
// --- Step 3: Check if metric belongs to a valid public dashboard ---
let mut conn_check = get_pg_pool().get().await?;
let now = Utc::now();
let public_dashboard_exists = match metric_files_to_dashboard_files::table
.inner_join(dashboard_files::table.on(
dashboard_files::id.eq(metric_files_to_dashboard_files::dashboard_file_id),
))
.filter(metric_files_to_dashboard_files::metric_file_id.eq(request.metric_id))
.filter(dashboard_files::publicly_accessible.eq(true))
.filter(dashboard_files::deleted_at.is_null())
.filter(
dashboard_files::public_expiry_date
.is_null()
.or(dashboard_files::public_expiry_date.gt(now)),
)
.select(dashboard_files::id) // Select any column to check existence
.first::<Uuid>(&mut conn_check) // Try to get the first matching ID
.await
{
Ok(id) => Some(id),
Err(diesel::NotFound) => None,
Err(e) => {
tracing::error!("Error checking if public dashboard exists: {}", e);
return Err(anyhow!("Error checking if public dashboard exists: {}", e));
}
};
if public_dashboard_exists.is_some() {
// --- Step 4: Public dashboard found, fetch metric bypassing permissions ---
tracing::info!("Found associated public dashboard. Fetching metric definition without direct permissions.");
match get_metric_for_dashboard_handler(
&request.metric_id,
request.version_number,
)
.await
{
Ok(metric_via_dashboard) => {
tracing::debug!(
"Successfully retrieved metric via public dashboard association."
);
metric_via_dashboard // Use this metric definition
}
Err(fetch_err) => {
// If fetching via dashboard fails unexpectedly, return that error
tracing::error!("Failed to fetch metric via dashboard context even though public dashboard exists: {}", fetch_err);
return Err(fetch_err);
}
}
} else {
// No public dashboard association found, return the original permission error
tracing::warn!("No valid public dashboard association found for metric. Returning original error.");
return Err(e);
}
} else {
// Error was not permission-related, return original error
tracing::error!("Metric retrieval failed for non-permission reason: {}", e);
return Err(e);
}
}
};
// --- Step 5: Proceed with data fetching using the obtained metric definition ---
tracing::debug!("Parsing metric definition from YAML to get SQL and dataset IDs.");
// Parse the metric definition from YAML to get SQL and dataset IDs
let metric_yml = serde_yaml::from_str::<MetricYml>(&metric.file)?;
let metric_yml: MetricYml = match serde_yaml::from_str(&metric.file) {
Ok(yml) => yml,
Err(parse_err) => {
tracing::error!("Failed to parse metric YAML: {}", parse_err);
return Err(anyhow!("Failed to parse metric definition: {}", parse_err));
}
};
let sql = metric_yml.sql;
let dataset_ids = metric_yml.dataset_ids;
if dataset_ids.is_empty() {
return Err(anyhow!("No dataset IDs found in metric"));
tracing::error!(
"No dataset IDs found in metric definition for metric {}",
request.metric_id
);
return Err(anyhow!("No dataset IDs found in metric definition"));
}
tracing::debug!("Found dataset IDs: {:?}", dataset_ids);
// Get the first dataset ID to use for querying
let primary_dataset_id = dataset_ids[0];
// Get the data source ID for the dataset
tracing::debug!("Fetching data sources for datasets: {:?}", dataset_ids);
let dataset_sources = data_source_helpers::get_data_sources_for_datasets(&dataset_ids).await?;
if dataset_sources.is_empty() {
tracing::error!(
"Could not find data sources for the specified datasets: {:?}",
dataset_ids
);
return Err(anyhow!(
"Could not find data sources for the specified datasets"
));
}
tracing::debug!("Found data sources: {:?}", dataset_sources);
// Find the data source for the primary dataset
let data_source = dataset_sources
.iter()
.find(|ds| ds.dataset_id == primary_dataset_id)
.ok_or_else(|| anyhow!("Primary dataset not found"))?;
.ok_or_else(|| {
tracing::error!(
"Primary dataset ID {} not found among fetched data sources",
primary_dataset_id
);
anyhow!("Primary dataset ID not found among associated data sources")
})?;
tracing::info!(
"Querying data for metric. Dataset: {}, Data source: {}",
"Querying data for metric {}. Dataset: {}, Data source: {}, Limit: {:?}",
request.metric_id,
data_source.name,
data_source.data_source_id
data_source.data_source_id,
request.limit
);
// Try to get cached metadata first
let mut conn = get_pg_pool().get().await?;
let mut conn_meta = get_pg_pool().get().await?;
let cached_metadata = metric_files::table
.filter(metric_files::id.eq(request.metric_id))
.select(metric_files::data_metadata)
.first::<Option<DataMetadata>>(&mut conn)
.first::<Option<DataMetadata>>(&mut conn_meta)
.await
.map_err(|e| anyhow!("Error retrieving metadata: {}", e))?;
.map_err(|e| anyhow!("Error retrieving cached metadata: {}", e))?;
tracing::debug!("Cached metadata found: {}", cached_metadata.is_some());
// Execute the query to get the metric data
let query_result = match query_engine::data_source_query_routes::query_engine::query_engine(
@ -102,32 +213,55 @@ pub async fn get_metric_data_handler(
)
.await
{
Ok(result) => result,
Ok(result) => {
tracing::info!(
"Successfully executed metric query. Rows returned: {}",
result.data.len()
);
result
}
Err(e) => {
tracing::error!("Error executing metric query: {}", e);
tracing::error!(
"Error executing metric query for metric {}: {}",
request.metric_id,
e
);
return Err(anyhow!("Error executing metric query: {}", e));
}
};
// Determine which metadata to use
let metadata = if let Some(metadata) = cached_metadata {
// Use cached metadata but update row count if it differs
let final_metadata = if let Some(metadata) = cached_metadata {
tracing::debug!(
"Using cached metadata. Cached rows: {}, Query rows: {}",
metadata.row_count,
query_result.data.len()
);
// Use cached metadata but update row count if it differs significantly or if cached count is 0
// (We update if different because the cache might be stale regarding row count)
if metadata.row_count != query_result.data.len() as i64 {
tracing::debug!("Row count changed. Updating metadata row count.");
let mut updated_metadata = metadata.clone();
updated_metadata.row_count = query_result.data.len() as i64;
// Potentially update updated_at? For now, just row count.
updated_metadata
} else {
metadata
}
} else {
tracing::debug!("No cached metadata found. Using metadata from query result.");
// No cached metadata, use the one from query_result
query_result.metadata.clone()
};
// Construct and return the response
tracing::info!(
"Successfully retrieved data for metric {}. Returning response.",
request.metric_id
);
Ok(MetricDataResponse {
metric_id: request.metric_id,
data: query_result.data,
data_metadata: metadata,
data_metadata: final_metadata,
})
}

View File

@ -0,0 +1,295 @@
use anyhow::{anyhow, Result};
use chrono::Utc;
use database::models::MetricFile;
use diesel::prelude::Queryable;
use diesel::{ExpressionMethods, JoinOnDsl, QueryDsl};
use diesel_async::{AsyncPgConnection, RunQueryDsl};
use futures::future::join;
use serde_yaml;
use uuid::Uuid;
use crate::metrics::types::{AssociatedCollection, AssociatedDashboard, BusterMetric, Dataset};
use database::enums::AssetPermissionRole; // Keep for hardcoding permission
use database::pool::get_pg_pool;
use database::schema::{
collections, collections_to_assets, dashboard_files, datasets, metric_files,
metric_files_to_dashboard_files,
};
use super::Version;
/// Row shape for the dataset-lookup query in `get_metric_for_dashboard_handler`:
/// (`id`, `name`, `data_source_id`) selected from the `datasets` table.
/// Field order must match the `select(...)` tuple — Diesel's `Queryable`
/// maps columns positionally.
#[derive(Queryable)]
struct DatasetInfo {
    id: Uuid,
    name: String,
    data_source_id: Uuid,
}
/// Look up every dashboard linked to `metric_id`, with NO per-user filtering.
///
/// Only soft-delete flags on the join row and the dashboard itself are
/// checked; callers are responsible for having authorized access at a
/// higher (dashboard) level before invoking this.
async fn fetch_associated_dashboards_unfiltered(
    metric_id: Uuid,
    conn: &mut AsyncPgConnection,
) -> Result<Vec<AssociatedDashboard>> {
    // Load raw (id, name) pairs first, then map into the response type.
    let rows: Vec<(Uuid, String)> = metric_files_to_dashboard_files::table
        .inner_join(
            dashboard_files::table
                .on(dashboard_files::id.eq(metric_files_to_dashboard_files::dashboard_file_id)),
        )
        .filter(metric_files_to_dashboard_files::metric_file_id.eq(metric_id))
        .filter(dashboard_files::deleted_at.is_null())
        .filter(metric_files_to_dashboard_files::deleted_at.is_null())
        // Deliberately no user-permission join/filter here.
        .select((dashboard_files::id, dashboard_files::name))
        .load(conn)
        .await?;

    let mut dashboards = Vec::with_capacity(rows.len());
    for (id, name) in rows {
        dashboards.push(AssociatedDashboard { id, name });
    }
    Ok(dashboards)
}
/// Look up every collection containing `metric_id`, with NO per-user filtering.
///
/// Filters only by asset type (`MetricFile`) and soft-delete flags; access
/// control is assumed to have happened at a higher level.
async fn fetch_associated_collections_unfiltered(
    metric_id: Uuid,
    conn: &mut AsyncPgConnection,
) -> Result<Vec<AssociatedCollection>> {
    // Load raw (id, name) pairs, then wrap them in the response type.
    let rows: Vec<(Uuid, String)> = collections_to_assets::table
        .inner_join(collections::table.on(collections::id.eq(collections_to_assets::collection_id)))
        // Deliberately no user-permission join/filter here.
        .filter(collections_to_assets::asset_id.eq(metric_id))
        // Only rows that actually reference a metric file.
        .filter(collections_to_assets::asset_type.eq(database::enums::AssetType::MetricFile))
        .filter(collections::deleted_at.is_null())
        .filter(collections_to_assets::deleted_at.is_null())
        .select((collections::id, collections::name))
        .load(conn)
        .await?;

    Ok(rows
        .into_iter()
        .map(|(id, name)| AssociatedCollection { id, name })
        .collect())
}
/// Handler to retrieve a metric by ID for display within a dashboard.
/// Assumes authentication/permission checks were done at the dashboard level.
/// Skips all metric-specific permission checks.
///
/// # Arguments
/// * `metric_id` - id of the metric file to load.
/// * `version_number` - optional specific version to resolve; `None` means
///   "use the current content stored on the main `metric_files` row".
///
/// # Errors
/// Fails when the metric row is missing or soft-deleted, the requested
/// version does not exist (or holds a non-`MetricYml` payload), the content
/// cannot be re-serialized to YAML, or the dataset-info query errors.
/// Failures in the associated dashboards/collections lookups are logged and
/// degraded to empty lists, not propagated.
pub async fn get_metric_for_dashboard_handler(
    metric_id: &Uuid,
    version_number: Option<i32>,
) -> Result<BusterMetric> {
    let mut conn = get_pg_pool().get().await?;

    // 1. Fetch metric file directly by ID - NO PERMISSION CHECK
    let metric_file = metric_files::table
        .find(metric_id)
        .filter(metric_files::deleted_at.is_null())
        .first::<MetricFile>(&mut conn)
        .await
        .map_err(|e| {
            tracing::warn!(metric_id = %metric_id, "Metric file not found or DB error during direct fetch: {}", e);
            // Keep the outward error generic so callers can't distinguish
            // "missing" from "DB failure" (details are in the log above).
            anyhow!("Metric file not found") // Keep error generic
        })?;

    // --- Permission is implicitly CanView because access is via dashboard ---
    let permission = AssetPermissionRole::CanView;

    // Declare variables to hold potentially versioned data; exactly one of
    // the two branches below initializes all of them.
    let resolved_name: String;
    let resolved_description: Option<String>;
    let resolved_time_frame: String;
    let resolved_dataset_ids: Vec<Uuid>;
    let resolved_chart_config: database::types::ChartConfig;
    let resolved_sql: String;
    let resolved_updated_at: chrono::DateTime<Utc>;
    let resolved_version_num: i32;
    let resolved_content_for_yaml: database::types::MetricYml;

    // Data metadata always comes from the main table record (current state),
    // even when an older version's content is returned below.
    let data_metadata: Option<database::types::DataMetadata> = metric_file.data_metadata;

    if let Some(requested_version) = version_number {
        // --- Specific version requested ---
        tracing::debug!(metric_id = %metric_id, version = requested_version, "Attempting to retrieve specific version for dashboard context");
        if let Some(v) = metric_file.version_history.get_version(requested_version) {
            match &v.content {
                database::types::VersionContent::MetricYml(content) => {
                    let version_content = (**content).clone(); // Deref the Box and clone
                    resolved_name = version_content.name.clone();
                    resolved_description = version_content.description.clone();
                    resolved_time_frame = version_content.time_frame.clone();
                    resolved_dataset_ids = version_content.dataset_ids.clone();
                    resolved_chart_config = version_content.chart_config.clone();
                    resolved_sql = version_content.sql.clone();
                    resolved_updated_at = v.updated_at;
                    resolved_version_num = v.version_number;
                    resolved_content_for_yaml = version_content; // Use this content for YAML
                    tracing::debug!(metric_id = %metric_id, version = requested_version, "Successfully retrieved specific version content for dashboard");
                }
                _ => {
                    // Version history can hold other content kinds; only
                    // MetricYml is valid for a metric file.
                    tracing::error!(metric_id = %metric_id, version = requested_version, "Invalid content type found for requested version");
                    return Err(anyhow!(
                        "Invalid content type found for version {}",
                        requested_version
                    ));
                }
            }
        } else {
            tracing::warn!(metric_id = %metric_id, version = requested_version, "Requested version not found in history");
            return Err(anyhow!("Version {} not found", requested_version));
        }
    } else {
        // --- No specific version requested - use current state from the main table row ---
        tracing::debug!(metric_id = %metric_id, "No specific version requested, using current metric file content for dashboard");
        let current_content = metric_file.content.clone(); // Use the content directly from the fetched MetricFile
        resolved_name = metric_file.name.clone(); // Use main record name
        resolved_description = current_content.description.clone();
        resolved_time_frame = current_content.time_frame.clone();
        resolved_dataset_ids = current_content.dataset_ids.clone();
        resolved_chart_config = current_content.chart_config.clone();
        resolved_sql = current_content.sql.clone();
        resolved_updated_at = metric_file.updated_at; // Use main record updated_at
        resolved_version_num = metric_file.version_history.get_version_number();
        resolved_content_for_yaml = current_content; // Use this content for YAML
        tracing::debug!(metric_id = %metric_id, latest_version = resolved_version_num, "Determined latest version number for dashboard");
    }

    // Convert the selected content to pretty YAML for the 'file' field
    let file = match serde_yaml::to_string(&resolved_content_for_yaml) {
        Ok(yaml) => yaml,
        Err(e) => {
            tracing::error!(metric_id = %metric_id, error = %e, "Failed to serialize selected metric content to YAML for dashboard");
            return Err(anyhow!("Failed to convert metric content to YAML: {}", e));
        }
    };

    // Map evaluation score - this is not versioned.
    // Thresholds: >= 0.8 "High", >= 0.5 "Moderate", else "Low".
    let evaluation_score = metric_file.evaluation_score.map(|score| {
        if score >= 0.8 {
            "High".to_string()
        } else if score >= 0.5 {
            "Moderate".to_string()
        } else {
            "Low".to_string()
        }
    });

    // Get dataset information for the resolved dataset IDs
    let mut datasets = Vec::new();
    let mut first_data_source_id = None;

    if !resolved_dataset_ids.is_empty() {
        // Fetch only if there are IDs to prevent unnecessary query
        let dataset_infos = datasets::table
            .filter(datasets::id.eq_any(&resolved_dataset_ids))
            .filter(datasets::deleted_at.is_null())
            .select((datasets::id, datasets::name, datasets::data_source_id))
            .load::<DatasetInfo>(&mut conn)
            .await
            .map_err(|e| {
                tracing::error!("Failed to fetch dataset info for metric {}: {}", metric_id, e);
                anyhow!("Failed to fetch dataset info")
            })?;

        for dataset_info in dataset_infos {
            datasets.push(Dataset {
                id: dataset_info.id.to_string(),
                name: dataset_info.name,
            });
            // NOTE(review): this takes the first row in DB return order;
            // `eq_any` does not guarantee the order of `resolved_dataset_ids`.
            // Confirm callers don't depend on a specific "primary" dataset.
            if first_data_source_id.is_none() {
                first_data_source_id = Some(dataset_info.data_source_id);
            }
        }
    }

    // Build the full version list from the stored history map.
    let mut versions: Vec<Version> = metric_file
        .version_history
        .0
        .values()
        .map(|v| Version {
            version_number: v.version_number,
            updated_at: v.updated_at,
        })
        .collect();

    // Sort versions by version_number in ascending order
    versions.sort_by(|a, b| a.version_number.cmp(&b.version_number));

    // Concurrently fetch associated dashboards and collections (unfiltered versions)
    let metrics_id_clone = *metric_id;

    // Await both futures concurrently - NOTE: Need to handle connection borrowing carefully.
    // It's safer to get the connection again or pass it differently if needed.
    // For now, let's assume the initial `conn` can be reused or handle potential errors.
    // A better approach might involve passing the pool and getting connections inside helpers.
    // Re-getting connection for safety:
    let mut conn_dash = get_pg_pool().get().await?;
    let mut conn_coll = get_pg_pool().get().await?;

    let dashboards_future = fetch_associated_dashboards_unfiltered(metrics_id_clone, &mut conn_dash);
    let collections_future = fetch_associated_collections_unfiltered(metrics_id_clone, &mut conn_coll);

    let (dashboards_result, collections_result) = join(dashboards_future, collections_future).await;

    // Handle results, logging errors but returning empty Vecs for failed tasks
    // (association lists are best-effort; a failure should not hide the metric).
    let dashboards = match dashboards_result {
        Ok(dashboards) => dashboards,
        Err(e) => {
            tracing::error!(
                "Failed to fetch associated dashboards (unfiltered) for metric {}: {}",
                metric_id,
                e
            );
            vec![]
        }
    };

    let collections = match collections_result {
        Ok(collections) => collections,
        Err(e) => {
            tracing::error!(
                "Failed to fetch associated collections (unfiltered) for metric {}: {}",
                metric_id,
                e
            );
            vec![]
        }
    };

    // Construct BusterMetric using resolved values
    Ok(BusterMetric {
        id: metric_file.id,
        metric_type: "metric".to_string(),
        name: resolved_name,
        version_number: resolved_version_num,
        description: resolved_description,
        file_name: metric_file.file_name,
        time_frame: resolved_time_frame,
        datasets,
        data_source_id: first_data_source_id.map_or("".to_string(), |id| id.to_string()),
        error: None, // Assume ok
        chart_config: Some(resolved_chart_config),
        data_metadata,
        status: metric_file.verification,
        evaluation_score,
        evaluation_summary: metric_file.evaluation_summary.unwrap_or_default(),
        file, // YAML based on resolved content
        created_at: metric_file.created_at,
        updated_at: resolved_updated_at,
        sent_by_id: metric_file.created_by,
        sent_by_name: "".to_string(), // Placeholder - user info not needed/fetched
        sent_by_avatar_url: None, // Placeholder - user info not needed/fetched
        code: None, // Placeholder
        dashboards, // Unfiltered associations
        collections, // Unfiltered associations
        versions, // Full version history list
        permission, // Hardcoded to CanView
        sql: resolved_sql,
        // Sharing fields are irrelevant/defaulted in this context
        individual_permissions: None,
        publicly_accessible: false, // Default value
        public_expiry_date: None, // Default value
        public_enabled_by: None, // Default value
        public_password: None, // Default value
    })
}

View File

@ -17,6 +17,7 @@ pub struct MetricsListRequest {
pub page_size: i64,
pub shared_with_me: Option<bool>,
pub only_my_metrics: Option<bool>,
pub verification: Option<Vec<Verification>>,
}
#[derive(Debug, Serialize, Deserialize)]
@ -73,13 +74,19 @@ pub async fn list_metrics_handler(
),
))
.filter(metric_files::deleted_at.is_null())
.distinct()
.order((metric_files::updated_at.desc(), metric_files::id.asc()))
.offset(offset)
.limit(request.page_size)
.into_boxed();
// Add filters based on request parameters
if let Some(verification_statuses) = request.verification {
// Only apply filter if the vec is not empty
if !verification_statuses.is_empty() {
metric_statement = metric_statement.filter(metric_files::verification.eq_any(verification_statuses));
}
}
if let Some(true) = request.only_my_metrics {
// Show only metrics created by the user
metric_statement = metric_statement.filter(metric_files::created_by.eq(&user.id));

View File

@ -6,6 +6,7 @@ pub mod list_metrics_handler;
pub mod sharing;
pub mod types;
pub mod update_metric_handler;
pub mod get_metric_for_dashboard_handler;
// Re-export specific items from handlers
pub use bulk_update_metrics_handler::*;
@ -13,6 +14,7 @@ pub use delete_metric_handler::*;
pub use get_metric_handler::*;
pub use list_metrics_handler::*;
pub use update_metric_handler::*;
pub use get_metric_for_dashboard_handler::get_metric_for_dashboard_handler;
// For get_metric_data_handler, only export the handler functions and request types
// but not the types that conflict with types.rs

View File

@ -5,6 +5,67 @@ use axum::Extension;
use handlers::metrics::{list_metrics_handler, MetricsListRequest, BusterMetricListItem};
use middleware::AuthenticatedUser;
use serde::Deserialize;
use database::enums::Verification;
use serde::de::{self, Deserializer, SeqAccess, Visitor};
use std::fmt;
// Helper function to deserialize either a single verification-status string
// or a sequence of them into Option<Vec<Verification>>; an empty sequence
// or null/missing value becomes None (i.e. "no status filter").
fn deserialize_optional_vec_or_single<'de, D>(deserializer: D) -> Result<Option<Vec<Verification>>, D::Error>
where
    D: Deserializer<'de>,
{
    // Visitor covering all accepted input shapes: string, sequence, null/some.
    struct OptionVecOrSingleVisitor;

    impl<'de> Visitor<'de> for OptionVecOrSingleVisitor {
        type Value = Option<Vec<Verification>>;

        fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
            formatter.write_str("a verification status string, a sequence of status strings, or null")
        }

        // Handle a single string value (e.g. `status[]=verified`)
        fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
        where
            E: de::Error,
        {
            // Deserialize the single string into Verification
            let verification = Verification::deserialize(de::value::StringDeserializer::new(value.to_string()))?;
            Ok(Some(vec![verification]))
        }

        // Handle a sequence of values (e.g. repeated `status[]` params)
        fn visit_seq<A>(self, seq: A) -> Result<Self::Value, A::Error>
        where
            A: SeqAccess<'de>,
        {
            // Deserialize the sequence into Vec<Verification>
            let vec = Vec::<Verification>::deserialize(de::value::SeqAccessDeserializer::new(seq))?;
            // An empty list is normalized to None so downstream code can
            // treat it as "no filter supplied".
            if vec.is_empty() {
                Ok(None)
            } else {
                Ok(Some(vec))
            }
        }

        // Handle null or missing value
        fn visit_none<E>(self) -> Result<Self::Value, E>
        where
            E: de::Error,
        {
            Ok(None)
        }

        // Handle optional value by delegating the inner value back to this visitor
        fn visit_some<D>(self, deserializer: D) -> Result<Self::Value, D::Error>
        where
            D: Deserializer<'de>,
        {
            deserializer.deserialize_any(self)
        }
    }

    deserializer.deserialize_any(OptionVecOrSingleVisitor)
}
#[derive(Deserialize)]
pub struct ListMetricsQuery {
@ -12,6 +73,8 @@ pub struct ListMetricsQuery {
page_size: Option<i64>,
shared_with_me: Option<bool>,
only_my_metrics: Option<bool>,
#[serde(rename = "status[]", deserialize_with = "deserialize_optional_vec_or_single", default)]
verification: Option<Vec<Verification>>,
}
pub async fn list_metrics_rest_handler(
@ -23,6 +86,7 @@ pub async fn list_metrics_rest_handler(
page_size: query.page_size.unwrap_or(25),
shared_with_me: query.shared_with_me,
only_my_metrics: query.only_my_metrics,
verification: query.verification,
};
let metrics = match list_metrics_handler(&user, request).await {