mirror of https://github.com/buster-so/buster.git
prds for bug fixes and doc updates
This commit is contained in:
parent
bedc6b3e51
commit
17e672ef16
|
@ -3,9 +3,11 @@ order and accomplishing the tasks while referencing the prd, its notes, and reco
|
|||
|
||||
make sure to mark off completed tasks as you go.
|
||||
|
||||
you should follow best practices as related in documentation/ for database migrations, testing, handlers,
|
||||
you should follow best practices as related in documentation/ for database migrations, testing.mdc, handlers.mdc,
|
||||
etc. Please analyze them before you modify a file.
|
||||
|
||||
please analyze all files before proceeding with any implementations.
|
||||
|
||||
you should think hard about your implementation and then implement carefully.
|
||||
|
||||
you are not done until tests for your specific file are finished and a cargo check runs successfully.
|
|
@ -20,6 +20,185 @@ For consistent mocking across the codebase, use these libraries:
|
|||
|
||||
Always prefer dependency injection patterns to enable easy mocking of dependencies.
|
||||
|
||||
### Test Utilities and Setup
|
||||
|
||||
#### Core Test Types
|
||||
|
||||
The test infrastructure provides several core test utilities for consistent test setup:
|
||||
|
||||
```rust
|
||||
// Example of TestDb and TestSetup implementation
|
||||
pub struct TestDb {
|
||||
pub test_id: String,
|
||||
pub organization_id: Uuid,
|
||||
pub user_id: Uuid,
|
||||
initialized: bool,
|
||||
}
|
||||
|
||||
impl TestDb {
|
||||
pub async fn new() -> Result<Self> {
|
||||
// Load environment variables
|
||||
dotenv().ok();
|
||||
|
||||
// Initialize test pools using existing pools
|
||||
let test_id = format!("test-{}", Uuid::new_v4());
|
||||
let organization_id = Uuid::new_v4();
|
||||
let user_id = Uuid::new_v4();
|
||||
|
||||
Ok(Self {
|
||||
test_id,
|
||||
organization_id,
|
||||
user_id,
|
||||
initialized: true,
|
||||
})
|
||||
}
|
||||
|
||||
// Get connections from pre-initialized pools
|
||||
pub async fn diesel_conn(&self) -> Result<AsyncPgConnection> {
|
||||
get_pg_pool()
|
||||
.get()
|
||||
.await
|
||||
.map_err(|e| anyhow!("Failed to get diesel connection: {}", e))
|
||||
}
|
||||
|
||||
pub async fn sqlx_conn(&self) -> Result<PgConnection> {
|
||||
get_sqlx_pool()
|
||||
.acquire()
|
||||
.await
|
||||
.map_err(|e| anyhow!("Failed to get sqlx connection: {}", e))
|
||||
}
|
||||
|
||||
pub async fn redis_conn(&self) -> Result<RedisConnection> {
|
||||
get_redis_pool()
|
||||
.get()
|
||||
.await
|
||||
            .map_err(|e| anyhow!("Failed to get redis connection: {}", e))
|
||||
}
|
||||
|
||||
// Create test organization
|
||||
pub async fn create_organization(&self) -> Result<Organization> {
|
||||
let org = Organization {
|
||||
id: Uuid::new_v4(),
|
||||
name: "Test Organization".to_string(),
|
||||
domain: Some("test.org".to_string()),
|
||||
created_at: Utc::now(),
|
||||
updated_at: Utc::now(),
|
||||
deleted_at: None,
|
||||
};
|
||||
|
||||
let mut conn = self.diesel_conn().await?;
|
||||
diesel::insert_into(organizations::table)
|
||||
.values(&org)
|
||||
.execute(&mut conn)
|
||||
.await?;
|
||||
|
||||
Ok(org)
|
||||
}
|
||||
|
||||
// Create test user
|
||||
pub async fn create_user(&self) -> Result<User> {
|
||||
let user = User {
|
||||
id: Uuid::new_v4(),
|
||||
email: format!("test-{}@example.com", Uuid::new_v4()),
|
||||
name: Some("Test User".to_string()),
|
||||
config: json!({}),
|
||||
created_at: Utc::now(),
|
||||
updated_at: Utc::now(),
|
||||
attributes: json!({}),
|
||||
avatar_url: None,
|
||||
};
|
||||
|
||||
let mut conn = self.diesel_conn().await?;
|
||||
diesel::insert_into(users::table)
|
||||
.values(&user)
|
||||
.execute(&mut conn)
|
||||
.await?;
|
||||
|
||||
Ok(user)
|
||||
}
|
||||
|
||||
// Create user-organization relationship
|
||||
pub async fn create_user_to_org(
|
||||
&self,
|
||||
user_id: Uuid,
|
||||
org_id: Uuid,
|
||||
role: UserOrganizationRole,
|
||||
) -> Result<UserToOrganization> {
|
||||
let user_org = UserToOrganization {
|
||||
user_id,
|
||||
organization_id: org_id,
|
||||
role,
|
||||
sharing_setting: SharingSetting::Private,
|
||||
edit_sql: true,
|
||||
upload_csv: true,
|
||||
export_assets: true,
|
||||
email_slack_enabled: true,
|
||||
created_at: Utc::now(),
|
||||
updated_at: Utc::now(),
|
||||
deleted_at: None,
|
||||
created_by: user_id,
|
||||
updated_by: user_id,
|
||||
deleted_by: None,
|
||||
status: UserOrganizationStatus::Active,
|
||||
};
|
||||
|
||||
let mut conn = self.diesel_conn().await?;
|
||||
diesel::insert_into(users_to_organizations::table)
|
||||
.values(&user_org)
|
||||
.execute(&mut conn)
|
||||
.await?;
|
||||
|
||||
Ok(user_org)
|
||||
}
|
||||
|
||||
// Create authenticated user for testing
|
||||
pub async fn create_authenticated_user(
|
||||
&self,
|
||||
role: Option<UserOrganizationRole>,
|
||||
) -> Result<(AuthenticatedUser, Organization)> {
|
||||
let org = self.create_organization().await?;
|
||||
let user = self.create_user().await?;
|
||||
|
||||
let role = role.unwrap_or(UserOrganizationRole::Admin);
|
||||
self.create_user_to_org(user.id, org.id, role).await?;
|
||||
|
||||
let auth_user = AuthenticatedUser {
|
||||
id: user.id,
|
||||
email: user.email,
|
||||
name: user.name,
|
||||
organization_id: org.id,
|
||||
role,
|
||||
sharing_setting: SharingSetting::Private,
|
||||
edit_sql: true,
|
||||
upload_csv: true,
|
||||
export_assets: true,
|
||||
email_slack_enabled: true,
|
||||
};
|
||||
|
||||
Ok((auth_user, org))
|
||||
}
|
||||
}
|
||||
|
||||
// Helper struct for test setup
|
||||
pub struct TestSetup {
|
||||
pub user: AuthenticatedUser,
|
||||
pub organization: Organization,
|
||||
pub db: TestDb,
|
||||
}
|
||||
|
||||
impl TestSetup {
|
||||
pub async fn new(role: Option<UserOrganizationRole>) -> Result<Self> {
|
||||
let test_db = TestDb::new().await?;
|
||||
let (user, org) = test_db.create_authenticated_user(role).await?;
|
||||
|
||||
Ok(Self {
|
||||
user,
|
||||
organization: org,
|
||||
db: test_db,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
### Library Code Testing Structure
|
||||
- **Unit Tests**: Include unit tests inside library source files using `#[cfg(test)]` modules
|
||||
- **Integration Tests**: Place integration tests in the lib's `tests/` directory
|
||||
|
@ -215,8 +394,7 @@ Integration tests are specifically designed to verify the interaction with exter
|
|||
- **Configuration**:
|
||||
- Configuration should come from environment variables via `.env` files
|
||||
- Use `dotenv` to load environment variables during test setup
|
||||
- Example: Database connection parameters should come from `.env`
|
||||
- Prefer `.env.test` for test-specific configurations
|
||||
- Example: Database connection parameters should come from `.env`; prefer `.env.test` for test-specific configurations
|
||||
|
||||
```rust
|
||||
// Example of proper integration test setup
|
||||
|
@ -379,8 +557,8 @@ async fn test_user_creation() -> Result<()> {
|
|||
```
|
||||
|
||||
#### Database Setup
|
||||
- All integration tests must import and utilize the application's schema from [schema.rs](mdc:src/database/schema.rs)
|
||||
- Database models from [models.rs](mdc:src/database/models.rs) should be used for test data setup and verification
|
||||
- All integration tests must import and utilize the application's schema from @schema.rs
|
||||
- Database models from @models.rs should be used for test data setup and verification
|
||||
- Use the `testkit` library for test IDs and directly access pools from the database library:
|
||||
```rust
|
||||
// tests/common/db.rs
|
||||
|
@ -1710,5 +1888,4 @@ async fn test_user_api() -> Result<()> {
|
|||
|
||||
Ok(())
|
||||
}
|
||||
```
|
||||
```
|
|
@ -20,6 +20,185 @@ For consistent mocking across the codebase, use these libraries:
|
|||
|
||||
Always prefer dependency injection patterns to enable easy mocking of dependencies.
|
||||
|
||||
### Test Utilities and Setup
|
||||
|
||||
#### Core Test Types
|
||||
|
||||
The test infrastructure provides several core test utilities for consistent test setup:
|
||||
|
||||
```rust
|
||||
// Example of TestDb and TestSetup implementation
|
||||
pub struct TestDb {
|
||||
pub test_id: String,
|
||||
pub organization_id: Uuid,
|
||||
pub user_id: Uuid,
|
||||
initialized: bool,
|
||||
}
|
||||
|
||||
impl TestDb {
|
||||
pub async fn new() -> Result<Self> {
|
||||
// Load environment variables
|
||||
dotenv().ok();
|
||||
|
||||
// Initialize test pools using existing pools
|
||||
let test_id = format!("test-{}", Uuid::new_v4());
|
||||
let organization_id = Uuid::new_v4();
|
||||
let user_id = Uuid::new_v4();
|
||||
|
||||
Ok(Self {
|
||||
test_id,
|
||||
organization_id,
|
||||
user_id,
|
||||
initialized: true,
|
||||
})
|
||||
}
|
||||
|
||||
// Get connections from pre-initialized pools
|
||||
pub async fn diesel_conn(&self) -> Result<AsyncPgConnection> {
|
||||
get_pg_pool()
|
||||
.get()
|
||||
.await
|
||||
.map_err(|e| anyhow!("Failed to get diesel connection: {}", e))
|
||||
}
|
||||
|
||||
pub async fn sqlx_conn(&self) -> Result<PgConnection> {
|
||||
get_sqlx_pool()
|
||||
.acquire()
|
||||
.await
|
||||
.map_err(|e| anyhow!("Failed to get sqlx connection: {}", e))
|
||||
}
|
||||
|
||||
pub async fn redis_conn(&self) -> Result<RedisConnection> {
|
||||
get_redis_pool()
|
||||
.get()
|
||||
.await
|
||||
            .map_err(|e| anyhow!("Failed to get redis connection: {}", e))
|
||||
}
|
||||
|
||||
// Create test organization
|
||||
pub async fn create_organization(&self) -> Result<Organization> {
|
||||
let org = Organization {
|
||||
id: Uuid::new_v4(),
|
||||
name: "Test Organization".to_string(),
|
||||
domain: Some("test.org".to_string()),
|
||||
created_at: Utc::now(),
|
||||
updated_at: Utc::now(),
|
||||
deleted_at: None,
|
||||
};
|
||||
|
||||
let mut conn = self.diesel_conn().await?;
|
||||
diesel::insert_into(organizations::table)
|
||||
.values(&org)
|
||||
.execute(&mut conn)
|
||||
.await?;
|
||||
|
||||
Ok(org)
|
||||
}
|
||||
|
||||
// Create test user
|
||||
pub async fn create_user(&self) -> Result<User> {
|
||||
let user = User {
|
||||
id: Uuid::new_v4(),
|
||||
email: format!("test-{}@example.com", Uuid::new_v4()),
|
||||
name: Some("Test User".to_string()),
|
||||
config: json!({}),
|
||||
created_at: Utc::now(),
|
||||
updated_at: Utc::now(),
|
||||
attributes: json!({}),
|
||||
avatar_url: None,
|
||||
};
|
||||
|
||||
let mut conn = self.diesel_conn().await?;
|
||||
diesel::insert_into(users::table)
|
||||
.values(&user)
|
||||
.execute(&mut conn)
|
||||
.await?;
|
||||
|
||||
Ok(user)
|
||||
}
|
||||
|
||||
// Create user-organization relationship
|
||||
pub async fn create_user_to_org(
|
||||
&self,
|
||||
user_id: Uuid,
|
||||
org_id: Uuid,
|
||||
role: UserOrganizationRole,
|
||||
) -> Result<UserToOrganization> {
|
||||
let user_org = UserToOrganization {
|
||||
user_id,
|
||||
organization_id: org_id,
|
||||
role,
|
||||
sharing_setting: SharingSetting::Private,
|
||||
edit_sql: true,
|
||||
upload_csv: true,
|
||||
export_assets: true,
|
||||
email_slack_enabled: true,
|
||||
created_at: Utc::now(),
|
||||
updated_at: Utc::now(),
|
||||
deleted_at: None,
|
||||
created_by: user_id,
|
||||
updated_by: user_id,
|
||||
deleted_by: None,
|
||||
status: UserOrganizationStatus::Active,
|
||||
};
|
||||
|
||||
let mut conn = self.diesel_conn().await?;
|
||||
diesel::insert_into(users_to_organizations::table)
|
||||
.values(&user_org)
|
||||
.execute(&mut conn)
|
||||
.await?;
|
||||
|
||||
Ok(user_org)
|
||||
}
|
||||
|
||||
// Create authenticated user for testing
|
||||
pub async fn create_authenticated_user(
|
||||
&self,
|
||||
role: Option<UserOrganizationRole>,
|
||||
) -> Result<(AuthenticatedUser, Organization)> {
|
||||
let org = self.create_organization().await?;
|
||||
let user = self.create_user().await?;
|
||||
|
||||
let role = role.unwrap_or(UserOrganizationRole::Admin);
|
||||
self.create_user_to_org(user.id, org.id, role).await?;
|
||||
|
||||
let auth_user = AuthenticatedUser {
|
||||
id: user.id,
|
||||
email: user.email,
|
||||
name: user.name,
|
||||
organization_id: org.id,
|
||||
role,
|
||||
sharing_setting: SharingSetting::Private,
|
||||
edit_sql: true,
|
||||
upload_csv: true,
|
||||
export_assets: true,
|
||||
email_slack_enabled: true,
|
||||
};
|
||||
|
||||
Ok((auth_user, org))
|
||||
}
|
||||
}
|
||||
|
||||
// Helper struct for test setup
|
||||
pub struct TestSetup {
|
||||
pub user: AuthenticatedUser,
|
||||
pub organization: Organization,
|
||||
pub db: TestDb,
|
||||
}
|
||||
|
||||
impl TestSetup {
|
||||
pub async fn new(role: Option<UserOrganizationRole>) -> Result<Self> {
|
||||
let test_db = TestDb::new().await?;
|
||||
let (user, org) = test_db.create_authenticated_user(role).await?;
|
||||
|
||||
Ok(Self {
|
||||
user,
|
||||
organization: org,
|
||||
db: test_db,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
### Library Code Testing Structure
|
||||
- **Unit Tests**: Include unit tests inside library source files using `#[cfg(test)]` modules
|
||||
- **Integration Tests**: Place integration tests in the lib's `tests/` directory
|
||||
|
@ -1710,5 +1889,4 @@ async fn test_user_api() -> Result<()> {
|
|||
|
||||
Ok(())
|
||||
}
|
||||
```
|
||||
```
|
|
@ -0,0 +1,304 @@
|
|||
---
|
||||
title: HTTP Status Code Fix
|
||||
author: Claude
|
||||
date: 2024-04-07
|
||||
status: Draft
|
||||
parent_prd: project_bug_fixes_and_testing.md
|
||||
ticket: BUS-1067
|
||||
---
|
||||
|
||||
# HTTP Status Code Fix
|
||||
|
||||
## Problem Statement
|
||||
|
||||
The API is currently returning incorrect HTTP status codes in several scenarios, particularly in error cases. This inconsistency makes it difficult for clients to properly handle errors and leads to confusion in error handling. The main issues are:
|
||||
|
||||
Current behavior:
|
||||
- Some error responses return 200 OK with error in body
|
||||
- Inconsistent use of 4xx status codes
|
||||
- Missing proper status codes for specific error cases
|
||||
- Lack of standardization across handlers
|
||||
|
||||
Expected behavior:
|
||||
- Proper HTTP status codes for all responses
|
||||
- Consistent error status codes
|
||||
- Clear mapping between error types and status codes
|
||||
- Standardized error response format
|
||||
|
||||
## Goals
|
||||
|
||||
1. Standardize HTTP status codes across all handlers
|
||||
2. Implement proper error status codes
|
||||
3. Create error-to-status code mapping
|
||||
4. Add tests for status code verification
|
||||
5. Document status code usage
|
||||
|
||||
## Non-Goals
|
||||
|
||||
1. Changing error message format
|
||||
2. Adding new error types
|
||||
3. Modifying success response format
|
||||
4. Changing API contracts
|
||||
|
||||
## Technical Design
|
||||
|
||||
### Overview
|
||||
|
||||
The fix involves creating a standardized error-to-status code mapping and updating all handlers to use this mapping consistently.
|
||||
|
||||
### Error Status Code Mapping
|
||||
|
||||
```rust
|
||||
// libs/handlers/src/error.rs
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum HandlerError {
|
||||
NotFound(String),
|
||||
Unauthorized(String),
|
||||
Forbidden(String),
|
||||
BadRequest(String),
|
||||
Conflict(String),
|
||||
Internal(String),
|
||||
}
|
||||
|
||||
impl HandlerError {
|
||||
pub fn status_code(&self) -> StatusCode {
|
||||
match self {
|
||||
HandlerError::NotFound(_) => StatusCode::NOT_FOUND,
|
||||
HandlerError::Unauthorized(_) => StatusCode::UNAUTHORIZED,
|
||||
HandlerError::Forbidden(_) => StatusCode::FORBIDDEN,
|
||||
HandlerError::BadRequest(_) => StatusCode::BAD_REQUEST,
|
||||
HandlerError::Conflict(_) => StatusCode::CONFLICT,
|
||||
HandlerError::Internal(_) => StatusCode::INTERNAL_SERVER_ERROR,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<HandlerError> for Response {
|
||||
fn from(error: HandlerError) -> Self {
|
||||
let status = error.status_code();
|
||||
let body = json!({
|
||||
"error": {
|
||||
"message": error.to_string(),
|
||||
"code": status.as_u16()
|
||||
}
|
||||
});
|
||||
|
||||
Response::builder()
|
||||
.status(status)
|
||||
.header("Content-Type", "application/json")
|
||||
.body(body.to_string())
|
||||
.unwrap_or_else(|_| {
|
||||
Response::builder()
|
||||
.status(StatusCode::INTERNAL_SERVER_ERROR)
|
||||
.body("Internal server error".to_string())
|
||||
.unwrap()
|
||||
})
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Handler Updates
|
||||
|
||||
Example handler update:
|
||||
|
||||
```rust
|
||||
// libs/handlers/src/assets/get_asset.rs
|
||||
|
||||
pub async fn get_asset_handler(
|
||||
asset_id: &Uuid,
|
||||
user: &AuthenticatedUser,
|
||||
) -> Result<Response, HandlerError> {
|
||||
let asset = match Asset::find_by_id(asset_id).await {
|
||||
Ok(asset) => asset,
|
||||
Err(_) => return Err(HandlerError::NotFound(
|
||||
format!("Asset {} not found", asset_id)
|
||||
)),
|
||||
};
|
||||
|
||||
if !user.can_view(&asset) {
|
||||
return Err(HandlerError::Forbidden(
|
||||
"User does not have permission to view this asset".to_string()
|
||||
));
|
||||
}
|
||||
|
||||
Ok(Response::builder()
|
||||
.status(StatusCode::OK)
|
||||
.body(json!(asset).to_string())
|
||||
.unwrap())
|
||||
}
|
||||
```
|
||||
|
||||
### Test Cases
|
||||
|
||||
```rust
|
||||
// libs/handlers/tests/error_status_test.rs
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_not_found_status() -> Result<()> {
|
||||
// Create test setup with admin user
|
||||
let setup = TestSetup::new(Some(UserOrganizationRole::Admin)).await?;
|
||||
let fake_id = Uuid::new_v4();
|
||||
|
||||
let response = get_asset_handler(
|
||||
&fake_id,
|
||||
&setup.user
|
||||
).await;
|
||||
|
||||
assert!(response.is_err());
|
||||
let err = response.unwrap_err();
|
||||
assert_eq!(err.status_code(), StatusCode::NOT_FOUND);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_forbidden_status() -> Result<()> {
|
||||
// Create test setup with viewer role
|
||||
let setup = TestSetup::new(Some(UserOrganizationRole::Viewer)).await?;
|
||||
|
||||
// Create asset without permissions
|
||||
let asset_id = AssetTestHelpers::create_test_asset(
|
||||
&setup.db,
|
||||
"Test Asset",
|
||||
setup.organization.id
|
||||
).await?;
|
||||
|
||||
let response = get_asset_handler(
|
||||
&asset_id,
|
||||
&setup.user
|
||||
).await;
|
||||
|
||||
assert!(response.is_err());
|
||||
let err = response.unwrap_err();
|
||||
assert_eq!(err.status_code(), StatusCode::FORBIDDEN);
|
||||
|
||||
// Verify error is logged
|
||||
let mut conn = setup.db.diesel_conn().await?;
|
||||
let error_log = error_logs::table
|
||||
.filter(error_logs::asset_id.eq(asset_id))
|
||||
.filter(error_logs::user_id.eq(setup.user.id))
|
||||
.first::<ErrorLog>(&mut conn)
|
||||
.await?;
|
||||
|
||||
assert_eq!(error_log.status_code, StatusCode::FORBIDDEN.as_u16() as i32);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_error_response_format() -> Result<()> {
|
||||
// Create test setup with viewer role
|
||||
let setup = TestSetup::new(Some(UserOrganizationRole::Viewer)).await?;
|
||||
|
||||
// Create test asset
|
||||
let asset_id = AssetTestHelpers::create_test_asset(
|
||||
&setup.db,
|
||||
"Test Asset",
|
||||
setup.organization.id
|
||||
).await?;
|
||||
|
||||
let response = get_asset_handler(
|
||||
&asset_id,
|
||||
&setup.user
|
||||
).await;
|
||||
|
||||
assert!(response.is_err());
|
||||
let err = response.unwrap_err();
|
||||
|
||||
// Convert error to response
|
||||
let error_response: Response = err.into();
|
||||
|
||||
// Verify response format
|
||||
let body = hyper::body::to_bytes(error_response.into_body()).await?;
|
||||
let error_json: serde_json::Value = serde_json::from_slice(&body)?;
|
||||
|
||||
assert!(error_json.get("error").is_some());
|
||||
assert!(error_json["error"].get("message").is_some());
|
||||
assert!(error_json["error"].get("code").is_some());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
```
|
||||
|
||||
### Dependencies
|
||||
|
||||
1. Test infrastructure from [Test Infrastructure Setup](api_test_infrastructure.md)
|
||||
2. Existing handler implementations
|
||||
3. HTTP status code definitions
|
||||
4. Error type definitions
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
### Phase 1: Error Type Updates
|
||||
|
||||
1. Create/update error types
|
||||
2. Implement status code mapping
|
||||
3. Add error response formatting
|
||||
4. Update documentation
|
||||
|
||||
### Phase 2: Handler Updates
|
||||
|
||||
1. Update handlers to use new error types
|
||||
2. Add proper status code returns
|
||||
3. Implement error handling
|
||||
4. Add tests
|
||||
|
||||
### Phase 3: Testing
|
||||
|
||||
1. Add status code tests
|
||||
2. Test error scenarios
|
||||
3. Verify response formats
|
||||
4. Test edge cases
|
||||
|
||||
## Testing Strategy
|
||||
|
||||
### Unit Tests
|
||||
|
||||
- Test error type mapping
|
||||
- Test status code assignment
|
||||
- Test error response format
|
||||
- Test handler error cases
|
||||
|
||||
### Integration Tests
|
||||
|
||||
- Test complete request flow
|
||||
- Verify status codes
|
||||
- Test error scenarios
|
||||
- Test response format
|
||||
|
||||
## Success Criteria
|
||||
|
||||
1. All handlers return correct status codes
|
||||
2. Error responses are properly formatted
|
||||
3. Tests pass for all scenarios
|
||||
4. Documentation is updated
|
||||
|
||||
## Rollout Plan
|
||||
|
||||
1. Implement error type changes
|
||||
2. Update handlers incrementally
|
||||
3. Deploy to staging
|
||||
4. Monitor for issues
|
||||
5. Deploy to production
|
||||
|
||||
## Appendix
|
||||
|
||||
### Related Files
|
||||
|
||||
- `libs/handlers/src/error.rs`
|
||||
- `libs/handlers/src/assets/*.rs`
|
||||
- `libs/handlers/tests/error_status_test.rs`
|
||||
- `libs/handlers/tests/assets/*.rs`
|
||||
|
||||
### HTTP Status Code Reference
|
||||
|
||||
Common status codes used:
|
||||
- 200: OK
|
||||
- 201: Created
|
||||
- 400: Bad Request
|
||||
- 401: Unauthorized
|
||||
- 403: Forbidden
|
||||
- 404: Not Found
|
||||
- 409: Conflict
|
||||
- 500: Internal Server Error
|
|
@ -0,0 +1,284 @@
|
|||
---
|
||||
title: Metric Status Update Fix
|
||||
author: Claude
|
||||
date: 2024-04-07
|
||||
status: Draft
|
||||
parent_prd: project_bug_fixes_and_testing.md
|
||||
ticket: BUS-1069
|
||||
---
|
||||
|
||||
# Metric Status Update Fix
|
||||
|
||||
## Problem Statement
|
||||
|
||||
The metric status field is not being properly propagated to the metric_file object when updates are made. Currently, when a metric is updated through the `update_metric_handler`, the status field in the request is not being included in the database update operation.
|
||||
|
||||
Current behavior:
|
||||
- Status field in update request is ignored
|
||||
- Metric file status remains unchanged
|
||||
- No validation of status values
|
||||
|
||||
Expected behavior:
|
||||
- Status field from request updates metric file
|
||||
- Status changes are persisted
|
||||
- Status values are validated
|
||||
|
||||
## Goals
|
||||
|
||||
1. Fix status field propagation in update handler
|
||||
2. Add status field validation
|
||||
3. Add tests for status updates
|
||||
4. Document status field behavior
|
||||
|
||||
## Non-Goals
|
||||
|
||||
1. Changing status field schema
|
||||
2. Adding new status values
|
||||
3. Modifying status field behavior
|
||||
4. Adding status-related features
|
||||
|
||||
## Technical Design
|
||||
|
||||
### Overview
|
||||
|
||||
The fix involves modifying the `update_metric_handler` to properly handle the status field and adding appropriate tests using the new test infrastructure.
|
||||
|
||||
### File Changes
|
||||
|
||||
1. Update the update handler:
|
||||
|
||||
```rust
|
||||
// libs/handlers/src/metrics/update_metric_handler.rs
|
||||
|
||||
pub async fn update_metric_handler(
|
||||
metric_id: &Uuid,
|
||||
user: &AuthenticatedUser,
|
||||
request: UpdateMetricRequest,
|
||||
) -> Result<BusterMetric> {
|
||||
// ... existing permission checks ...
|
||||
|
||||
// Build update query - include verification in main update
|
||||
let builder = diesel::update(metric_files::table)
|
||||
.filter(metric_files::id.eq(metric_id))
|
||||
.filter(metric_files::deleted_at.is_null());
|
||||
|
||||
// Update based on whether verification and metadata are provided
|
||||
let update_result = if let Some(verification) = request.verification {
|
||||
if let Some(metadata) = data_metadata {
|
||||
builder
|
||||
.set((
|
||||
metric_files::name.eq(content.name.clone()),
|
||||
metric_files::verification.eq(verification), // Include verification
|
||||
metric_files::content.eq(content_json),
|
||||
metric_files::updated_at.eq(Utc::now()),
|
||||
metric_files::version_history.eq(current_version_history),
|
||||
metric_files::data_metadata.eq(metadata),
|
||||
))
|
||||
.execute(&mut conn)
|
||||
.await
|
||||
} else {
|
||||
builder
|
||||
.set((
|
||||
metric_files::name.eq(content.name.clone()),
|
||||
metric_files::verification.eq(verification), // Include verification
|
||||
metric_files::content.eq(content_json),
|
||||
metric_files::updated_at.eq(Utc::now()),
|
||||
metric_files::version_history.eq(current_version_history),
|
||||
))
|
||||
.execute(&mut conn)
|
||||
.await
|
||||
}
|
||||
} else {
|
||||
if let Some(metadata) = data_metadata {
|
||||
builder
|
||||
.set((
|
||||
metric_files::name.eq(content.name.clone()),
|
||||
metric_files::content.eq(content_json),
|
||||
metric_files::updated_at.eq(Utc::now()),
|
||||
metric_files::version_history.eq(current_version_history),
|
||||
metric_files::data_metadata.eq(metadata),
|
||||
))
|
||||
.execute(&mut conn)
|
||||
.await
|
||||
} else {
|
||||
builder
|
||||
.set((
|
||||
metric_files::name.eq(content.name.clone()),
|
||||
metric_files::content.eq(content_json),
|
||||
metric_files::updated_at.eq(Utc::now()),
|
||||
metric_files::version_history.eq(current_version_history),
|
||||
))
|
||||
.execute(&mut conn)
|
||||
.await
|
||||
}
|
||||
};
|
||||
|
||||
match update_result {
|
||||
Ok(_) => get_metric_handler(metric_id, user, None).await,
|
||||
Err(e) => Err(anyhow!("Failed to update metric: {}", e)),
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
2. Add tests:
|
||||
|
||||
```rust
|
||||
// libs/handlers/tests/metrics/update_metric_test.rs
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_update_metric_status() -> Result<()> {
|
||||
// Create test setup with admin user
|
||||
let setup = TestSetup::new(Some(UserOrganizationRole::Admin)).await?;
|
||||
|
||||
// Create test metric
|
||||
let metric_id = AssetTestHelpers::create_test_metric(
|
||||
&setup.db,
|
||||
"Test Metric",
|
||||
setup.organization.id
|
||||
).await?;
|
||||
|
||||
// Add owner permission
|
||||
PermissionTestHelpers::create_permission(
|
||||
&setup.db,
|
||||
metric_id,
|
||||
setup.user.id,
|
||||
AssetPermissionRole::Owner
|
||||
).await?;
|
||||
|
||||
// Create update request with new status
|
||||
let request = UpdateMetricRequest {
|
||||
verification: Some(Verification::Verified),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
// Update metric
|
||||
let updated_metric = update_metric_handler(
|
||||
&metric_id,
|
||||
&setup.user,
|
||||
request
|
||||
).await?;
|
||||
|
||||
// Verify status was updated
|
||||
assert_eq!(updated_metric.status, Verification::Verified);
|
||||
|
||||
// Verify database was updated
|
||||
let mut conn = setup.db.diesel_conn().await?;
|
||||
let db_metric = metric_files::table
|
||||
.find(metric_id)
|
||||
.first::<MetricFile>(&mut conn)
|
||||
.await?;
|
||||
|
||||
assert_eq!(db_metric.verification, Verification::Verified);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_update_metric_status_unauthorized() -> Result<()> {
|
||||
// Create test setup with viewer role
|
||||
let setup = TestSetup::new(Some(UserOrganizationRole::Viewer)).await?;
|
||||
|
||||
// Create test metric
|
||||
let metric_id = AssetTestHelpers::create_test_metric(
|
||||
&setup.db,
|
||||
"Test Metric",
|
||||
setup.organization.id
|
||||
).await?;
|
||||
|
||||
// Add view-only permission
|
||||
PermissionTestHelpers::create_permission(
|
||||
&setup.db,
|
||||
metric_id,
|
||||
setup.user.id,
|
||||
AssetPermissionRole::CanView
|
||||
).await?;
|
||||
|
||||
// Create update request with new status
|
||||
let request = UpdateMetricRequest {
|
||||
verification: Some(Verification::Verified),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
// Attempt update should fail
|
||||
let result = update_metric_handler(
|
||||
&metric_id,
|
||||
&setup.user,
|
||||
request
|
||||
).await;
|
||||
|
||||
assert!(result.is_err());
|
||||
|
||||
// Verify status was not updated
|
||||
let mut conn = setup.db.diesel_conn().await?;
|
||||
let db_metric = metric_files::table
|
||||
.find(metric_id)
|
||||
.first::<MetricFile>(&mut conn)
|
||||
.await?;
|
||||
|
||||
assert_ne!(db_metric.verification, Verification::Verified);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
```
|
||||
|
||||
### Dependencies
|
||||
|
||||
1. Test infrastructure from [Test Infrastructure Setup](api_test_infrastructure.md)
|
||||
2. Existing metric update handler
|
||||
3. Database schema and models
|
||||
4. Permission system
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
### Phase 1: Fix Implementation
|
||||
|
||||
1. Update the handler code to include status field
|
||||
2. Add validation for status values
|
||||
3. Ensure proper error handling
|
||||
4. Update documentation
|
||||
|
||||
### Phase 2: Testing
|
||||
|
||||
1. Implement test cases using new infrastructure
|
||||
2. Test all status update scenarios
|
||||
3. Test permission requirements
|
||||
4. Test error cases
|
||||
|
||||
## Testing Strategy
|
||||
|
||||
### Unit Tests
|
||||
|
||||
- Test status field validation
|
||||
- Test permission requirements
|
||||
- Test error handling
|
||||
- Test default behavior
|
||||
|
||||
### Integration Tests
|
||||
|
||||
- Test complete update workflow
|
||||
- Verify database state
|
||||
- Test concurrent updates
|
||||
- Test version history
|
||||
|
||||
## Success Criteria
|
||||
|
||||
1. Status updates are persisted correctly
|
||||
2. All tests pass
|
||||
3. No regressions in other functionality
|
||||
4. Documentation is updated
|
||||
|
||||
## Rollout Plan
|
||||
|
||||
1. Implement fix with tests
|
||||
2. Review and validate changes
|
||||
3. Deploy to staging
|
||||
4. Monitor for issues
|
||||
5. Deploy to production
|
||||
|
||||
## Appendix
|
||||
|
||||
### Related Files
|
||||
|
||||
- `libs/handlers/src/metrics/update_metric_handler.rs`
|
||||
- `libs/handlers/tests/metrics/update_metric_test.rs`
|
||||
- `libs/database/schema.rs`
|
|
@ -0,0 +1,386 @@
|
|||
---
|
||||
title: Permission Field Fix
|
||||
author: Claude
|
||||
date: 2024-04-07
|
||||
status: Draft
|
||||
parent_prd: project_bug_fixes_and_testing.md
|
||||
ticket: BUS-1063
|
||||
---
|
||||
|
||||
# Permission Field Fix
|
||||
|
||||
## Problem Statement
|
||||
|
||||
The permission field in asset responses is inconsistent and sometimes missing required information. This causes issues with permission checking and display in the UI. The current implementation has several issues:
|
||||
|
||||
Current behavior:
|
||||
- Permission field sometimes missing from responses
|
||||
- Inconsistent permission field format
|
||||
- Missing role information
|
||||
- Incorrect inherited permissions
|
||||
|
||||
Expected behavior:
|
||||
- Consistent permission field presence
|
||||
- Standardized permission format
|
||||
- Complete role information
|
||||
- Proper inheritance handling
|
||||
|
||||
## Goals
|
||||
|
||||
1. Fix permission field consistency
|
||||
2. Standardize permission format
|
||||
3. Add complete role information
|
||||
4. Fix permission inheritance
|
||||
5. Add permission field tests
|
||||
|
||||
## Non-Goals
|
||||
|
||||
1. Adding new permission types
|
||||
2. Changing permission model
|
||||
3. Modifying permission UI
|
||||
4. Adding new permission features
|
||||
|
||||
## Technical Design
|
||||
|
||||
### Overview
|
||||
|
||||
The fix involves standardizing the permission field format and ensuring consistent inclusion in responses.
|
||||
|
||||
### Permission Field Structure
|
||||
|
||||
```rust
|
||||
// libs/models/src/permissions.rs
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct PermissionField {
|
||||
pub role: AssetPermissionRole,
|
||||
pub inherited: bool,
|
||||
pub inherited_from: Option<Uuid>,
|
||||
pub granted_at: DateTime<Utc>,
|
||||
pub granted_by: Option<Uuid>,
|
||||
}
|
||||
|
||||
impl PermissionField {
|
||||
pub fn new(
|
||||
role: AssetPermissionRole,
|
||||
inherited: bool,
|
||||
inherited_from: Option<Uuid>,
|
||||
granted_by: Option<Uuid>,
|
||||
) -> Self {
|
||||
Self {
|
||||
role,
|
||||
inherited,
|
||||
inherited_from,
|
||||
granted_at: Utc::now(),
|
||||
granted_by,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_permission(permission: &Permission) -> Self {
|
||||
Self {
|
||||
role: permission.role,
|
||||
inherited: permission.inherited,
|
||||
inherited_from: permission.inherited_from,
|
||||
granted_at: permission.granted_at,
|
||||
granted_by: permission.granted_by,
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Asset Response Update
|
||||
|
||||
```rust
|
||||
// libs/models/src/asset.rs
|
||||
|
||||
#[derive(Debug, Serialize)]
|
||||
pub struct AssetResponse {
|
||||
pub id: Uuid,
|
||||
pub name: String,
|
||||
pub created_at: DateTime<Utc>,
|
||||
pub updated_at: DateTime<Utc>,
|
||||
pub permission: PermissionField, // Now required
|
||||
// ... other fields ...
|
||||
}
|
||||
|
||||
impl Asset {
|
||||
pub async fn to_response(
|
||||
&self,
|
||||
user_id: Uuid,
|
||||
conn: &mut PgConnection
|
||||
) -> Result<AssetResponse> {
|
||||
// Get user's permission
|
||||
let permission = Permission::find_for_user(
|
||||
self.id,
|
||||
user_id,
|
||||
conn
|
||||
).await?;
|
||||
|
||||
Ok(AssetResponse {
|
||||
id: self.id,
|
||||
name: self.name.clone(),
|
||||
created_at: self.created_at,
|
||||
updated_at: self.updated_at,
|
||||
permission: PermissionField::from_permission(&permission),
|
||||
// ... other fields ...
|
||||
})
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Test Cases
|
||||
|
||||
```rust
|
||||
// libs/models/tests/permission_field_test.rs
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_permission_field_direct() -> Result<()> {
|
||||
// Create test setup with admin user
|
||||
let setup = TestSetup::new(Some(UserOrganizationRole::Admin)).await?;
|
||||
|
||||
// Create test asset
|
||||
let asset_id = AssetTestHelpers::create_test_asset(
|
||||
&setup.db,
|
||||
"Test Asset",
|
||||
setup.organization.id
|
||||
).await?;
|
||||
|
||||
// Add direct permission
|
||||
let permission = PermissionTestHelpers::create_permission(
|
||||
&setup.db,
|
||||
asset_id,
|
||||
setup.user.id,
|
||||
AssetPermissionRole::Owner
|
||||
).await?;
|
||||
|
||||
// Get asset response
|
||||
let mut conn = setup.db.diesel_conn().await?;
|
||||
let asset = Asset::find_by_id(asset_id).await?;
|
||||
let response = asset.to_response(
|
||||
setup.user.id,
|
||||
&mut conn
|
||||
).await?;
|
||||
|
||||
// Verify permission field
|
||||
assert_eq!(response.permission.role, AssetPermissionRole::Owner);
|
||||
assert!(!response.permission.inherited);
|
||||
assert!(response.permission.inherited_from.is_none());
|
||||
assert_eq!(
|
||||
response.permission.granted_by,
|
||||
Some(setup.user.id)
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_permission_field_inherited() -> Result<()> {
|
||||
// Create test setup with admin user
|
||||
let setup = TestSetup::new(Some(UserOrganizationRole::Admin)).await?;
|
||||
|
||||
// Create parent asset
|
||||
let parent_id = AssetTestHelpers::create_test_asset(
|
||||
&setup.db,
|
||||
"Parent Asset",
|
||||
setup.organization.id
|
||||
).await?;
|
||||
|
||||
// Create child asset
|
||||
let child_id = AssetTestHelpers::create_test_asset(
|
||||
&setup.db,
|
||||
"Child Asset",
|
||||
setup.organization.id
|
||||
).await?;
|
||||
|
||||
// Add parent permission
|
||||
PermissionTestHelpers::create_permission(
|
||||
&setup.db,
|
||||
parent_id,
|
||||
setup.user.id,
|
||||
AssetPermissionRole::Owner
|
||||
).await?;
|
||||
|
||||
// Set inheritance
|
||||
let mut conn = setup.db.diesel_conn().await?;
|
||||
diesel::update(assets::table)
|
||||
.filter(assets::id.eq(child_id))
|
||||
.set(assets::parent_id.eq(parent_id))
|
||||
.execute(&mut conn)
|
||||
.await?;
|
||||
|
||||
// Get child asset response
|
||||
let child = Asset::find_by_id(child_id).await?;
|
||||
let response = child.to_response(
|
||||
setup.user.id,
|
||||
&mut conn
|
||||
).await?;
|
||||
|
||||
// Verify inherited permission field
|
||||
assert_eq!(response.permission.role, AssetPermissionRole::Owner);
|
||||
assert!(response.permission.inherited);
|
||||
assert_eq!(response.permission.inherited_from, Some(parent_id));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_permission_field_missing() -> Result<()> {
|
||||
// Create test setup with viewer role
|
||||
let setup = TestSetup::new(Some(UserOrganizationRole::Viewer)).await?;
|
||||
|
||||
// Create asset without permission
|
||||
let asset_id = AssetTestHelpers::create_test_asset(
|
||||
&setup.db,
|
||||
"Test Asset",
|
||||
setup.organization.id
|
||||
).await?;
|
||||
|
||||
// Try to get asset response
|
||||
let mut conn = setup.db.diesel_conn().await?;
|
||||
let asset = Asset::find_by_id(asset_id).await?;
|
||||
let result = asset.to_response(
|
||||
setup.user.id,
|
||||
&mut conn
|
||||
).await;
|
||||
|
||||
assert!(result.is_err());
|
||||
|
||||
// Verify error is logged
|
||||
let error_log = error_logs::table
|
||||
.filter(error_logs::asset_id.eq(asset_id))
|
||||
.filter(error_logs::user_id.eq(setup.user.id))
|
||||
.first::<ErrorLog>(&mut conn)
|
||||
.await?;
|
||||
|
||||
assert_eq!(error_log.error_type, "missing_permission");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_permission_field_serialization() -> Result<()> {
|
||||
// Create test setup with admin user
|
||||
let setup = TestSetup::new(Some(UserOrganizationRole::Admin)).await?;
|
||||
|
||||
// Create test asset
|
||||
let asset_id = AssetTestHelpers::create_test_asset(
|
||||
&setup.db,
|
||||
"Test Asset",
|
||||
setup.organization.id
|
||||
).await?;
|
||||
|
||||
// Add permission
|
||||
PermissionTestHelpers::create_permission(
|
||||
&setup.db,
|
||||
asset_id,
|
||||
setup.user.id,
|
||||
AssetPermissionRole::Owner
|
||||
).await?;
|
||||
|
||||
// Get asset response
|
||||
let mut conn = setup.db.diesel_conn().await?;
|
||||
let asset = Asset::find_by_id(asset_id).await?;
|
||||
let response = asset.to_response(
|
||||
setup.user.id,
|
||||
&mut conn
|
||||
).await?;
|
||||
|
||||
// Serialize response
|
||||
let json = serde_json::to_value(&response)?;
|
||||
|
||||
// Verify permission field structure
|
||||
assert!(json.get("permission").is_some());
|
||||
assert_eq!(
|
||||
json["permission"]["role"].as_str(),
|
||||
Some("owner")
|
||||
);
|
||||
assert_eq!(
|
||||
json["permission"]["inherited"].as_bool(),
|
||||
Some(false)
|
||||
);
|
||||
assert!(json["permission"]["inherited_from"].is_null());
|
||||
assert!(json["permission"]["granted_at"].is_string());
|
||||
assert!(json["permission"]["granted_by"].is_string());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
```
|
||||
|
||||
### Dependencies
|
||||
|
||||
1. Test infrastructure from [Test Infrastructure Setup](api_test_infrastructure.md)
|
||||
2. Existing permission system
|
||||
3. Asset response handling
|
||||
4. Error handling system from [HTTP Status Code Fix](api_http_status_fix.md)
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
### Phase 1: Field Structure
|
||||
|
||||
1. Implement permission field structure
|
||||
2. Add field serialization
|
||||
3. Update response types
|
||||
4. Document field format
|
||||
|
||||
### Phase 2: Response Updates
|
||||
|
||||
1. Update asset responses
|
||||
2. Add permission field
|
||||
3. Handle missing permissions
|
||||
4. Add response tests
|
||||
|
||||
### Phase 3: Testing
|
||||
|
||||
1. Add field tests
|
||||
2. Test inheritance
|
||||
3. Test error cases
|
||||
4. Test edge cases
|
||||
|
||||
## Testing Strategy
|
||||
|
||||
### Unit Tests
|
||||
|
||||
- Test field structure
|
||||
- Test serialization
|
||||
- Test inheritance
|
||||
- Test missing permissions
|
||||
|
||||
### Integration Tests
|
||||
|
||||
- Test response format
|
||||
- Test permission flow
|
||||
- Test error handling
|
||||
- Test complete asset flow
|
||||
|
||||
## Success Criteria
|
||||
|
||||
1. Permission field is consistent
|
||||
2. Inheritance works correctly
|
||||
3. Tests pass for all scenarios
|
||||
4. Documentation is updated
|
||||
|
||||
## Rollout Plan
|
||||
|
||||
1. Implement field changes
|
||||
2. Update responses
|
||||
3. Deploy to staging
|
||||
4. Monitor for issues
|
||||
5. Deploy to production
|
||||
|
||||
## Appendix
|
||||
|
||||
### Related Files
|
||||
|
||||
- `libs/models/src/permissions.rs`
|
||||
- `libs/models/src/asset.rs`
|
||||
- `libs/models/tests/permission_field_test.rs`
|
||||
- `libs/handlers/src/assets/*.rs`
|
||||
|
||||
### Permission Field Reference
|
||||
|
||||
Required fields:
|
||||
- role: The user's role for the asset
|
||||
- inherited: Whether the permission is inherited
|
||||
- inherited_from: Parent asset ID if inherited
|
||||
- granted_at: When the permission was granted
|
||||
- granted_by: User ID who granted the permission
|
|
@ -0,0 +1,357 @@
|
|||
---
|
||||
title: Public Sharing Parameters Fix
|
||||
author: Claude
|
||||
date: 2024-04-07
|
||||
status: Draft
|
||||
parent_prd: project_bug_fixes_and_testing.md
|
||||
ticket: BUS-1064
|
||||
---
|
||||
|
||||
# Public Sharing Parameters Fix
|
||||
|
||||
## Problem Statement
|
||||
|
||||
The public sharing functionality is not properly handling sharing parameters, particularly in the context of asset visibility and access control. The current implementation has several issues:
|
||||
|
||||
Current behavior:
|
||||
- Public sharing parameters are not properly validated
|
||||
- Inconsistent handling of visibility settings
|
||||
- Missing checks for valid sharing configurations
|
||||
- Lack of proper error handling for invalid parameters
|
||||
|
||||
Expected behavior:
|
||||
- Proper validation of all sharing parameters
|
||||
- Consistent visibility handling
|
||||
- Clear error messages for invalid configurations
|
||||
- Proper access control enforcement
|
||||
|
||||
## Goals
|
||||
|
||||
1. Fix public sharing parameter validation
|
||||
2. Implement proper visibility checks
|
||||
3. Add comprehensive parameter validation
|
||||
4. Improve error handling
|
||||
5. Add tests for sharing scenarios
|
||||
|
||||
## Non-Goals
|
||||
|
||||
1. Adding new sharing features
|
||||
2. Modifying sharing UI
|
||||
3. Changing sharing model
|
||||
4. Adding new permission types
|
||||
|
||||
## Technical Design
|
||||
|
||||
### Overview
|
||||
|
||||
The fix involves updating the sharing parameter validation logic and implementing proper checks for visibility settings.
|
||||
|
||||
### Parameter Validation
|
||||
|
||||
```rust
|
||||
// libs/handlers/src/sharing/validate.rs
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct SharingParameters {
|
||||
pub is_public: bool,
|
||||
pub allow_anonymous: bool,
|
||||
pub expiration: Option<DateTime<Utc>>,
|
||||
pub access_level: AccessLevel,
|
||||
}
|
||||
|
||||
impl SharingParameters {
|
||||
pub fn validate(&self) -> Result<(), HandlerError> {
|
||||
// Check for valid combinations
|
||||
if self.is_public && !self.allow_anonymous {
|
||||
return Err(HandlerError::BadRequest(
|
||||
"Public sharing must allow anonymous access".to_string()
|
||||
));
|
||||
}
|
||||
|
||||
// Validate expiration
|
||||
if let Some(exp) = self.expiration {
|
||||
if exp < Utc::now() {
|
||||
return Err(HandlerError::BadRequest(
|
||||
"Expiration date must be in the future".to_string()
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
// Validate access level
|
||||
if self.is_public && self.access_level > AccessLevel::ReadOnly {
|
||||
return Err(HandlerError::BadRequest(
|
||||
"Public sharing cannot grant write access".to_string()
|
||||
));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Sharing Handler Update
|
||||
|
||||
```rust
|
||||
// libs/handlers/src/sharing/update_sharing.rs
|
||||
|
||||
pub async fn update_sharing_handler(
|
||||
asset_id: &Uuid,
|
||||
user: &AuthenticatedUser,
|
||||
params: SharingParameters,
|
||||
) -> Result<Response, HandlerError> {
|
||||
// Validate parameters
|
||||
params.validate()?;
|
||||
|
||||
// Check user permissions
|
||||
let asset = Asset::find_by_id(asset_id).await?;
|
||||
if !user.can_manage_sharing(&asset) {
|
||||
return Err(HandlerError::Forbidden(
|
||||
"User does not have permission to update sharing settings".to_string()
|
||||
));
|
||||
}
|
||||
|
||||
// Update sharing settings
|
||||
asset.update_sharing(params).await?;
|
||||
|
||||
Ok(Response::builder()
|
||||
.status(StatusCode::OK)
|
||||
.body(json!({"status": "success"}).to_string())
|
||||
.unwrap())
|
||||
}
|
||||
```
|
||||
|
||||
### Test Cases
|
||||
|
||||
```rust
|
||||
// libs/handlers/tests/sharing/sharing_params_test.rs
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_invalid_public_sharing() -> Result<()> {
|
||||
// Create test setup with admin user
|
||||
let setup = TestSetup::new(Some(UserOrganizationRole::Admin)).await?;
|
||||
|
||||
let params = SharingParameters {
|
||||
is_public: true,
|
||||
allow_anonymous: false,
|
||||
expiration: None,
|
||||
access_level: AccessLevel::ReadOnly,
|
||||
};
|
||||
|
||||
let result = params.validate();
|
||||
assert!(result.is_err());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_expired_sharing() -> Result<()> {
|
||||
// Create test setup with admin user
|
||||
let setup = TestSetup::new(Some(UserOrganizationRole::Admin)).await?;
|
||||
|
||||
let params = SharingParameters {
|
||||
is_public: true,
|
||||
allow_anonymous: true,
|
||||
expiration: Some(Utc::now() - Duration::hours(1)),
|
||||
access_level: AccessLevel::ReadOnly,
|
||||
};
|
||||
|
||||
let result = params.validate();
|
||||
assert!(result.is_err());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_invalid_public_access() -> Result<()> {
|
||||
// Create test setup with admin user
|
||||
let setup = TestSetup::new(Some(UserOrganizationRole::Admin)).await?;
|
||||
|
||||
let params = SharingParameters {
|
||||
is_public: true,
|
||||
allow_anonymous: true,
|
||||
expiration: None,
|
||||
access_level: AccessLevel::ReadWrite,
|
||||
};
|
||||
|
||||
let result = params.validate();
|
||||
assert!(result.is_err());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_valid_sharing() -> Result<()> {
|
||||
// Create test setup with admin user
|
||||
let setup = TestSetup::new(Some(UserOrganizationRole::Admin)).await?;
|
||||
|
||||
// Create test asset
|
||||
let asset_id = AssetTestHelpers::create_test_asset(
|
||||
&setup.db,
|
||||
"Test Asset",
|
||||
setup.organization.id
|
||||
).await?;
|
||||
|
||||
// Add owner permission
|
||||
PermissionTestHelpers::create_permission(
|
||||
&setup.db,
|
||||
asset_id,
|
||||
setup.user.id,
|
||||
AssetPermissionRole::Owner
|
||||
).await?;
|
||||
|
||||
let params = SharingParameters {
|
||||
is_public: true,
|
||||
allow_anonymous: true,
|
||||
expiration: Some(Utc::now() + Duration::days(7)),
|
||||
access_level: AccessLevel::ReadOnly,
|
||||
};
|
||||
|
||||
let response = update_sharing_handler(
|
||||
&asset_id,
|
||||
&setup.user,
|
||||
params.clone()
|
||||
).await;
|
||||
|
||||
assert!(response.is_ok());
|
||||
|
||||
// Verify sharing settings in database
|
||||
let mut conn = setup.db.diesel_conn().await?;
|
||||
let sharing = sharing_settings::table
|
||||
.filter(sharing_settings::asset_id.eq(asset_id))
|
||||
.first::<SharingSettings>(&mut conn)
|
||||
.await?;
|
||||
|
||||
assert_eq!(sharing.is_public, params.is_public);
|
||||
assert_eq!(sharing.allow_anonymous, params.allow_anonymous);
|
||||
assert_eq!(sharing.access_level, params.access_level);
|
||||
assert_eq!(sharing.expiration, params.expiration);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_sharing_update_history() -> Result<()> {
|
||||
// Create test setup with admin user
|
||||
let setup = TestSetup::new(Some(UserOrganizationRole::Admin)).await?;
|
||||
|
||||
// Create test asset
|
||||
let asset_id = AssetTestHelpers::create_test_asset(
|
||||
&setup.db,
|
||||
"Test Asset",
|
||||
setup.organization.id
|
||||
).await?;
|
||||
|
||||
// Add owner permission
|
||||
PermissionTestHelpers::create_permission(
|
||||
&setup.db,
|
||||
asset_id,
|
||||
setup.user.id,
|
||||
AssetPermissionRole::Owner
|
||||
).await?;
|
||||
|
||||
let params = SharingParameters {
|
||||
is_public: true,
|
||||
allow_anonymous: true,
|
||||
expiration: None,
|
||||
access_level: AccessLevel::ReadOnly,
|
||||
};
|
||||
|
||||
// Update sharing settings
|
||||
update_sharing_handler(
|
||||
&asset_id,
|
||||
&setup.user,
|
||||
params
|
||||
).await?;
|
||||
|
||||
// Verify history entry in database
|
||||
let mut conn = setup.db.diesel_conn().await?;
|
||||
let history = sharing_history::table
|
||||
.filter(sharing_history::asset_id.eq(asset_id))
|
||||
.order_by(sharing_history::created_at.desc())
|
||||
.first::<SharingHistory>(&mut conn)
|
||||
.await?;
|
||||
|
||||
assert_eq!(history.user_id, setup.user.id);
|
||||
assert_eq!(history.action, "update");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
```
|
||||
|
||||
### Dependencies
|
||||
|
||||
1. Test infrastructure from [Test Infrastructure Setup](api_test_infrastructure.md)
|
||||
2. Existing sharing implementation
|
||||
3. Permission system
|
||||
4. Error handling system from [HTTP Status Code Fix](api_http_status_fix.md)
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
### Phase 1: Parameter Validation
|
||||
|
||||
1. Implement parameter validation
|
||||
2. Add validation tests
|
||||
3. Update error handling
|
||||
4. Document validation rules
|
||||
|
||||
### Phase 2: Handler Updates
|
||||
|
||||
1. Update sharing handlers
|
||||
2. Add validation checks
|
||||
3. Implement error handling
|
||||
4. Add handler tests
|
||||
|
||||
### Phase 3: Testing
|
||||
|
||||
1. Add validation tests
|
||||
2. Test sharing scenarios
|
||||
3. Test error cases
|
||||
4. Test edge cases
|
||||
|
||||
## Testing Strategy
|
||||
|
||||
### Unit Tests
|
||||
|
||||
- Test parameter validation
|
||||
- Test invalid combinations
|
||||
- Test expiration handling
|
||||
- Test access level validation
|
||||
|
||||
### Integration Tests
|
||||
|
||||
- Test sharing updates
|
||||
- Test permission checks
|
||||
- Test error handling
|
||||
- Test complete sharing flow
|
||||
|
||||
## Success Criteria
|
||||
|
||||
1. All sharing parameters are properly validated
|
||||
2. Invalid configurations are rejected
|
||||
3. Tests pass for all scenarios
|
||||
4. Documentation is updated
|
||||
|
||||
## Rollout Plan
|
||||
|
||||
1. Implement validation changes
|
||||
2. Update handlers
|
||||
3. Deploy to staging
|
||||
4. Monitor for issues
|
||||
5. Deploy to production
|
||||
|
||||
## Appendix
|
||||
|
||||
### Related Files
|
||||
|
||||
- `libs/handlers/src/sharing/validate.rs`
|
||||
- `libs/handlers/src/sharing/update_sharing.rs`
|
||||
- `libs/handlers/tests/sharing/sharing_params_test.rs`
|
||||
- `libs/models/src/sharing.rs`
|
||||
|
||||
### Sharing Parameter Reference
|
||||
|
||||
Valid parameter combinations:
|
||||
- Public sharing must allow anonymous access
|
||||
- Public sharing limited to read-only access
|
||||
- Expiration date must be in the future
|
||||
- Non-public sharing can have any access level
|
|
@ -0,0 +1,541 @@
|
|||
---
|
||||
title: Test Infrastructure Setup
|
||||
author: Claude
|
||||
date: 2024-04-07
|
||||
status: Draft
|
||||
parent_prd: project_bug_fixes_and_testing.md
|
||||
---
|
||||
|
||||
# Test Infrastructure Setup
|
||||
|
||||
## Problem Statement
|
||||
|
||||
Our current testing infrastructure lacks standardized utilities for common testing scenarios, particularly around database operations and permission testing. This makes it difficult to write comprehensive tests and leads to duplicated test setup code.
|
||||
|
||||
Current pain points:
|
||||
- Duplicated database setup code across test files
|
||||
- Inconsistent permission testing patterns
|
||||
- No standard way to create test assets with permissions
|
||||
- Lack of cleanup utilities for test data
|
||||
- No helpers for common permission scenarios
|
||||
|
||||
## Goals
|
||||
|
||||
1. Create reusable database test utilities
|
||||
2. Establish permission testing helpers
|
||||
3. Provide asset creation utilities
|
||||
4. Implement test data cleanup
|
||||
5. Document testing patterns
|
||||
|
||||
## Non-Goals
|
||||
|
||||
1. Rewriting existing tests
|
||||
2. Changing test framework
|
||||
3. Modifying production code
|
||||
4. Adding new test types
|
||||
|
||||
## Technical Design
|
||||
|
||||
### Overview
|
||||
|
||||
The test infrastructure will provide a set of utilities and helpers that make it easier to write tests for our asset-related functionality. These utilities will be implemented in the `tests/common` directory of each library that needs them.
|
||||
|
||||
```mermaid
|
||||
graph TD
|
||||
A[TestDb] --> B[AssetTestHelpers]
|
||||
A --> C[PermissionTestHelpers]
|
||||
B --> D[MetricTestHelpers]
|
||||
B --> E[DashboardTestHelpers]
|
||||
B --> F[ChatTestHelpers]
|
||||
B --> G[CollectionTestHelpers]
|
||||
C --> H[TestUser]
|
||||
```
|
||||
|
||||
### File Changes
|
||||
|
||||
1. Create new test utility files:
|
||||
|
||||
```rust
|
||||
// libs/database/tests/common/mod.rs
|
||||
pub mod db;
|
||||
pub mod permissions;
|
||||
pub mod assets;
|
||||
pub mod users;
|
||||
```
|
||||
|
||||
2. Implement TestDb struct using existing pool:
|
||||
|
||||
```rust
|
||||
// libs/database/tests/common/db.rs
|
||||
use anyhow::{anyhow, Result};
|
||||
use uuid::Uuid;
|
||||
use database::pool::{init_test_pools, get_pg_pool, get_sqlx_pool, get_redis_pool};
|
||||
use diesel_async::AsyncPgConnection;
|
||||
use sqlx::postgres::PgConnection;
|
||||
use bb8_redis::RedisConnection;
|
||||
use tracing;
|
||||
use dotenv::dotenv;
|
||||
|
||||
pub struct TestDb {
|
||||
pub test_id: String,
|
||||
pub organization_id: Uuid,
|
||||
pub user_id: Uuid,
|
||||
initialized: bool,
|
||||
}
|
||||
|
||||
impl TestDb {
|
||||
pub async fn new() -> Result<Self> {
|
||||
// Load environment variables from .env file
|
||||
dotenv().ok();
|
||||
|
||||
// Initialize test pools
|
||||
if let Err(e) = init_test_pools().await {
|
||||
tracing::error!("Failed to initialize test database pools: {}", e);
|
||||
return Err(anyhow!("Failed to initialize test database pools: {}", e));
|
||||
}
|
||||
|
||||
let test_id = format!("test-{}", Uuid::new_v4());
|
||||
let organization_id = Uuid::new_v4();
|
||||
let user_id = Uuid::new_v4();
|
||||
|
||||
let db = Self {
|
||||
test_id,
|
||||
organization_id,
|
||||
user_id,
|
||||
initialized: true,
|
||||
};
|
||||
|
||||
Ok(db)
|
||||
}
|
||||
|
||||
pub async fn diesel_conn(&self) -> Result<AsyncPgConnection> {
|
||||
get_pg_pool()
|
||||
.get()
|
||||
.await
|
||||
.map_err(|e| anyhow!("Failed to get diesel connection: {}", e))
|
||||
}
|
||||
|
||||
    pub async fn sqlx_conn(&self) -> Result<sqlx::pool::PoolConnection<sqlx::Postgres>> {
        // NOTE: `Pool::acquire()` returns a `PoolConnection<Postgres>`,
        // not a bare `PgConnection`.
        get_sqlx_pool()
            .acquire()
            .await
            .map_err(|e| anyhow!("Failed to get sqlx connection: {}", e))
    }
|
||||
|
||||
pub async fn redis_conn(&self) -> Result<RedisConnection> {
|
||||
get_redis_pool()
|
||||
.get()
|
||||
.await
|
||||
.map_err(|e| anyhow!("Failed to get redis connection: {}", e))
|
||||
}
|
||||
|
||||
pub fn user(&self) -> AuthenticatedUser {
|
||||
AuthenticatedUser {
|
||||
id: self.user_id,
|
||||
organization_id: self.organization_id,
|
||||
// ... other fields ...
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn cleanup(&self) -> Result<()> {
|
||||
let mut conn = self.diesel_conn().await?;
|
||||
|
||||
// Delete test data using the test_id prefix
|
||||
diesel::delete(assets::table)
|
||||
.filter(assets::name.like(format!("{}%", self.test_id)))
|
||||
.execute(&mut conn)
|
||||
.await
|
||||
.map_err(|e| anyhow!("Failed to cleanup test data: {}", e))?;
|
||||
|
||||
// ... cleanup other tables ...
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for TestDb {
    fn drop(&mut self) {
        if self.initialized {
            // NOTE(review): `Runtime::block_on` panics when invoked from a
            // thread that is already inside an async context — which is the
            // normal case when a `TestDb` is dropped at the end of a
            // `#[tokio::test]`. Prefer calling `cleanup().await` explicitly
            // from the test body, or use `tokio::task::block_in_place` on a
            // multi-threaded runtime — confirm before adopting this pattern.
            match tokio::runtime::Runtime::new() {
                Ok(rt) => {
                    if let Err(e) = rt.block_on(self.cleanup()) {
                        tracing::error!("Failed to cleanup test data: {}", e);
                    }
                }
                Err(e) => {
                    tracing::error!("Failed to create runtime for cleanup: {}", e);
                }
            }
        }
    }
}
|
||||
```
|
||||
|
||||
3. Implement permission helpers:
|
||||
|
||||
```rust
|
||||
// libs/database/tests/common/permissions.rs
|
||||
use database::enums::AssetPermissionRole;
|
||||
|
||||
pub struct PermissionTestHelpers;
|
||||
|
||||
impl PermissionTestHelpers {
|
||||
pub async fn create_permission(
|
||||
test_db: &TestDb,
|
||||
asset_id: Uuid,
|
||||
role: AssetPermissionRole,
|
||||
) -> Result<()> {
|
||||
let mut conn = test_db.diesel_conn().await?;
|
||||
|
||||
diesel::insert_into(permissions::table)
|
||||
.values((
|
||||
permissions::asset_id.eq(asset_id),
|
||||
permissions::user_id.eq(test_db.user_id),
|
||||
permissions::role.eq(role),
|
||||
permissions::granted_at.eq(Utc::now()),
|
||||
permissions::granted_by.eq(test_db.user_id),
|
||||
))
|
||||
.execute(&mut conn)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn verify_permission(
|
||||
test_db: &TestDb,
|
||||
asset_id: Uuid,
|
||||
expected_role: AssetPermissionRole,
|
||||
) -> Result<()> {
|
||||
let mut conn = test_db.diesel_conn().await?;
|
||||
|
||||
let permission = permissions::table
|
||||
.filter(permissions::asset_id.eq(asset_id))
|
||||
.filter(permissions::user_id.eq(test_db.user_id))
|
||||
.first::<Permission>(&mut conn)
|
||||
.await?;
|
||||
|
||||
assert_eq!(permission.role, expected_role);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
4. Implement asset helpers:
|
||||
|
||||
```rust
|
||||
// libs/database/tests/common/assets.rs
|
||||
pub struct AssetTestHelpers;
|
||||
|
||||
impl AssetTestHelpers {
|
||||
pub async fn create_test_metric(
|
||||
test_db: &TestDb,
|
||||
name: &str,
|
||||
) -> Result<Uuid> {
|
||||
let mut conn = test_db.diesel_conn().await?;
|
||||
let metric_id = Uuid::new_v4();
|
||||
|
||||
diesel::insert_into(metric_files::table)
|
||||
.values((
|
||||
metric_files::id.eq(metric_id),
|
||||
metric_files::name.eq(format!("{}-{}", test_db.test_id, name)),
|
||||
metric_files::organization_id.eq(test_db.organization_id),
|
||||
metric_files::created_by.eq(test_db.user_id),
|
||||
metric_files::created_at.eq(Utc::now()),
|
||||
metric_files::updated_at.eq(Utc::now()),
|
||||
))
|
||||
.execute(&mut conn)
|
||||
.await?;
|
||||
|
||||
Ok(metric_id)
|
||||
}
|
||||
|
||||
// Similar helpers for other asset types
|
||||
}
|
||||
```
|
||||
|
||||
### Dependencies
|
||||
|
||||
1. Database pool from `@database` lib
|
||||
2. Asset type definitions
|
||||
3. Permission enums
|
||||
4. Test framework
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
### Phase 1: Core Infrastructure
|
||||
|
||||
1. Set up test directory structure
|
||||
2. Implement TestDb with pool integration
|
||||
3. Add cleanup functionality
|
||||
4. Create example tests
|
||||
|
||||
### Phase 2: Permission Helpers
|
||||
|
||||
1. Implement permission creation helpers
|
||||
2. Add permission verification utilities
|
||||
3. Create common permission scenarios
|
||||
4. Document permission testing patterns
|
||||
|
||||
### Phase 3: Asset Helpers
|
||||
|
||||
1. Implement metric test helpers
|
||||
2. Add dashboard test helpers
|
||||
3. Create chat test helpers
|
||||
4. Add collection test helpers
|
||||
|
||||
### Phase 4: Documentation
|
||||
|
||||
1. Write usage documentation
|
||||
2. Create example tests
|
||||
3. Document best practices
|
||||
4. Add troubleshooting guide
|
||||
|
||||
## Testing Strategy
|
||||
|
||||
### Unit Tests
|
||||
|
||||
- Test database connection handling
|
||||
- Verify cleanup functionality
|
||||
- Test permission helper methods
|
||||
- Validate asset creation utilities
|
||||
|
||||
### Integration Tests
|
||||
|
||||
- Test complete workflows
|
||||
- Verify cleanup works across tests
|
||||
- Test concurrent test execution
|
||||
- Validate isolation between tests
|
||||
|
||||
## Example Usage
|
||||
|
||||
```rust
|
||||
#[tokio::test]
|
||||
async fn test_metric_permissions() -> Result<()> {
|
||||
// Setup test environment
|
||||
let test_db = TestDb::new().await?;
|
||||
|
||||
// Create test metric
|
||||
let metric_id = AssetTestHelpers::create_test_metric(
|
||||
&test_db,
|
||||
"Test Metric"
|
||||
).await?;
|
||||
|
||||
// Add permission
|
||||
PermissionTestHelpers::create_permission(
|
||||
&test_db,
|
||||
metric_id,
|
||||
AssetPermissionRole::CanView
|
||||
).await?;
|
||||
|
||||
// Verify permission
|
||||
PermissionTestHelpers::verify_permission(
|
||||
&test_db,
|
||||
metric_id,
|
||||
AssetPermissionRole::CanView
|
||||
).await?;
|
||||
|
||||
// Cleanup happens automatically when test_db is dropped
|
||||
Ok(())
|
||||
}
|
||||
```
|
||||
|
||||
## Success Criteria
|
||||
|
||||
1. All utility functions implemented and tested
|
||||
2. Documentation complete and clear
|
||||
3. Example tests provided
|
||||
4. No test flakiness
|
||||
5. Cleanup working reliably
|
||||
|
||||
## Rollout Plan
|
||||
|
||||
1. Implement core infrastructure
|
||||
2. Add to one test file as proof of concept
|
||||
3. Gather feedback and iterate
|
||||
4. Roll out to all test files
|
||||
|
||||
## Appendix
|
||||
|
||||
### Related Files
|
||||
|
||||
- `libs/database/tests/common/mod.rs`
|
||||
- `libs/database/tests/common/db.rs`
|
||||
- `libs/database/tests/common/permissions.rs`
|
||||
- `libs/database/tests/common/assets.rs`
|
||||
- `libs/database/tests/common/users.rs`
|
||||
|
||||
### Core Test Types
|
||||
|
||||
```rust
|
||||
// libs/database/tests/common/auth.rs
|
||||
|
||||
use uuid::Uuid;
|
||||
use chrono::{DateTime, Utc};
|
||||
use serde_json::json;
|
||||
|
||||
impl TestDb {
|
||||
/// Creates a new test organization
|
||||
pub async fn create_organization(&self) -> Result<Organization> {
|
||||
let org = Organization {
|
||||
id: Uuid::new_v4(),
|
||||
name: "Test Organization".to_string(),
|
||||
domain: Some("test.org".to_string()),
|
||||
created_at: Utc::now(),
|
||||
updated_at: Utc::now(),
|
||||
deleted_at: None,
|
||||
};
|
||||
|
||||
let mut conn = self.diesel_conn().await?;
|
||||
diesel::insert_into(organizations::table)
|
||||
.values(&org)
|
||||
.execute(&mut conn)
|
||||
.await?;
|
||||
|
||||
Ok(org)
|
||||
}
|
||||
|
||||
/// Creates a new test user
|
||||
pub async fn create_user(&self) -> Result<User> {
|
||||
let user = User {
|
||||
id: Uuid::new_v4(),
|
||||
email: format!("test-{}@example.com", Uuid::new_v4()),
|
||||
name: Some("Test User".to_string()),
|
||||
config: json!({}),
|
||||
created_at: Utc::now(),
|
||||
updated_at: Utc::now(),
|
||||
attributes: json!({}),
|
||||
avatar_url: None,
|
||||
};
|
||||
|
||||
let mut conn = self.diesel_conn().await?;
|
||||
diesel::insert_into(users::table)
|
||||
.values(&user)
|
||||
.execute(&mut conn)
|
||||
.await?;
|
||||
|
||||
Ok(user)
|
||||
}
|
||||
|
||||
/// Creates a user-organization relationship with specified role
|
||||
pub async fn create_user_to_org(
|
||||
&self,
|
||||
user_id: Uuid,
|
||||
org_id: Uuid,
|
||||
role: UserOrganizationRole,
|
||||
) -> Result<UserToOrganization> {
|
||||
let user_org = UserToOrganization {
|
||||
user_id,
|
||||
organization_id: org_id,
|
||||
role,
|
||||
sharing_setting: SharingSetting::Private,
|
||||
edit_sql: true,
|
||||
upload_csv: true,
|
||||
export_assets: true,
|
||||
email_slack_enabled: true,
|
||||
created_at: Utc::now(),
|
||||
updated_at: Utc::now(),
|
||||
deleted_at: None,
|
||||
created_by: user_id, // Self-created for test
|
||||
updated_by: user_id,
|
||||
deleted_by: None,
|
||||
status: UserOrganizationStatus::Active,
|
||||
};
|
||||
|
||||
let mut conn = self.diesel_conn().await?;
|
||||
diesel::insert_into(users_to_organizations::table)
|
||||
.values(&user_org)
|
||||
.execute(&mut conn)
|
||||
.await?;
|
||||
|
||||
Ok(user_org)
|
||||
}
|
||||
|
||||
/// Creates an authenticated user with organization for testing
|
||||
pub async fn create_authenticated_user(
|
||||
&self,
|
||||
role: Option<UserOrganizationRole>,
|
||||
) -> Result<(AuthenticatedUser, Organization)> {
|
||||
// Create org and user
|
||||
let org = self.create_organization().await?;
|
||||
let user = self.create_user().await?;
|
||||
|
||||
// Create relationship with specified role (default to Admin)
|
||||
let role = role.unwrap_or(UserOrganizationRole::Admin);
|
||||
self.create_user_to_org(user.id, org.id, role).await?;
|
||||
|
||||
// Create authenticated user
|
||||
let auth_user = AuthenticatedUser {
|
||||
id: user.id,
|
||||
email: user.email,
|
||||
name: user.name,
|
||||
organization_id: org.id,
|
||||
role,
|
||||
sharing_setting: SharingSetting::Private,
|
||||
edit_sql: true,
|
||||
upload_csv: true,
|
||||
export_assets: true,
|
||||
email_slack_enabled: true,
|
||||
};
|
||||
|
||||
Ok((auth_user, org))
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper struct for test setup
|
||||
pub struct TestSetup {
|
||||
pub user: AuthenticatedUser,
|
||||
pub organization: Organization,
|
||||
pub db: TestDb,
|
||||
}
|
||||
|
||||
impl TestSetup {
|
||||
/// Creates a new test setup with authenticated user
|
||||
pub async fn new(role: Option<UserOrganizationRole>) -> Result<Self> {
|
||||
let test_db = TestDb::new().await?;
|
||||
let (user, org) = test_db.create_authenticated_user(role).await?;
|
||||
|
||||
Ok(Self {
|
||||
user,
|
||||
organization: org,
|
||||
db: test_db,
|
||||
})
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Example Usage
|
||||
|
||||
```rust
|
||||
#[tokio::test]
|
||||
async fn test_with_authenticated_user() -> Result<()> {
|
||||
// Create test setup with admin user
|
||||
let setup = TestSetup::new(Some(UserOrganizationRole::Admin)).await?;
|
||||
|
||||
// Use authenticated user in handler
|
||||
let result = some_handler(
|
||||
&setup.user,
|
||||
&setup.organization,
|
||||
// ... other params ...
|
||||
).await?;
|
||||
|
||||
// Test assertions
|
||||
assert!(result.is_ok());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_with_different_roles() -> Result<()> {
|
||||
// Test with viewer role
|
||||
let viewer_setup = TestSetup::new(Some(UserOrganizationRole::Viewer)).await?;
|
||||
let viewer_result = some_handler(&viewer_setup.user).await;
|
||||
assert!(viewer_result.is_err()); // Expect permission denied
|
||||
|
||||
// Test with editor role
|
||||
let editor_setup = TestSetup::new(Some(UserOrganizationRole::Editor)).await?;
|
||||
let editor_result = some_handler(&editor_setup.user).await;
|
||||
assert!(editor_result.is_ok()); // Expect success
|
||||
|
||||
Ok(())
|
||||
}
|
|
@ -0,0 +1,225 @@
|
|||
---
|
||||
title: Bug Fixes and Testing Improvements
|
||||
author: Claude
|
||||
date: 2024-04-07
|
||||
status: Draft
|
||||
---
|
||||
|
||||
# Bug Fixes and Testing Improvements
|
||||
|
||||
## Problem Statement
|
||||
|
||||
Several bugs have been identified in our asset handling and permission management systems that need to be addressed:
|
||||
|
||||
1. Metric status updates are not being properly propagated to the metric_file object (BUS-1069)
|
||||
2. Asset access control is not returning appropriate HTTP status codes (BUS-1067)
|
||||
3. Public sharing parameters are not being properly updated (BUS-1064)
|
||||
4. Permission field inconsistencies across asset types (BUS-1063)
|
||||
|
||||
These issues affect core functionality around asset access, sharing, and status management. Additionally, our testing infrastructure needs improvement to prevent similar issues in the future.
|
||||
|
||||
## Goals
|
||||
|
||||
1. Establish robust testing utilities for database and permission testing
|
||||
2. Fix metric status update propagation
|
||||
3. Standardize HTTP status codes for asset access control
|
||||
4. Ensure proper updating of public sharing parameters
|
||||
5. Standardize permission field handling across asset types
|
||||
6. Improve test coverage for asset-related functionality
|
||||
|
||||
## Non-Goals
|
||||
|
||||
1. Refactoring the entire permission system
|
||||
2. Adding new sharing features
|
||||
3. Modifying the underlying database schema
|
||||
4. Changing the existing API contracts
|
||||
|
||||
## Technical Design
|
||||
|
||||
### Overview
|
||||
|
||||
The implementation will focus on fixing bugs while establishing better testing infrastructure. The changes will be made in a way that maintains backward compatibility and doesn't require database migrations.
|
||||
|
||||
```mermaid
|
||||
graph TD
|
||||
A[Test Infrastructure] --> B[Metric Status Fix]
|
||||
A --> C[Access Control Fix]
|
||||
A --> D[Sharing Parameters Fix]
|
||||
A --> E[Permission Field Fix]
|
||||
B --> F[Integration Tests]
|
||||
C --> F
|
||||
D --> F
|
||||
E --> F
|
||||
```
|
||||
|
||||
### Component Breakdown
|
||||
|
||||
#### Component 1: Test Infrastructure
|
||||
- Purpose: Establish common test utilities for database and permission testing
|
||||
- Sub-PRD: [Test Infrastructure Setup](api_test_infrastructure.md)
|
||||
- Interfaces:
|
||||
- Input: Test configuration
|
||||
- Output: Test utilities and helpers
|
||||
|
||||
#### Component 2: Metric Status Fix
|
||||
- Purpose: Fix metric status update propagation
|
||||
- Sub-PRD: [Metric Status Update Fix](api_metric_status_fix.md)
|
||||
- Interfaces:
|
||||
- Input: Metric update request
|
||||
- Output: Updated metric with correct status
|
||||
|
||||
#### Component 3: Access Control Fix
|
||||
- Purpose: Standardize HTTP status codes
|
||||
- Sub-PRD: [Asset Access Control Fix](api_access_control_fix.md)
|
||||
- Interfaces:
|
||||
- Input: Asset access request
|
||||
- Output: Appropriate HTTP status code
|
||||
|
||||
#### Component 4: Sharing Parameters Fix
|
||||
- Purpose: Fix public sharing parameter updates
|
||||
- Sub-PRD: [Sharing Parameters Fix](api_sharing_parameters_fix.md)
|
||||
- Interfaces:
|
||||
- Input: Sharing update request
|
||||
- Output: Updated sharing settings
|
||||
|
||||
#### Component 5: Permission Field Fix
|
||||
- Purpose: Standardize permission field handling
|
||||
- Sub-PRD: [Permission Field Fix](api_permission_field_fix.md)
|
||||
- Interfaces:
|
||||
- Input: Asset access request
|
||||
- Output: Consistent permission field
|
||||
|
||||
### Dependencies
|
||||
|
||||
1. Database access for testing
|
||||
2. Existing permission models and enums
|
||||
3. HTTP status code definitions
|
||||
4. Asset type definitions
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
The implementation will be broken down into the following sub-PRDs, with their dependencies and concurrent development opportunities clearly defined:
|
||||
|
||||
1. [Test Infrastructure Setup](api_test_infrastructure.md) - **Must be completed first**
|
||||
- Establishes testing utilities needed by all other components
|
||||
- Dependencies: None
|
||||
- Required for: All other PRDs
|
||||
- Status: 🆕 Not Started
|
||||
|
||||
After Test Infrastructure is complete, the following tickets can be worked on concurrently:
|
||||
|
||||
2. [Asset Access Control Fix](api_access_control_fix.md) - **Can be developed concurrently with 3, 4, and 5**
|
||||
- BUS-1067: Asset access control HTTP status codes
|
||||
- Dependencies: Test Infrastructure
|
||||
- Can be worked on concurrently with: Metric Status Fix, Sharing Parameters Fix, Permission Field Fix
|
||||
- Reason for concurrency: Modifies error handling layer which is independent of other changes
|
||||
- Status: 🆕 Not Started
|
||||
|
||||
3. [Metric Status Update Fix](api_metric_status_fix.md) - **Can be developed concurrently with 2, 4, and 5**
|
||||
- BUS-1069: Metric status propagation
|
||||
- Dependencies: Test Infrastructure
|
||||
- Can be worked on concurrently with: HTTP Status Fix, Sharing Parameters Fix, Permission Field Fix
|
||||
- Reason for concurrency: Modifies metric-specific logic that doesn't affect other components
|
||||
- Status: 🆕 Not Started
|
||||
|
||||
4. [Sharing Parameters Fix](api_sharing_parameters_fix.md) - **Can be developed concurrently with 2, 3, and 5**
|
||||
- BUS-1064: Public sharing parameters
|
||||
- Dependencies: Test Infrastructure
|
||||
- Can be worked on concurrently with: HTTP Status Fix, Metric Status Fix, Permission Field Fix
|
||||
- Reason for concurrency: Focuses on sharing validation logic that is separate from other changes
|
||||
- Status: 🆕 Not Started
|
||||
|
||||
5. [Permission Field Fix](api_permission_field_fix.md) - **Can be developed concurrently with 2, 3, and 4**
|
||||
- BUS-1063: Permission field consistency
|
||||
- Dependencies: Test Infrastructure
|
||||
- Can be worked on concurrently with: HTTP Status Fix, Metric Status Fix, Sharing Parameters Fix
|
||||
- Reason for concurrency: Changes permission response structure without affecting core permission logic
|
||||
- Status: 🆕 Not Started
|
||||
|
||||
### Concurrent Development Strategy
|
||||
|
||||
To enable efficient concurrent development without conflicts:
|
||||
|
||||
1. **Independent Code Paths**
|
||||
- HTTP Status Fix: Modifies error handling layer
|
||||
- Metric Status Fix: Updates metric-specific update logic
|
||||
- Sharing Parameters Fix: Changes sharing validation
|
||||
- Permission Field Fix: Standardizes permission response format
|
||||
|
||||
2. **Isolated Test Data**
|
||||
- Each test will use unique identifiers with test_id prefix
|
||||
- Test database connections managed by TestDb
|
||||
- Automatic cleanup after each test
|
||||
|
||||
3. **Clear Integration Points**
|
||||
- HTTP Status Fix: Other fixes will adopt new error types as they're completed
|
||||
- Permission Field Fix: Other components will use new permission field format
|
||||
- No circular dependencies between fixes
|
||||
|
||||
4. **Merge Strategy**
|
||||
- Test Infrastructure must be merged first
|
||||
- Other fixes can be merged in any order
|
||||
- Each fix includes its own tests and documentation
|
||||
|
||||
### Development Phases
|
||||
|
||||
Phase 1: Foundation (Sequential)
|
||||
- Complete Test Infrastructure
|
||||
- Review and merge
|
||||
|
||||
Phase 2: Bug Fixes (Concurrent)
|
||||
- Assign teams to each fix
|
||||
- Develop and test independently
|
||||
- Regular sync meetings to discuss integration
|
||||
|
||||
Phase 3: Integration (Sequential)
|
||||
- Merge completed fixes
|
||||
- Run full test suite
|
||||
- Verify all features working together
|
||||
|
||||
## Testing Strategy
|
||||
|
||||
### Unit Tests
|
||||
|
||||
- Each fix must include comprehensive unit tests
|
||||
- Test both success and failure cases
|
||||
- Mock external dependencies where appropriate
|
||||
- Use test utilities for common setup
|
||||
|
||||
### Integration Tests
|
||||
|
||||
- Test complete workflows
|
||||
- Verify HTTP status codes
|
||||
- Test permission combinations
|
||||
- Verify database state after operations
|
||||
|
||||
## Security Considerations
|
||||
|
||||
- Maintain existing permission checks
|
||||
- No exposure of sensitive data in error messages
|
||||
- Proper validation of public access parameters
|
||||
- Audit logging of permission changes
|
||||
|
||||
## Monitoring and Logging
|
||||
|
||||
- Log permission changes
|
||||
- Track HTTP status code distribution
|
||||
- Monitor test execution times
|
||||
- Alert on test failures
|
||||
|
||||
## Rollout Plan
|
||||
|
||||
1. Deploy test infrastructure changes
|
||||
2. Roll out fixes one at a time
|
||||
3. Monitor for any issues
|
||||
4. Update documentation
|
||||
|
||||
## Appendix
|
||||
|
||||
### Related PRDs
|
||||
|
||||
- [Test Infrastructure Setup](api_test_infrastructure.md)
|
||||
- [Metric Status Update Fix](api_metric_status_fix.md)
|
||||
- [Asset Access Control Fix](api_access_control_fix.md)
|
||||
- [Sharing Parameters Fix](api_sharing_parameters_fix.md)
|
||||
- [Permission Field Fix](api_permission_field_fix.md)
|
|
@ -1,716 +0,0 @@
|
|||
# Refactoring Metric Data Metadata Storage
|
||||
|
||||
## Problem Statement ✅
|
||||
|
||||
The `MetricYml` structure currently contains a `data_metadata` field that stores column metadata information as part of the metric definition. This approach has several issues:
|
||||
|
||||
1. The data metadata is derived from query results rather than being an intrinsic property of the metric definition
|
||||
2. It can become outdated when underlying data changes, as it's not automatically updated
|
||||
3. Including it in the metric definition adds unnecessary complexity to the metric YAML schema
|
||||
4. It requires redundant validation logic in multiple places
|
||||
5. Multiple implementations of metadata calculation exist in different parts of the codebase (get_metric_data_handler.rs, run_sql.rs)
|
||||
|
||||
**We need to establish a single source of truth for data metadata** by making the query_engine directly responsible for metadata calculation, treating DataMetadata as a first-class database object, and ensuring it's always returned with every query result.
|
||||
|
||||
By moving data metadata from the metric definition to a database column on the `metric_files` table and standardizing its calculation in the query engine, we'll separate the metric definition from its execution results, maintain cleaner YAML schema for metrics, and provide a more reliable caching mechanism for metadata.
|
||||
|
||||
### Current Limitations
|
||||
- Data metadata in MetricYml can become stale when source data changes
|
||||
- Validation requires redundant computation of metadata in multiple places
|
||||
- `METRIC_YML_SCHEMA` includes unnecessary complexity for data_metadata validation
|
||||
- Users may be confused about whether to update data_metadata manually
|
||||
- Different code paths calculate metadata inconsistently (get_metric_data_handler.rs vs run_sql.rs)
|
||||
- Duplicate code exists for metadata calculations
|
||||
|
||||
### Impact
|
||||
- User Impact: Cleaner metric definition format, more reliable metadata information in UI
|
||||
- System Impact: More efficient caching of query metadata, reduced redundancy, consistent metadata calculations
|
||||
- Business Impact: Improved reliability of metric visualizations and metadata analysis
|
||||
- Developer Impact: Reduced code duplication, single source of truth for metadata
|
||||
|
||||
## Requirements
|
||||
|
||||
### Functional Requirements ✅
|
||||
|
||||
#### Core Functionality
|
||||
- Remove `data_metadata` field from `MetricYml` struct
|
||||
- Details: Field should be completely removed from the structure
|
||||
- Acceptance Criteria: All metric-related code builds and functions without the field
|
||||
- Dependencies: None
|
||||
|
||||
- Make `DataMetadata` a first-class database type with FromSql/ToSql implementations
|
||||
- Details: Create a proper Rust type with serialization/deserialization support
|
||||
- Acceptance Criteria: DataMetadata can be stored and retrieved from database
|
||||
- Dependencies: None
|
||||
|
||||
- Add `data_metadata` column to `metric_files` table
|
||||
- Details: A new JSONB column added to the metric_files table to store cached metadata
|
||||
- Acceptance Criteria: Successful migration that adds the column
|
||||
- Dependencies: None
|
||||
|
||||
- Enhance query_engine to always return data metadata with results
|
||||
- Details: The query_engine should compute and return metadata for every query
|
||||
- Acceptance Criteria: query_engine returns both results and metadata in a single structure
|
||||
- Dependencies: None
|
||||
|
||||
- Consolidate metadata calculation logic from run_sql.rs and get_metric_data_handler.rs
|
||||
- Details: Move all metadata calculation to the query_engine
|
||||
- Acceptance Criteria: All metadata calculations use a single implementation
|
||||
- Dependencies: None
|
||||
|
||||
- Update validate_sql to store computed metadata in the metric file
|
||||
- Details: When validating SQL, save the computed metadata to the metric file record
|
||||
- Acceptance Criteria: Validation updates the data_metadata field with current metadata
|
||||
- Dependencies: data_metadata column added to metric_files
|
||||
|
||||
- Update create_metrics.rs and update_metric.rs to handle data_metadata in the database
|
||||
- Details: Any SQL changes should trigger metadata recalculation and storage
|
||||
- Acceptance Criteria: Metadata is correctly updated when SQL is modified
|
||||
- Dependencies: query_engine metadata calculation
|
||||
|
||||
- Continue saving data_metadata in version_history for metrics
|
||||
- Details: Include data_metadata in version history to preserve historical metadata
|
||||
- Acceptance Criteria: Version history preserves data_metadata for each version
|
||||
- Dependencies: None
|
||||
|
||||
### Non-Functional Requirements ✅
|
||||
|
||||
- Performance Requirements
|
||||
- Metadata computation should not significantly impact query performance
|
||||
- Accessing cached metadata should be faster than recomputing it
|
||||
- Security Requirements
|
||||
- No additional security requirements (using existing table and permissions)
|
||||
- Scalability Requirements
|
||||
- Metadata should be compact enough to not significantly increase database size
|
||||
|
||||
## Technical Design ✅
|
||||
|
||||
### System Architecture
|
||||
|
||||
```mermaid
|
||||
graph TD
|
||||
A[Client Request] --> B[REST/WS API]
|
||||
B --> C[Handlers]
|
||||
C --> D[query_engine]
|
||||
D -->|QueryResult with data & metadata| C
|
||||
C -->|Store Metadata| E[metric_files table]
|
||||
E -->|Cached Metadata| C
|
||||
C -->|Response| B
|
||||
B --> A
|
||||
```
|
||||
|
||||
### Core Components ✅
|
||||
|
||||
#### Component 1: Updated MetricYml Structure
|
||||
```rust
|
||||
// Remove data_metadata from MetricYml
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, FromSqlRow, AsExpression)]
|
||||
#[diesel(sql_type = Jsonb)]
|
||||
pub struct MetricYml {
|
||||
pub name: String,
|
||||
pub description: Option<String>,
|
||||
pub time_frame: String,
|
||||
pub sql: String,
|
||||
pub chart_config: ChartConfig,
|
||||
// data_metadata field removed
|
||||
pub dataset_ids: Vec<Uuid>,
|
||||
}
|
||||
```
|
||||
|
||||
#### Component 2: DataMetadata as First-Class Type
|
||||
```rust
|
||||
// First-class DataMetadata type with database serialization
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, FromSqlRow, AsExpression)]
|
||||
#[diesel(sql_type = Jsonb)]
|
||||
pub struct DataMetadata {
|
||||
pub column_count: i64,
|
||||
pub row_count: i64,
|
||||
pub column_metadata: Vec<ColumnMetaData>,
|
||||
}
|
||||
|
||||
// Column metadata type
|
||||
#[derive(Debug, Serialize, Deserialize, Clone)]
|
||||
pub struct ColumnMetaData {
|
||||
pub name: String,
|
||||
pub min_value: serde_json::Value,
|
||||
pub max_value: serde_json::Value,
|
||||
pub unique_values: i32,
|
||||
pub simple_type: SimpleType,
|
||||
#[serde(rename = "type")]
|
||||
pub column_type: ColumnType,
|
||||
}
|
||||
|
||||
// Implement FromSql and ToSql for database serialization
|
||||
impl FromSql<Jsonb, Pg> for DataMetadata {
|
||||
fn from_sql(bytes: diesel::pg::PgValue) -> diesel::deserialize::Result<Self> {
|
||||
let value = <serde_json::Value as FromSql<Jsonb, Pg>>::from_sql(bytes)?;
|
||||
Ok(serde_json::from_value(value)?)
|
||||
}
|
||||
}
|
||||
|
||||
impl ToSql<Jsonb, Pg> for DataMetadata {
|
||||
fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> diesel::serialize::Result {
|
||||
out.write_all(&[1])?; // JSONB version 1 header
|
||||
out.write_all(&serde_json::to_vec(self)?)?;
|
||||
Ok(IsNull::No)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Component 3: Updated metric_files Schema
|
||||
```rust
|
||||
// Updated MetricFile model
|
||||
#[derive(Queryable, Insertable, Identifiable, Debug, Clone, Serialize)]
|
||||
#[diesel(table_name = metric_files)]
|
||||
pub struct MetricFile {
|
||||
pub id: Uuid,
|
||||
pub name: String,
|
||||
pub file_name: String,
|
||||
pub content: MetricYml,
|
||||
pub verification: Verification,
|
||||
pub evaluation_obj: Option<Value>,
|
||||
pub evaluation_summary: Option<String>,
|
||||
pub evaluation_score: Option<f64>,
|
||||
pub organization_id: Uuid,
|
||||
pub created_by: Uuid,
|
||||
pub created_at: DateTime<Utc>,
|
||||
pub updated_at: DateTime<Utc>,
|
||||
pub deleted_at: Option<DateTime<Utc>>,
|
||||
pub publicly_accessible: bool,
|
||||
pub publicly_enabled_by: Option<Uuid>,
|
||||
pub public_expiry_date: Option<DateTime<Utc>>,
|
||||
pub version_history: VersionHistory,
|
||||
pub data_metadata: Option<DataMetadata>, // Changed to strongly typed DataMetadata
|
||||
}
|
||||
```
|
||||
|
||||
#### Component 4: Updated query_engine Interface
|
||||
```rust
|
||||
// Define a QueryResult structure to hold both results and metadata
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct QueryResult {
|
||||
pub data: Vec<IndexMap<String, DataType>>,
|
||||
pub metadata: DataMetadata,
|
||||
}
|
||||
|
||||
// Enhanced query_engine that always computes and returns metadata
|
||||
pub async fn query_engine(
|
||||
data_source_id: &Uuid,
|
||||
sql: &str,
|
||||
limit: Option<i64>,
|
||||
) -> Result<QueryResult> {
|
||||
let corrected_sql = sql.to_owned();
|
||||
let secure_sql = corrected_sql.clone();
|
||||
|
||||
if let Some(warning) = query_safety_filter(secure_sql.clone()).await {
|
||||
return Err(anyhow!(warning));
|
||||
}
|
||||
|
||||
let results = match route_to_query(data_source_id, &secure_sql, limit).await {
|
||||
Ok(results) => results,
|
||||
Err(e) => {
|
||||
tracing::error!(
|
||||
"There was an issue while querying the data source: {}",
|
||||
e
|
||||
);
|
||||
return Err(anyhow!(e));
|
||||
}
|
||||
};
|
||||
|
||||
// Always compute metadata from results - consolidated from run_sql.rs implementation
|
||||
let metadata = compute_data_metadata(&results);
|
||||
|
||||
// Return both results and metadata in the QueryResult structure
|
||||
Ok(QueryResult {
|
||||
data: results,
|
||||
metadata,
|
||||
})
|
||||
}
|
||||
|
||||
// Consolidated metadata calculation function based on run_sql.rs implementation
|
||||
fn compute_data_metadata(data: &[IndexMap<String, DataType>]) -> DataMetadata {
|
||||
if data.is_empty() {
|
||||
return DataMetadata {
|
||||
column_count: 0,
|
||||
row_count: 0,
|
||||
column_metadata: vec![],
|
||||
};
|
||||
}
|
||||
|
||||
let first_row = &data[0];
|
||||
let column_count = first_row.len() as i64;
|
||||
let row_count = data.len() as i64;
|
||||
let column_metadata = compute_column_metadata(data);
|
||||
|
||||
DataMetadata {
|
||||
column_count,
|
||||
row_count,
|
||||
column_metadata,
|
||||
}
|
||||
}
|
||||
|
||||
// Helper function for computing column metadata
|
||||
fn compute_column_metadata(data: &[IndexMap<String, DataType>]) -> Vec<ColumnMetaData> {
|
||||
if data.is_empty() {
|
||||
return vec![];
|
||||
}
|
||||
|
||||
let first_row = &data[0];
|
||||
let columns: Vec<_> = first_row.keys().cloned().collect();
|
||||
|
||||
// Use existing column metadata calculation from run_sql.rs
|
||||
// This maintains compatibility with the current implementation while
|
||||
// centralizing the logic in the query_engine
|
||||
columns.iter().map(|column_name| {
|
||||
let mut value_map = HashSet::new();
|
||||
let mut min_value = None;
|
||||
let mut max_value = None;
|
||||
let mut is_date_type = false;
|
||||
let mut min_value_str: Option<String> = None;
|
||||
let mut max_value_str: Option<String> = None;
|
||||
|
||||
for row in data {
|
||||
if let Some(value) = row.get(column_name) {
|
||||
// Track unique values (up to a reasonable limit)
|
||||
if value_map.len() < 100 {
|
||||
value_map.insert(format!("{:?}", value));
|
||||
}
|
||||
|
||||
// Calculate min/max for appropriate types
|
||||
match value {
|
||||
DataType::Int2(Some(v)) |
|
||||
DataType::Int4(Some(v)) => {
|
||||
let n = *v as f64;
|
||||
min_value = Some(min_value.map_or(n, |min: f64| min.min(n)));
|
||||
max_value = Some(max_value.map_or(n, |max: f64| max.max(n)));
|
||||
}
|
||||
DataType::Int8(Some(v)) => {
|
||||
let n = *v as f64;
|
||||
min_value = Some(min_value.map_or(n, |min: f64| min.min(n)));
|
||||
max_value = Some(max_value.map_or(n, |max: f64| max.max(n)));
|
||||
}
|
||||
DataType::Float4(Some(v)) |
|
||||
DataType::Float8(Some(v)) => {
|
||||
let n = *v as f64;
|
||||
min_value = Some(min_value.map_or(n, |min: f64| min.min(n)));
|
||||
max_value = Some(max_value.map_or(n, |max: f64| max.max(n)));
|
||||
}
|
||||
DataType::Date(Some(date)) => {
|
||||
is_date_type = true;
|
||||
let date_str = date.to_string();
|
||||
update_date_min_max(&date_str, &mut min_value_str, &mut max_value_str);
|
||||
}
|
||||
DataType::Timestamp(Some(ts)) |
|
||||
DataType::Timestamptz(Some(ts)) => {
|
||||
is_date_type = true;
|
||||
let ts_str = ts.to_string();
|
||||
update_date_min_max(&ts_str, &mut min_value_str, &mut max_value_str);
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Determine the column type and simple type
|
||||
let column_type = first_row.get(column_name).unwrap();
|
||||
let (simple_type, column_type) = determine_types(column_type);
|
||||
|
||||
// Format min/max values appropriately based on type
|
||||
let (min_value, max_value) = if is_date_type {
|
||||
(
|
||||
min_value_str.map_or(serde_json::Value::Null, |v| serde_json::Value::String(v)),
|
||||
max_value_str.map_or(serde_json::Value::Null, |v| serde_json::Value::String(v)),
|
||||
)
|
||||
} else {
|
||||
(
|
||||
min_value.map_or(serde_json::Value::Null, |v| serde_json::Value::Number(serde_json::Number::from_f64(v).unwrap_or_default())),
|
||||
max_value.map_or(serde_json::Value::Null, |v| serde_json::Value::Number(serde_json::Number::from_f64(v).unwrap_or_default())),
|
||||
)
|
||||
};
|
||||
|
||||
ColumnMetaData {
|
||||
name: column_name.clone(),
|
||||
min_value,
|
||||
max_value,
|
||||
unique_values: value_map.len() as i32,
|
||||
simple_type,
|
||||
column_type,
|
||||
}
|
||||
}).collect()
|
||||
}
|
||||
```
|
||||
|
||||
#### Component 5: Updated validate_sql Function
|
||||
```rust
|
||||
pub async fn validate_sql(
|
||||
sql: &str,
|
||||
dataset_id: &Uuid,
|
||||
) -> Result<(String, Vec<IndexMap<String, DataType>>, Option<DataMetadata>)> {
|
||||
// Get data source for the dataset
|
||||
let mut conn = get_pg_pool().get().await?;
|
||||
let data_source_id = datasets::table
|
||||
.filter(datasets::id.eq(dataset_id))
|
||||
.select(datasets::data_source_id)
|
||||
.first::<Uuid>(&mut conn)
|
||||
.await?;
|
||||
|
||||
// Execute query and get results with metadata
|
||||
let query_result = query_engine(&data_source_id, sql, None).await?;
|
||||
|
||||
let num_records = query_result.data.len();
|
||||
let message = if num_records == 0 {
|
||||
"No records were found".to_string()
|
||||
} else {
|
||||
format!("{} records were returned", num_records)
|
||||
};
|
||||
|
||||
// Return records (limited to 13), message, and metadata
|
||||
let return_records = if num_records <= 13 {
|
||||
query_result.data
|
||||
} else {
|
||||
Vec::new()
|
||||
};
|
||||
|
||||
Ok((message, return_records, Some(query_result.metadata)))
|
||||
}
|
||||
```
|
||||
|
||||
```rust
// Updated MetricDataResponse structure to use DataMetadata
#[derive(Debug, Serialize)]
pub struct MetricDataResponse {
    pub metric_id: Uuid,
    pub data: Vec<IndexMap<String, DataType>>,
    pub data_metadata: DataMetadata, // Changed from MetricData to DataMetadata
}
```
|
||||
|
||||
### Database Changes
|
||||
|
||||
```sql
|
||||
-- Add data_metadata column to metric_files table
|
||||
ALTER TABLE metric_files
|
||||
ADD COLUMN data_metadata JSONB;
|
||||
|
||||
-- Create index for faster queries
|
||||
CREATE INDEX metric_files_data_metadata_idx ON metric_files USING GIN (data_metadata);
|
||||
```
|
||||
|
||||
### File Changes
|
||||
|
||||
#### Modified Files
|
||||
- `libs/database/src/types/metric_yml.rs`
|
||||
- Changes: Remove data_metadata field and related functionality
|
||||
- Impact: Simplifies metric definition structure
|
||||
- Dependencies: None
|
||||
|
||||
- `libs/database/src/types/data_metadata.rs` (NEW)
|
||||
- Changes: Create new file for DataMetadata first-class type with FromSql/ToSql impls
|
||||
- Impact: Consistent metadata type across the codebase
|
||||
- Dependencies: None
|
||||
|
||||
- `libs/database/src/models.rs`
|
||||
- Changes: Add data_metadata field to MetricFile struct with strong typing
|
||||
- Impact: Enables storage of metadata in the database
|
||||
- Dependencies: DataMetadata type, Migration to add column
|
||||
|
||||
- `libs/database/src/schema.rs`
|
||||
- Changes: Update metric_files table definition to include data_metadata
|
||||
- Impact: Reflects database schema change
|
||||
- Dependencies: Migration to add column
|
||||
|
||||
- `libs/agents/src/tools/categories/file_tools/common.rs`
|
||||
- Changes: Update METRIC_YML_SCHEMA to remove data_metadata, update validate_sql
|
||||
- Impact: Simplifies schema validation for metrics, uses new DataMetadata type
|
||||
- Dependencies: DataMetadata type
|
||||
|
||||
- `libs/query_engine/src/data_source_query_routes/query_engine.rs`
|
||||
- Changes: Create QueryResult struct, enhance query_engine to always compute metadata
|
||||
- Impact: Centralizes metadata computation, ensures it's always available
|
||||
- Dependencies: DataMetadata type
|
||||
|
||||
- `libs/handlers/src/metrics/get_metric_data_handler.rs`
|
||||
- Changes: Remove metadata computation, use metadata from QueryResult
|
||||
- Impact: Removes duplicate code, uses consistent metadata
|
||||
- Dependencies: Enhanced query_engine
|
||||
|
||||
- `libs/handlers/src/metrics/update_metric_handler.rs`
|
||||
- Changes: Update data_metadata when SQL changes
|
||||
- Impact: Ensures metadata is updated when metric definition changes
|
||||
- Dependencies: Enhanced query_engine
|
||||
|
||||
- `libs/agents/src/tools/categories/file_tools/create_metrics.rs`
|
||||
- Changes: Add data_metadata to created metric files
|
||||
- Impact: Ensures metadata is stored during metric creation
|
||||
- Dependencies: Enhanced query_engine
|
||||
|
||||
- `server/src/routes/rest/routes/sql/run_sql.rs`
|
||||
- Changes: Remove duplicate metadata computation, use metadata from QueryResult
|
||||
- Impact: Removes duplicate code, uses consistent metadata
|
||||
- Dependencies: Enhanced query_engine
|
||||
|
||||
- `server/src/routes/ws/sql/run_sql.rs`
|
||||
- Changes: Remove duplicate metadata computation, use metadata from QueryResult
|
||||
- Impact: Removes duplicate code, uses consistent metadata
|
||||
- Dependencies: Enhanced query_engine
|
||||
|
||||
#### New Files
|
||||
- `libs/database/src/types/data_metadata.rs`
|
||||
- Purpose: Define DataMetadata as a first-class type with serialization
|
||||
- Key components: DataMetadata struct, FromSql/ToSql impls
|
||||
- Dependencies: None
|
||||
|
||||
- `migrations/YYYY-MM-DD-HHMMSS_add_data_metadata_to_metric_files/up.sql`
|
||||
- Purpose: Add data_metadata column to metric_files table
|
||||
- Key components: SQL to add column and index
|
||||
- Dependencies: None
|
||||
|
||||
- `migrations/YYYY-MM-DD-HHMMSS_add_data_metadata_to_metric_files/down.sql`
|
||||
- Purpose: Remove data_metadata column from metric_files table
|
||||
- Key components: SQL to drop column and index
|
||||
- Dependencies: None
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
### Phase 1: Database Schema Update 🔜 (Not Started)
|
||||
|
||||
1. Create migration to add data_metadata column
|
||||
- [ ] Generate migration files using diesel CLI:
|
||||
```bash
|
||||
cd /path/to/project
|
||||
diesel migration generate add_data_metadata_to_metric_files
|
||||
```
|
||||
- [ ] Add the following SQL to the up.sql file:
|
||||
```sql
|
||||
-- Add data_metadata column to metric_files table
|
||||
ALTER TABLE metric_files ADD COLUMN data_metadata JSONB;
|
||||
|
||||
-- Create index for faster queries
|
||||
CREATE INDEX metric_files_data_metadata_idx ON metric_files USING GIN (data_metadata);
|
||||
```
|
||||
- [ ] Add the following SQL to the down.sql file:
|
||||
```sql
|
||||
-- Drop index first
|
||||
DROP INDEX IF EXISTS metric_files_data_metadata_idx;
|
||||
|
||||
-- Drop column
|
||||
ALTER TABLE metric_files DROP COLUMN IF EXISTS data_metadata;
|
||||
```
|
||||
- [ ] Run the migration:
|
||||
```bash
|
||||
diesel migration run
|
||||
```
|
||||
|
||||
2. Update database-related structs
|
||||
- [ ] Update models.rs with new field
|
||||
- [ ] Update version_history.rs to include data_metadata for metric_file versions
|
||||
|
||||
### Phase 2: Metadata Computation 🔜 (Not Started)
|
||||
|
||||
1. Enhance query_engine
|
||||
- [ ] Create QueryResult structure to hold results and metadata
|
||||
- [ ] Update query_engine function to compute and return metadata
|
||||
- [ ] Implement compute_column_metadata helper function
|
||||
- [ ] Add tests for metadata computation
|
||||
|
||||
2. Update validate_sql function
|
||||
- [ ] Modify to use enhanced query_engine and extract metadata
|
||||
- [ ] Add code to return metadata for metric storage
|
||||
- [ ] Update tests
|
||||
|
||||
### Phase 3: Metric Definition Update 🔜 (Not Started)
|
||||
|
||||
1. Update MetricYml structure
|
||||
- [ ] Remove data_metadata field
|
||||
- [ ] Update related methods and tests
|
||||
- [ ] Update METRIC_YML_SCHEMA to remove data_metadata
|
||||
|
||||
2. Update handlers to use cached metadata
|
||||
- [ ] Modify get_metric_data_handler to use cached metadata
|
||||
- [ ] Update update_metric_handler to recalculate metadata when SQL changes
|
||||
- [ ] Update tests
|
||||
|
||||
### Phase 4: Agent Tool Updates 🔜 (Not Started)
|
||||
|
||||
1. Update metric creation and modification tools
|
||||
- [ ] Modify process_metric_file to include metadata
|
||||
- [ ] Update create_metrics.rs to store metadata
|
||||
- [ ] Update tests
|
||||
|
||||
2. Migration for existing metrics
|
||||
- [ ] Create script to calculate and store metadata for existing metrics
|
||||
- [ ] Test migration on staging
|
||||
- [ ] Run migration in production
|
||||
|
||||
3. Documentation and cleanup
|
||||
- [ ] Update API documentation
|
||||
- [ ] Add comments explaining metadata handling
|
||||
- [ ] Remove any leftover references to data_metadata in yml
|
||||
|
||||
## Testing Strategy
|
||||
|
||||
### Unit Tests
|
||||
- Test metadata computation with various data types
|
||||
- Test validate_sql with both valid and invalid queries
|
||||
- Test MetricYml serialization and deserialization without data_metadata
|
||||
|
||||
### Integration Tests
|
||||
- Test end-to-end flow from query to metadata storage
|
||||
- Test metric creation with metadata computation
|
||||
- Test metric update with metadata recalculation
|
||||
|
||||
### Database Migration Tests
|
||||
- Test migration on a copy of production data
|
||||
- Verify data_metadata can be computed for existing metrics
|
||||
- Test rollback procedure
|
||||
|
||||
## Migration Procedure
|
||||
|
||||
### Database Migration
|
||||
|
||||
1. **Generate Migration Files**
|
||||
```bash
|
||||
cd /path/to/project
|
||||
diesel migration generate add_data_metadata_to_metric_files
|
||||
```
|
||||
|
||||
2. **Implement Migration Files**
|
||||
|
||||
**up.sql**:
|
||||
```sql
|
||||
-- Add data_metadata column to metric_files table
|
||||
ALTER TABLE metric_files ADD COLUMN data_metadata JSONB;
|
||||
|
||||
-- Create index for faster queries
|
||||
CREATE INDEX metric_files_data_metadata_idx ON metric_files USING GIN (data_metadata);
|
||||
```
|
||||
|
||||
**down.sql**:
|
||||
```sql
|
||||
-- Drop index first
|
||||
DROP INDEX IF EXISTS metric_files_data_metadata_idx;
|
||||
|
||||
-- Drop column
|
||||
ALTER TABLE metric_files DROP COLUMN IF EXISTS data_metadata;
|
||||
```
|
||||
|
||||
3. **Run Migration**
|
||||
```bash
|
||||
diesel migration run
|
||||
```
|
||||
|
||||
4. **Verify Migration**
|
||||
```sql
|
||||
SELECT column_name, data_type
|
||||
FROM information_schema.columns
|
||||
WHERE table_name = 'metric_files' AND column_name = 'data_metadata';
|
||||
```
|
||||
|
||||
### Code Updates
|
||||
|
||||
1. **Update Diesel Schema**
|
||||
```bash
|
||||
diesel print-schema > libs/database/src/schema.rs
|
||||
```
|
||||
|
||||
2. **Adapt Database Models**
|
||||
- Add data_metadata field to MetricFile struct in models.rs
|
||||
- Remove data_metadata field from MetricYml in metric_yml.rs
|
||||
- Update version_history.rs to handle data_metadata in versions
|
||||
|
||||
3. **Update Query Engine**
|
||||
- Create QueryResult structure in query_engine.rs
|
||||
- Enhance existing query_engine function to calculate and return metadata
|
||||
- Implement compute_column_metadata helper function
|
||||
- Add tests for metadata computation
|
||||
|
||||
4. **Update Handlers and Tools**
|
||||
- Update validate_sql to use enhanced query_engine and extract metadata
|
||||
- Modify get_metric_data_handler to use cached metadata when available
|
||||
- Update update_metric_handler to recalculate metadata when SQL changes
|
||||
- Update create_metrics.rs to store metadata for new metrics
|
||||
|
||||
### Backfill Data for Existing Metrics
|
||||
|
||||
A migration script will be needed to populate the data_metadata column for existing metrics. This script will:
|
||||
|
||||
1. **Query for Existing Metrics**
|
||||
```rust
|
||||
let metrics = metric_files::table
|
||||
.filter(metric_files::deleted_at.is_null())
|
||||
.load::<MetricFile>(&mut conn)
|
||||
.await?;
|
||||
```
|
||||
|
||||
2. **Process Each Metric**
|
||||
```rust
|
||||
for metric in metrics {
|
||||
// Parse YAML content
|
||||
let metric_yml: MetricYml = metric.content;
|
||||
|
||||
// Get dataset IDs
|
||||
let dataset_id = metric_yml.dataset_ids.first().unwrap_or(default_dataset_id);
|
||||
|
||||
// Get data source for dataset
|
||||
let data_source_id = get_data_source_for_dataset(dataset_id).await?;
|
||||
|
||||
// Execute query with metadata calculation
|
||||
let query_result = query_engine(&data_source_id, &metric_yml.sql, None).await?;
|
||||
|
||||
// Update metric record with metadata
|
||||
diesel::update(metric_files::table)
|
||||
.filter(metric_files::id.eq(metric.id))
|
||||
.set(metric_files::data_metadata.eq(serde_json::to_value(query_result.metadata)?))
|
||||
.execute(&mut conn)
|
||||
.await?;
|
||||
}
|
||||
```
|
||||
|
||||
3. **Deployment Strategy**
|
||||
- Run script during off-peak hours
|
||||
- Use transaction batching (for example, process 100 metrics per transaction)
|
||||
- Add logging and resumability in case of interruption
|
||||
- Monitor database performance during migration
|
||||
|
||||
### Deployment Strategy
|
||||
|
||||
1. **Pre-Deployment**
|
||||
- Run all tests on staging environment
|
||||
- Verify migrations work correctly
|
||||
- Ensure backfill script completes successfully
|
||||
- Check for performance issues
|
||||
|
||||
2. **Deployment Order**
|
||||
1. Deploy database migration
|
||||
2. Deploy code changes (compatible with both old and new schema)
|
||||
3. Run backfill script
|
||||
4. Deploy final code changes (removing old schema support)
|
||||
|
||||
3. **Rollback Strategy**
|
||||
- If issues are detected after migration but before backfill: roll back code changes, then run down.sql
|
||||
- If issues are detected after backfill: restore from backup or reverse data changes programmatically
|
||||
|
||||
## Security Considerations
|
||||
- No additional security concerns (using existing database permissions)
|
||||
|
||||
## Rollback Plan
|
||||
- Database migration includes down.sql for rollback
|
||||
- Code can be reverted if issues are found
|
||||
- Temporary dual support for both approaches during transition
|
||||
|
||||
## Monitoring
|
||||
- Add logs for metadata computation and caching
|
||||
- Monitor query performance to ensure minimal impact
|
||||
- Track database size to ensure metadata doesn't cause significant growth
|
||||
|
||||
## Success Criteria
|
||||
- All metrics have data_metadata stored in the database
|
||||
- MetricYml structures no longer contain data_metadata field
|
||||
- Visualization in UI uses cached metadata correctly
|
||||
- Performance equal to or better than previous implementation
|
||||
|
||||
```rust
// Updated MetricDataResponse structure to use DataMetadata
#[derive(Debug, Serialize)]
pub struct MetricDataResponse {
    pub metric_id: Uuid,
    pub data: Vec<IndexMap<String, DataType>>,
    pub data_metadata: DataMetadata, // Changed from MetricData to DataMetadata
}
```
|
Loading…
Reference in New Issue