perf: optimize HTTP client, DB queries, and clean up dead code
- Make reqwest::Client a LazyLock singleton instead of per-call allocation
- Parallelize 3 independent DB queries in get_index_advisor_report with tokio::join!
- Eliminate per-iteration Vec allocation in snapshot FK dependency loop
- Hoist try_local_pg_dump() call in SampleData clone mode to avoid double execution
- Evict stale schema cache entries on write to prevent unbounded memory growth
- Remove unused ValidationReport struct and config_path field
- Rename IndexRecommendationType variants to remove redundant suffix
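The first bullet is the heart of the commit: building a `reqwest::Client` per call throws away its connection pool and TLS session cache every time. The new shape, lifted out of the diff below into a standalone sketch for readability, constructs the client once behind `std::sync::LazyLock` and hands out a `'static` reference:

```rust
use std::sync::LazyLock;
use std::time::Duration;

// One process-wide client: the connection pool and TLS state are reused
// across calls instead of being rebuilt on every request.
fn http_client() -> &'static reqwest::Client {
    static CLIENT: LazyLock<reqwest::Client> = LazyLock::new(|| {
        reqwest::Client::builder()
            .connect_timeout(Duration::from_secs(5))
            .timeout(Duration::from_secs(300))
            .build()
            .unwrap_or_default()
    });
    &CLIENT
}
```

Call sites change only in type: `http_client().get(&url)` now borrows the shared instance instead of moving a freshly built one.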
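The second bullet applies to `get_index_advisor_report` the same `tokio::join!` pattern that `build_schema_context` already used for its metadata queries: independent futures are awaited concurrently, so total latency approaches the slowest query rather than the sum. A minimal self-contained sketch of the idea; the queries here are illustrative stand-ins, not the report's real statements:

```rust
use sqlx::PgPool;

// Three independent queries run concurrently on pooled connections.
// tokio::join! returns (a, b, c) once all futures complete, so the
// elapsed time is roughly max(q1, q2, q3) instead of q1 + q2 + q3.
async fn quick_stats(pool: &PgPool) -> Result<(i64, i64, String), sqlx::Error> {
    let (tables, indexes, version) = tokio::join!(
        sqlx::query_scalar::<_, i64>("SELECT count(*) FROM pg_stat_user_tables").fetch_one(pool),
        sqlx::query_scalar::<_, i64>("SELECT count(*) FROM pg_stat_user_indexes").fetch_one(pool),
        sqlx::query_scalar::<_, String>("SELECT version()").fetch_one(pool),
    );
    Ok((tables?, indexes?, version?))
}
```

Each branch still yields its own `Result`, which is why the diff below can let the `pg_stat_statements` query fail gracefully while the other two are unwrapped.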
@@ -2,10 +2,10 @@ use crate::commands::data::bind_json_value;
 use crate::commands::queries::pg_value_to_json;
 use crate::error::{TuskError, TuskResult};
 use crate::models::ai::{
-    AiProvider, AiSettings, GenerateDataParams, GeneratedDataPreview, GeneratedTableData,
-    IndexAdvisorReport, IndexRecommendation, IndexStats,
-    OllamaChatMessage, OllamaChatRequest, OllamaChatResponse, OllamaModel, OllamaTagsResponse,
-    SlowQuery, TableStats, ValidationRule, ValidationStatus, DataGenProgress,
+    AiProvider, AiSettings, DataGenProgress, GenerateDataParams, GeneratedDataPreview,
+    GeneratedTableData, IndexAdvisorReport, IndexRecommendation, IndexStats, OllamaChatMessage,
+    OllamaChatRequest, OllamaChatResponse, OllamaModel, OllamaTagsResponse, SlowQuery, TableStats,
+    ValidationRule, ValidationStatus,
 };
 use crate::state::AppState;
 use crate::utils::{escape_ident, topological_sort_tables};
@@ -20,12 +20,16 @@ use tauri::{AppHandle, Emitter, Manager, State};
 const MAX_RETRIES: u32 = 2;
 const RETRY_DELAY_MS: u64 = 1000;
 
-fn http_client() -> reqwest::Client {
-    reqwest::Client::builder()
-        .connect_timeout(Duration::from_secs(5))
-        .timeout(Duration::from_secs(300))
-        .build()
-        .unwrap_or_default()
+fn http_client() -> &'static reqwest::Client {
+    use std::sync::LazyLock;
+    static CLIENT: LazyLock<reqwest::Client> = LazyLock::new(|| {
+        reqwest::Client::builder()
+            .connect_timeout(Duration::from_secs(5))
+            .timeout(Duration::from_secs(300))
+            .build()
+            .unwrap_or_default()
+    });
+    &CLIENT
 }
 
 fn get_ai_settings_path(app: &AppHandle) -> TuskResult<std::path::PathBuf> {
@@ -65,11 +69,10 @@ pub async fn save_ai_settings(
 #[tauri::command]
 pub async fn list_ollama_models(ollama_url: String) -> TuskResult<Vec<OllamaModel>> {
     let url = format!("{}/api/tags", ollama_url.trim_end_matches('/'));
-    let resp = http_client()
-        .get(&url)
-        .send()
-        .await
-        .map_err(|e| TuskError::Ai(format!("Cannot connect to Ollama at {}: {}", ollama_url, e)))?;
+    let resp =
+        http_client().get(&url).send().await.map_err(|e| {
+            TuskError::Ai(format!("Cannot connect to Ollama at {}: {}", ollama_url, e))
+        })?;
 
     if !resp.status().is_success() {
         let status = resp.status();
@@ -119,7 +122,10 @@ where
     }
 
     Err(last_error.unwrap_or_else(|| {
-        TuskError::Ai(format!("{} failed after {} attempts", operation, MAX_RETRIES))
+        TuskError::Ai(format!(
+            "{} failed after {} attempts",
+            operation, MAX_RETRIES
+        ))
     }))
 }
 
@@ -164,10 +170,7 @@ async fn call_ollama_chat(
     }
 
     let model = settings.model.clone();
-    let url = format!(
-        "{}/api/chat",
-        settings.ollama_url.trim_end_matches('/')
-    );
+    let url = format!("{}/api/chat", settings.ollama_url.trim_end_matches('/'));
 
     let request = OllamaChatRequest {
         model: model.clone(),
@@ -194,10 +197,7 @@ async fn call_ollama_chat(
         .send()
         .await
         .map_err(|e| {
-            TuskError::Ai(format!(
-                "Cannot connect to Ollama at {}: {}",
-                url, e
-            ))
+            TuskError::Ai(format!("Cannot connect to Ollama at {}: {}", url, e))
         })?;
 
     if !resp.status().is_success() {
@@ -379,10 +379,7 @@ pub async fn fix_sql_error(
         schema_text
     );
 
-    let user_content = format!(
-        "SQL query:\n{}\n\nError message:\n{}",
-        sql, error_message
-    );
+    let user_content = format!("SQL query:\n{}\n\nError message:\n{}", sql, error_message);
 
     let raw = call_ollama_chat(&app, &state, system_prompt, user_content).await?;
     Ok(clean_sql_response(&raw))
@@ -405,9 +402,15 @@ pub(crate) async fn build_schema_context(
 
     // Run all metadata queries in parallel for speed
     let (
-        version_res, col_res, fk_res, enum_res,
-        tbl_comment_res, col_comment_res, unique_res,
-        varchar_res, jsonb_res,
+        version_res,
+        col_res,
+        fk_res,
+        enum_res,
+        tbl_comment_res,
+        col_comment_res,
+        unique_res,
+        varchar_res,
+        jsonb_res,
     ) = tokio::join!(
         sqlx::query_scalar::<_, String>("SELECT version()").fetch_one(&pool),
         fetch_columns(&pool),
@@ -586,10 +589,9 @@ pub(crate) async fn build_schema_context(
         // Unique constraints for this table
         let schema_table: Vec<&str> = full_name.splitn(2, '.').collect();
         if schema_table.len() == 2 {
-            if let Some(uqs) = unique_map.get(&(
-                schema_table[0].to_string(),
-                schema_table[1].to_string(),
-            )) {
+            if let Some(uqs) =
+                unique_map.get(&(schema_table[0].to_string(), schema_table[1].to_string()))
+            {
                 for uq_cols in uqs {
                     output.push(format!(" UNIQUE({})", uq_cols));
                 }
@@ -609,7 +611,9 @@ pub(crate) async fn build_schema_context(
     let result = output.join("\n");
 
     // Cache the result
-    state.set_schema_cache(connection_id.to_string(), result.clone()).await;
+    state
+        .set_schema_cache(connection_id.to_string(), result.clone())
+        .await;
 
     Ok(result)
 }
@@ -931,10 +935,7 @@ async fn fetch_jsonb_keys(
 
     let query = parts.join(" UNION ALL ");
 
-    let rows = match sqlx::query(&query)
-        .fetch_all(pool)
-        .await
-    {
+    let rows = match sqlx::query(&query).fetch_all(pool).await {
         Ok(r) => r,
         Err(e) => {
             log::warn!("Failed to fetch JSONB keys: {}", e);
@@ -1033,6 +1034,26 @@ fn simplify_default(raw: &str) -> String {
     s.to_string()
 }
 
+fn validate_select_statement(sql: &str) -> TuskResult<()> {
+    let sql_upper = sql.trim().to_uppercase();
+    if !sql_upper.starts_with("SELECT") {
+        return Err(TuskError::Custom(
+            "Validation query must be a SELECT statement".to_string(),
+        ));
+    }
+    Ok(())
+}
+
+fn validate_index_ddl(ddl: &str) -> TuskResult<()> {
+    let ddl_upper = ddl.trim().to_uppercase();
+    if !ddl_upper.starts_with("CREATE INDEX") && !ddl_upper.starts_with("DROP INDEX") {
+        return Err(TuskError::Custom(
+            "Only CREATE INDEX and DROP INDEX statements are allowed".to_string(),
+        ));
+    }
+    Ok(())
+}
+
 fn clean_sql_response(raw: &str) -> String {
     let trimmed = raw.trim();
     // Remove markdown code fences
@@ -1098,18 +1119,13 @@ pub async fn run_validation_rule(
     sql: String,
     sample_limit: Option<u32>,
 ) -> TuskResult<ValidationRule> {
-    let sql_upper = sql.trim().to_uppercase();
-    if !sql_upper.starts_with("SELECT") {
-        return Err(TuskError::Custom(
-            "Validation query must be a SELECT statement".to_string(),
-        ));
-    }
+    validate_select_statement(&sql)?;
 
     let pool = state.get_pool(&connection_id).await?;
     let limit = sample_limit.unwrap_or(10);
     let _start = Instant::now();
 
-    let mut tx = (&pool).begin().await.map_err(TuskError::Database)?;
+    let mut tx = pool.begin().await.map_err(TuskError::Database)?;
     sqlx::query("SET TRANSACTION READ ONLY")
         .execute(&mut *tx)
         .await
@@ -1199,7 +1215,13 @@ pub async fn suggest_validation_rules(
         schema_text
     );
 
-    let raw = call_ollama_chat(&app, &state, system_prompt, "Suggest validation rules".to_string()).await?;
+    let raw = call_ollama_chat(
+        &app,
+        &state,
+        system_prompt,
+        "Suggest validation rules".to_string(),
+    )
+    .await?;
 
     let cleaned = raw.trim();
     let json_start = cleaned.find('[').unwrap_or(0);
@@ -1207,7 +1229,10 @@ pub async fn suggest_validation_rules(
     let json_str = &cleaned[json_start..json_end];
 
     let rules: Vec<String> = serde_json::from_str(json_str).map_err(|e| {
-        TuskError::Ai(format!("Failed to parse AI response as JSON array: {}. Response: {}", e, cleaned))
+        TuskError::Ai(format!(
+            "Failed to parse AI response as JSON array: {}. Response: {}",
+            e, cleaned
+        ))
     })?;
 
     Ok(rules)
@@ -1226,13 +1251,16 @@ pub async fn generate_test_data_preview(
 ) -> TuskResult<GeneratedDataPreview> {
     let pool = state.get_pool(&params.connection_id).await?;
 
-    let _ = app.emit("datagen-progress", DataGenProgress {
-        gen_id: gen_id.clone(),
-        stage: "schema".to_string(),
-        percent: 10,
-        message: "Building schema context...".to_string(),
-        detail: None,
-    });
+    let _ = app.emit(
+        "datagen-progress",
+        DataGenProgress {
+            gen_id: gen_id.clone(),
+            stage: "schema".to_string(),
+            percent: 10,
+            message: "Building schema context...".to_string(),
+            detail: None,
+        },
+    );
 
     let schema_text = build_schema_context(&state, &params.connection_id).await?;
 
@@ -1255,7 +1283,14 @@ pub async fn generate_test_data_preview(
 
     let fk_edges: Vec<(String, String, String, String)> = fk_rows
         .iter()
-        .map(|fk| (fk.schema.clone(), fk.table.clone(), fk.ref_schema.clone(), fk.ref_table.clone()))
+        .map(|fk| {
+            (
+                fk.schema.clone(),
+                fk.table.clone(),
+                fk.ref_schema.clone(),
+                fk.ref_table.clone(),
+            )
+        })
         .collect();
 
     let sorted_tables = topological_sort_tables(&fk_edges, &target_tables);
@@ -1266,13 +1301,16 @@ pub async fn generate_test_data_preview(
 
     let row_count = params.row_count.min(1000);
 
-    let _ = app.emit("datagen-progress", DataGenProgress {
-        gen_id: gen_id.clone(),
-        stage: "generating".to_string(),
-        percent: 30,
-        message: "AI is generating test data...".to_string(),
-        detail: None,
-    });
+    let _ = app.emit(
+        "datagen-progress",
+        DataGenProgress {
+            gen_id: gen_id.clone(),
+            stage: "generating".to_string(),
+            percent: 30,
+            message: "AI is generating test data...".to_string(),
+            detail: None,
+        },
+    );
 
     let tables_desc: Vec<String> = sorted_tables
         .iter()
@@ -1329,13 +1367,16 @@ pub async fn generate_test_data_preview(
     )
     .await?;
 
-    let _ = app.emit("datagen-progress", DataGenProgress {
-        gen_id: gen_id.clone(),
-        stage: "parsing".to_string(),
-        percent: 80,
-        message: "Parsing generated data...".to_string(),
-        detail: None,
-    });
+    let _ = app.emit(
+        "datagen-progress",
+        DataGenProgress {
+            gen_id: gen_id.clone(),
+            stage: "parsing".to_string(),
+            percent: 80,
+            message: "Parsing generated data...".to_string(),
+            detail: None,
+        },
+    );
 
     // Parse JSON response
     let cleaned = raw.trim();
@@ -1343,9 +1384,13 @@ pub async fn generate_test_data_preview(
     let json_end = cleaned.rfind('}').map(|i| i + 1).unwrap_or(cleaned.len());
     let json_str = &cleaned[json_start..json_end];
 
-    let data_map: HashMap<String, Vec<HashMap<String, Value>>> =
-        serde_json::from_str(json_str).map_err(|e| {
-            TuskError::Ai(format!("Failed to parse generated data: {}. Response: {}", e, &cleaned[..cleaned.len().min(500)]))
+    let data_map: HashMap<String, Vec<HashMap<String, Value>>> = serde_json::from_str(json_str)
+        .map_err(|e| {
+            TuskError::Ai(format!(
+                "Failed to parse generated data: {}. Response: {}",
+                e,
+                &cleaned[..cleaned.len().min(500)]
+            ))
         })?;
 
     let mut tables = Vec::new();
@@ -1362,7 +1407,12 @@ pub async fn generate_test_data_preview(
 
         let rows: Vec<Vec<Value>> = rows_data
             .iter()
-            .map(|row_map| columns.iter().map(|col| row_map.get(col).cloned().unwrap_or(Value::Null)).collect())
+            .map(|row_map| {
+                columns
+                    .iter()
+                    .map(|col| row_map.get(col).cloned().unwrap_or(Value::Null))
+                    .collect()
+            })
             .collect();
 
         let count = rows.len() as u32;
@@ -1378,13 +1428,20 @@ pub async fn generate_test_data_preview(
         }
     }
 
-    let _ = app.emit("datagen-progress", DataGenProgress {
-        gen_id: gen_id.clone(),
-        stage: "done".to_string(),
-        percent: 100,
-        message: "Data generation complete".to_string(),
-        detail: Some(format!("{} rows across {} tables", total_rows, tables.len())),
-    });
+    let _ = app.emit(
+        "datagen-progress",
+        DataGenProgress {
+            gen_id: gen_id.clone(),
+            stage: "done".to_string(),
+            percent: 100,
+            message: "Data generation complete".to_string(),
+            detail: Some(format!(
+                "{} rows across {} tables",
+                total_rows,
+                tables.len()
+            )),
+        },
+    );
 
     Ok(GeneratedDataPreview {
         tables,
@@ -1404,7 +1461,7 @@ pub async fn insert_generated_data(
     }
 
     let pool = state.get_pool(&connection_id).await?;
-    let mut tx = (&pool).begin().await.map_err(TuskError::Database)?;
+    let mut tx = pool.begin().await.map_err(TuskError::Database)?;
 
     // Defer constraints for circular FKs
     sqlx::query("SET CONSTRAINTS ALL DEFERRED")
@@ -1466,20 +1523,38 @@ pub async fn get_index_advisor_report(
 ) -> TuskResult<IndexAdvisorReport> {
     let pool = state.get_pool(&connection_id).await?;
 
-    // Fetch table stats
-    let table_stats_rows = sqlx::query(
-        "SELECT schemaname, relname, seq_scan, idx_scan, n_live_tup, \
-        pg_size_pretty(pg_total_relation_size(schemaname || '.' || relname)) AS table_size, \
-        pg_size_pretty(pg_indexes_size(quote_ident(schemaname) || '.' || quote_ident(relname))) AS index_size \
-        FROM pg_stat_user_tables \
-        ORDER BY seq_scan DESC \
-        LIMIT 50"
-    )
-    .fetch_all(&pool)
-    .await
-    .map_err(TuskError::Database)?;
+    // Fetch table stats, index stats, and slow queries concurrently
+    let (table_stats_res, index_stats_res, slow_queries_res) = tokio::join!(
+        sqlx::query(
+            "SELECT schemaname, relname, seq_scan, idx_scan, n_live_tup, \
+            pg_size_pretty(pg_total_relation_size(schemaname || '.' || relname)) AS table_size, \
+            pg_size_pretty(pg_indexes_size(quote_ident(schemaname) || '.' || quote_ident(relname))) AS index_size \
+            FROM pg_stat_user_tables \
+            ORDER BY seq_scan DESC \
+            LIMIT 50"
+        )
+        .fetch_all(&pool),
+        sqlx::query(
+            "SELECT schemaname, relname, indexrelname, idx_scan, \
+            pg_size_pretty(pg_relation_size(indexrelid)) AS index_size, \
+            pg_get_indexdef(indexrelid) AS definition \
+            FROM pg_stat_user_indexes \
+            ORDER BY idx_scan ASC \
+            LIMIT 50",
+        )
+        .fetch_all(&pool),
+        sqlx::query(
+            "SELECT query, calls, total_exec_time, mean_exec_time, rows \
+            FROM pg_stat_statements \
+            WHERE calls > 0 \
+            ORDER BY mean_exec_time DESC \
+            LIMIT 20",
+        )
+        .fetch_all(&pool),
+    );
 
-    let table_stats: Vec<TableStats> = table_stats_rows
+    let table_stats: Vec<TableStats> = table_stats_res
+        .map_err(TuskError::Database)?
         .iter()
         .map(|r| TableStats {
             schema: r.get(0),
@@ -1492,20 +1567,8 @@ pub async fn get_index_advisor_report(
         })
         .collect();
 
-    // Fetch index stats
-    let index_stats_rows = sqlx::query(
-        "SELECT schemaname, relname, indexrelname, idx_scan, \
-        pg_size_pretty(pg_relation_size(indexrelid)) AS index_size, \
-        pg_get_indexdef(indexrelid) AS definition \
-        FROM pg_stat_user_indexes \
-        ORDER BY idx_scan ASC \
-        LIMIT 50"
-    )
-    .fetch_all(&pool)
-    .await
-    .map_err(TuskError::Database)?;
-
-    let index_stats: Vec<IndexStats> = index_stats_rows
+    let index_stats: Vec<IndexStats> = index_stats_res
+        .map_err(TuskError::Database)?
         .iter()
         .map(|r| IndexStats {
             schema: r.get(0),
@@ -1517,17 +1580,7 @@ pub async fn get_index_advisor_report(
         })
         .collect();
 
-    // Fetch slow queries (graceful if pg_stat_statements not available)
-    let (slow_queries, has_pg_stat_statements) = match sqlx::query(
-        "SELECT query, calls, total_exec_time, mean_exec_time, rows \
-        FROM pg_stat_statements \
-        WHERE calls > 0 \
-        ORDER BY mean_exec_time DESC \
-        LIMIT 20"
-    )
-    .fetch_all(&pool)
-    .await
-    {
+    let (slow_queries, has_pg_stat_statements) = match slow_queries_res {
         Ok(rows) => {
             let queries: Vec<SlowQuery> = rows
                 .iter()
@@ -1551,7 +1604,13 @@ pub async fn get_index_advisor_report(
     for ts in &table_stats {
         stats_text.push_str(&format!(
            " {}.{}: seq_scan={}, idx_scan={}, rows={}, size={}, idx_size={}\n",
-            ts.schema, ts.table, ts.seq_scan, ts.idx_scan, ts.n_live_tup, ts.table_size, ts.index_size
+            ts.schema,
+            ts.table,
+            ts.seq_scan,
+            ts.idx_scan,
+            ts.n_live_tup,
+            ts.table_size,
+            ts.index_size
         ));
     }
 
@@ -1568,7 +1627,10 @@ pub async fn get_index_advisor_report(
     for sq in &slow_queries {
         stats_text.push_str(&format!(
            " calls={}, mean={:.1}ms, total={:.1}ms, rows={}: {}\n",
-            sq.calls, sq.mean_time_ms, sq.total_time_ms, sq.rows,
+            sq.calls,
+            sq.mean_time_ms,
+            sq.total_time_ms,
+            sq.rows,
             sq.query.chars().take(200).collect::<String>()
         ));
     }
@@ -1635,12 +1697,7 @@ pub async fn apply_index_recommendation(
         return Err(TuskError::ReadOnly);
     }
 
-    let ddl_upper = ddl.trim().to_uppercase();
-    if !ddl_upper.starts_with("CREATE INDEX") && !ddl_upper.starts_with("DROP INDEX") {
-        return Err(TuskError::Custom(
-            "Only CREATE INDEX and DROP INDEX statements are allowed".to_string(),
-        ));
-    }
+    validate_index_ddl(&ddl)?;
 
     let pool = state.get_pool(&connection_id).await?;
 
@@ -1655,3 +1712,151 @@ pub async fn apply_index_recommendation(
 
     Ok(())
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    // ── validate_select_statement ─────────────────────────────
+
+    #[test]
+    fn select_valid_simple() {
+        assert!(validate_select_statement("SELECT 1").is_ok());
+    }
+
+    #[test]
+    fn select_valid_with_leading_whitespace() {
+        assert!(validate_select_statement(" SELECT * FROM users").is_ok());
+    }
+
+    #[test]
+    fn select_valid_lowercase() {
+        assert!(validate_select_statement("select * from users").is_ok());
+    }
+
+    #[test]
+    fn select_rejects_insert() {
+        assert!(validate_select_statement("INSERT INTO users VALUES (1)").is_err());
+    }
+
+    #[test]
+    fn select_rejects_delete() {
+        assert!(validate_select_statement("DELETE FROM users").is_err());
+    }
+
+    #[test]
+    fn select_rejects_drop() {
+        assert!(validate_select_statement("DROP TABLE users").is_err());
+    }
+
+    #[test]
+    fn select_rejects_empty() {
+        assert!(validate_select_statement("").is_err());
+    }
+
+    #[test]
+    fn select_rejects_whitespace_only() {
+        assert!(validate_select_statement("   ").is_err());
+    }
+
+    // NOTE: This test documents a known weakness — SELECT prefix allows injection
+    #[test]
+    fn select_allows_semicolon_after_select() {
+        // "SELECT 1; DROP TABLE users" starts with SELECT — passes validation
+        // This is a known limitation documented in the review
+        assert!(validate_select_statement("SELECT 1; DROP TABLE users").is_ok());
+    }
+
+    // ── validate_index_ddl ────────────────────────────────────
+
+    #[test]
+    fn ddl_valid_create_index() {
+        assert!(validate_index_ddl("CREATE INDEX idx_name ON users(email)").is_ok());
+    }
+
+    #[test]
+    fn ddl_valid_create_index_concurrently() {
+        assert!(validate_index_ddl("CREATE INDEX CONCURRENTLY idx ON t(c)").is_ok());
+    }
+
+    #[test]
+    fn ddl_valid_drop_index() {
+        assert!(validate_index_ddl("DROP INDEX idx_name").is_ok());
+    }
+
+    #[test]
+    fn ddl_valid_with_leading_whitespace() {
+        assert!(validate_index_ddl("  CREATE INDEX idx ON t(c)").is_ok());
+    }
+
+    #[test]
+    fn ddl_valid_lowercase() {
+        assert!(validate_index_ddl("create index idx on t(c)").is_ok());
+    }
+
+    #[test]
+    fn ddl_rejects_create_table() {
+        assert!(validate_index_ddl("CREATE TABLE evil(id int)").is_err());
+    }
+
+    #[test]
+    fn ddl_rejects_drop_table() {
+        assert!(validate_index_ddl("DROP TABLE users").is_err());
+    }
+
+    #[test]
+    fn ddl_rejects_alter_table() {
+        assert!(validate_index_ddl("ALTER TABLE users ADD COLUMN x int").is_err());
+    }
+
+    #[test]
+    fn ddl_rejects_empty() {
+        assert!(validate_index_ddl("").is_err());
+    }
+
+    // NOTE: Documents bypass weakness — semicolon after valid prefix
+    #[test]
+    fn ddl_allows_semicolon_injection() {
+        // "CREATE INDEX x ON t(c); DROP TABLE users" — passes validation
+        // Mitigated by sqlx single-statement execution
+        assert!(validate_index_ddl("CREATE INDEX x ON t(c); DROP TABLE users").is_ok());
+    }
+
+    // ── clean_sql_response ────────────────────────────────────
+
+    #[test]
+    fn clean_sql_plain() {
+        assert_eq!(clean_sql_response("SELECT 1"), "SELECT 1");
+    }
+
+    #[test]
+    fn clean_sql_with_fences() {
+        assert_eq!(clean_sql_response("```sql\nSELECT 1\n```"), "SELECT 1");
+    }
+
+    #[test]
+    fn clean_sql_with_generic_fences() {
+        assert_eq!(clean_sql_response("```\nSELECT 1\n```"), "SELECT 1");
+    }
+
+    #[test]
+    fn clean_sql_with_postgresql_fences() {
+        assert_eq!(
+            clean_sql_response("```postgresql\nSELECT 1\n```"),
+            "SELECT 1"
+        );
+    }
+
+    #[test]
+    fn clean_sql_with_whitespace() {
+        assert_eq!(clean_sql_response("  SELECT 1  "), "SELECT 1");
+    }
+
+    #[test]
+    fn clean_sql_no_fences_multiline() {
+        assert_eq!(
+            clean_sql_response("SELECT\n *\nFROM users"),
+            "SELECT\n *\nFROM users"
+        );
+    }
+}
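The two NOTE tests above pin down a real gap in the new validators: a bare prefix check accepts `SELECT 1; DROP TABLE users`. A hypothetical tightening (not part of this commit, and deliberately naive about semicolons inside string literals) would also reject embedded statement separators; anything stronger wants a real SQL parser:

```rust
// Illustrative hardening only, not the commit's code: reject a second
// statement smuggled in after a valid prefix by refusing any interior ';'.
fn is_single_select(sql: &str) -> bool {
    let trimmed = sql.trim().trim_end_matches(';').trim_end();
    trimmed.to_uppercase().starts_with("SELECT") && !trimmed.contains(';')
}

fn main() {
    assert!(is_single_select("SELECT 1"));
    assert!(is_single_select("select * from users;"));
    assert!(!is_single_select("SELECT 1; DROP TABLE users"));
    assert!(!is_single_select("DELETE FROM users"));
}
```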
@@ -63,10 +63,7 @@ fn shell_escape(s: &str) -> String {
 
 /// Validate pg_version matches a safe pattern (e.g. "16", "16.2", "17.1")
 fn validate_pg_version(version: &str) -> TuskResult<()> {
-    let is_valid = !version.is_empty()
-        && version
-            .chars()
-            .all(|c| c.is_ascii_digit() || c == '.');
+    let is_valid = !version.is_empty() && version.chars().all(|c| c.is_ascii_digit() || c == '.');
     if !is_valid {
         return Err(docker_err(format!(
             "Invalid pg_version '{}': must contain only digits and dots (e.g. '16', '16.2')",
@@ -116,7 +113,9 @@ pub async fn check_docker(state: State<'_, Arc<AppState>>) -> TuskResult<DockerS
 }
 
 #[tauri::command]
-pub async fn list_tusk_containers(state: State<'_, Arc<AppState>>) -> TuskResult<Vec<TuskContainer>> {
+pub async fn list_tusk_containers(
+    state: State<'_, Arc<AppState>>,
+) -> TuskResult<Vec<TuskContainer>> {
     let output = docker_cmd(&state)
         .await
         .args([
@@ -234,8 +233,8 @@ async fn check_docker_internal(docker_host: &Option<String>) -> TuskResult<Docke
         })
     } else {
         let stderr = String::from_utf8_lossy(&out.stderr).trim().to_string();
-        let daemon_running = !stderr.contains("Cannot connect")
-            && !stderr.contains("connection refused");
+        let daemon_running =
+            !stderr.contains("Cannot connect") && !stderr.contains("connection refused");
         Ok(DockerStatus {
             installed: true,
             daemon_running,
@@ -266,7 +265,14 @@ async fn do_clone(
     let docker_host = state.docker_host.read().await.clone();
 
     // Step 1: Check Docker
-    emit_progress(app, clone_id, "checking", 5, "Checking Docker availability...", None);
+    emit_progress(
+        app,
+        clone_id,
+        "checking",
+        5,
+        "Checking Docker availability...",
+        None,
+    );
     let status = check_docker_internal(&docker_host).await?;
     if !status.installed || !status.daemon_running {
         let msg = status
@@ -282,23 +288,45 @@ async fn do_clone(
         Some(p) => p,
         None => find_free_port().await?,
     };
-    emit_progress(app, clone_id, "port", 10, &format!("Using port {}", host_port), None);
+    emit_progress(
+        app,
+        clone_id,
+        "port",
+        10,
+        &format!("Using port {}", host_port),
+        None,
+    );
 
     // Step 3: Create container
-    emit_progress(app, clone_id, "container", 20, "Creating PostgreSQL container...", None);
+    emit_progress(
+        app,
+        clone_id,
+        "container",
+        20,
+        "Creating PostgreSQL container...",
+        None,
+    );
     let pg_password = params.postgres_password.as_deref().unwrap_or("tusk");
     let image = format!("postgres:{}", params.pg_version);
 
     let create_output = docker_cmd_sync(&docker_host)
         .args([
-            "run", "-d",
-            "--name", &params.container_name,
-            "-p", &format!("{}:5432", host_port),
-            "-e", &format!("POSTGRES_PASSWORD={}", pg_password),
-            "-l", "tusk.managed=true",
-            "-l", &format!("tusk.source-db={}", params.source_database),
-            "-l", &format!("tusk.source-connection={}", params.source_connection_id),
-            "-l", &format!("tusk.pg-version={}", params.pg_version),
+            "run",
+            "-d",
+            "--name",
+            &params.container_name,
+            "-p",
+            &format!("{}:5432", host_port),
+            "-e",
+            &format!("POSTGRES_PASSWORD={}", pg_password),
+            "-l",
+            "tusk.managed=true",
+            "-l",
+            &format!("tusk.source-db={}", params.source_database),
+            "-l",
+            &format!("tusk.source-connection={}", params.source_connection_id),
+            "-l",
+            &format!("tusk.pg-version={}", params.pg_version),
             &image,
         ])
         .output()
@@ -306,24 +334,56 @@ async fn do_clone(
         .map_err(|e| docker_err(format!("Failed to create container: {}", e)))?;
 
     if !create_output.status.success() {
-        let stderr = String::from_utf8_lossy(&create_output.stderr).trim().to_string();
-        emit_progress(app, clone_id, "error", 20, &format!("Failed to create container: {}", stderr), None);
-        return Err(docker_err(format!("Failed to create container: {}", stderr)));
+        let stderr = String::from_utf8_lossy(&create_output.stderr)
+            .trim()
+            .to_string();
+        emit_progress(
+            app,
+            clone_id,
+            "error",
+            20,
+            &format!("Failed to create container: {}", stderr),
+            None,
+        );
+        return Err(docker_err(format!(
+            "Failed to create container: {}",
+            stderr
+        )));
     }
 
-    let container_id = String::from_utf8_lossy(&create_output.stdout).trim().to_string();
+    let container_id = String::from_utf8_lossy(&create_output.stdout)
+        .trim()
+        .to_string();
 
     // Step 4: Wait for PostgreSQL to be ready
-    emit_progress(app, clone_id, "waiting", 30, "Waiting for PostgreSQL to be ready...", None);
+    emit_progress(
+        app,
+        clone_id,
+        "waiting",
+        30,
+        "Waiting for PostgreSQL to be ready...",
+        None,
+    );
     wait_for_pg_ready(&docker_host, &params.container_name, 30).await?;
     emit_progress(app, clone_id, "waiting", 35, "PostgreSQL is ready", None);
 
     // Step 5: Create target database
-    emit_progress(app, clone_id, "database", 35, &format!("Creating database '{}'...", params.source_database), None);
+    emit_progress(
+        app,
+        clone_id,
+        "database",
+        35,
+        &format!("Creating database '{}'...", params.source_database),
+        None,
+    );
     let create_db_output = docker_cmd_sync(&docker_host)
         .args([
-            "exec", &params.container_name,
-            "psql", "-U", "postgres", "-c",
+            "exec",
+            &params.container_name,
+            "psql",
+            "-U",
+            "postgres",
+            "-c",
             &format!("CREATE DATABASE {}", escape_ident(&params.source_database)),
         ])
         .output()
@@ -331,20 +391,42 @@ async fn do_clone(
         .map_err(|e| docker_err(format!("Failed to create database: {}", e)))?;
 
     if !create_db_output.status.success() {
-        let stderr = String::from_utf8_lossy(&create_db_output.stderr).trim().to_string();
+        let stderr = String::from_utf8_lossy(&create_db_output.stderr)
+            .trim()
+            .to_string();
         if !stderr.contains("already exists") {
-            emit_progress(app, clone_id, "error", 35, &format!("Failed to create database: {}", stderr), None);
+            emit_progress(
+                app,
+                clone_id,
+                "error",
+                35,
+                &format!("Failed to create database: {}", stderr),
+                None,
+            );
             return Err(docker_err(format!("Failed to create database: {}", stderr)));
         }
     }
 
     // Step 6: Get source connection URL (using the specific database to clone)
-    emit_progress(app, clone_id, "dump", 40, "Preparing data transfer...", None);
+    emit_progress(
+        app,
+        clone_id,
+        "dump",
+        40,
+        "Preparing data transfer...",
+        None,
+    );
     let source_config = load_connection_config(app, &params.source_connection_id)?;
     let source_url = source_config.connection_url_for_db(&params.source_database);
     emit_progress(
-        app, clone_id, "dump", 40,
-        &format!("Source: {}@{}:{}/{}", source_config.user, source_config.host, source_config.port, params.source_database),
+        app,
+        clone_id,
+        "dump",
+        40,
+        &format!(
+            "Source: {}@{}:{}/{}",
+            source_config.user, source_config.host, source_config.port, params.source_database
+        ),
         None,
     );
 
@@ -352,23 +434,84 @@ async fn do_clone(
     match params.clone_mode {
         CloneMode::SchemaOnly => {
             emit_progress(app, clone_id, "transfer", 45, "Dumping schema...", None);
-            transfer_schema_only(app, clone_id, &source_url, &params.container_name, &params.source_database, &params.pg_version, &docker_host).await?;
+            transfer_schema_only(
+                app,
+                clone_id,
+                &source_url,
+                &params.container_name,
+                &params.source_database,
+                &params.pg_version,
+                &docker_host,
+            )
+            .await?;
         }
         CloneMode::FullClone => {
-            emit_progress(app, clone_id, "transfer", 45, "Performing full database clone...", None);
-            transfer_full_clone(app, clone_id, &source_url, &params.container_name, &params.source_database, &params.pg_version, &docker_host).await?;
+            emit_progress(
+                app,
+                clone_id,
+                "transfer",
+                45,
+                "Performing full database clone...",
+                None,
+            );
+            transfer_full_clone(
+                app,
+                clone_id,
+                &source_url,
+                &params.container_name,
+                &params.source_database,
+                &params.pg_version,
+                &docker_host,
+            )
+            .await?;
         }
         CloneMode::SampleData => {
+            let has_local = try_local_pg_dump().await;
             emit_progress(app, clone_id, "transfer", 45, "Dumping schema...", None);
-            transfer_schema_only(app, clone_id, &source_url, &params.container_name, &params.source_database, &params.pg_version, &docker_host).await?;
-            emit_progress(app, clone_id, "transfer", 65, "Copying sample data...", None);
+            transfer_schema_only_with(
+                app,
+                clone_id,
+                &source_url,
+                &params.container_name,
+                &params.source_database,
+                &params.pg_version,
+                &docker_host,
+                has_local,
+            )
+            .await?;
+            emit_progress(
+                app,
+                clone_id,
+                "transfer",
+                65,
+                "Copying sample data...",
+                None,
+            );
             let sample_rows = params.sample_rows.unwrap_or(1000);
-            transfer_sample_data(app, clone_id, &source_url, &params.container_name, &params.source_database, &params.pg_version, sample_rows, &docker_host).await?;
+            transfer_sample_data_with(
+                app,
+                clone_id,
+                &source_url,
+                &params.container_name,
+                &params.source_database,
+                &params.pg_version,
+                sample_rows,
+                &docker_host,
+                has_local,
+            )
+            .await?;
         }
     }
 
     // Step 8: Save connection in Tusk
-    emit_progress(app, clone_id, "connection", 90, "Saving connection...", None);
+    emit_progress(
+        app,
+        clone_id,
+        "connection",
+        90,
+        "Saving connection...",
+        None,
+    );
     let connection_id = uuid::Uuid::new_v4().to_string();
     let new_config = ConnectionConfig {
         id: connection_id.clone(),
@@ -407,7 +550,14 @@ async fn do_clone(
         connection_url,
     };
 
-    emit_progress(app, clone_id, "done", 100, "Clone completed successfully!", None);
+    emit_progress(
+        app,
+        clone_id,
+        "done",
+        100,
+        "Clone completed successfully!",
+        None,
+    );
 
     Ok(result)
 }
@@ -424,7 +574,11 @@ async fn find_free_port() -> TuskResult<u16> {
     Ok(port)
 }
 
-async fn wait_for_pg_ready(docker_host: &Option<String>, container_name: &str, timeout_secs: u64) -> TuskResult<()> {
+async fn wait_for_pg_ready(
+    docker_host: &Option<String>,
+    container_name: &str,
+    timeout_secs: u64,
+) -> TuskResult<()> {
     let start = std::time::Instant::now();
     let timeout = std::time::Duration::from_secs(timeout_secs);
 
@@ -466,7 +620,13 @@ fn docker_host_flag(docker_host: &Option<String>) -> String {
 }
 
 /// Build the pg_dump portion of a shell command
-fn pg_dump_shell_cmd(has_local: bool, pg_version: &str, extra_args: &str, source_url: &str, docker_host: &Option<String>) -> String {
+fn pg_dump_shell_cmd(
+    has_local: bool,
+    pg_version: &str,
+    extra_args: &str,
+    source_url: &str,
+    docker_host: &Option<String>,
+) -> String {
     let escaped_url = shell_escape(source_url);
     if has_local {
         format!("pg_dump {} '{}'", extra_args, escaped_url)
@@ -503,7 +663,8 @@ async fn run_pipe_cmd(
     if !stderr.is_empty() {
         // Truncate for progress display (full log can be long)
         let short = if stderr.len() > 500 {
-            let truncated = stderr.char_indices()
+            let truncated = stderr
+                .char_indices()
                 .nth(500)
                 .map(|(i, _)| &stderr[..i])
                 .unwrap_or(&stderr);
@@ -511,33 +672,57 @@ async fn run_pipe_cmd(
         } else {
             stderr.clone()
         };
-        emit_progress(app, clone_id, "transfer", 55, &format!("{}: stderr output", label), Some(&short));
+        emit_progress(
+            app,
+            clone_id,
+            "transfer",
+            55,
+            &format!("{}: stderr output", label),
+            Some(&short),
+        );
     }
 
     // Count DDL statements in stdout for feedback
     if !stdout.is_empty() {
-        let creates = stdout.lines()
+        let creates = stdout
+            .lines()
            .filter(|l| l.starts_with("CREATE") || l.starts_with("ALTER") || l.starts_with("SET"))
            .count();
        if creates > 0 {
-            emit_progress(app, clone_id, "transfer", 58, &format!("Applied {} SQL statements", creates), None);
+            emit_progress(
+                app,
+                clone_id,
+                "transfer",
+                58,
+                &format!("Applied {} SQL statements", creates),
+                None,
+            );
        }
    }
 
     if !output.status.success() {
         let code = output.status.code().unwrap_or(-1);
         emit_progress(
-            app, clone_id, "transfer", 55,
+            app,
+            clone_id,
+            "transfer",
+            55,
             &format!("{} exited with code {}", label, code),
             Some(&stderr),
         );
 
         // Only hard-fail on connection / fatal errors
-        if stderr.contains("FATAL") || stderr.contains("could not connect")
-            || stderr.contains("No such file") || stderr.contains("password authentication failed")
-            || stderr.contains("does not exist") || (stdout.is_empty() && stderr.is_empty())
+        if stderr.contains("FATAL")
+            || stderr.contains("could not connect")
+            || stderr.contains("No such file")
+            || stderr.contains("password authentication failed")
+            || stderr.contains("does not exist")
+            || (stdout.is_empty() && stderr.is_empty())
         {
-            return Err(docker_err(format!("{} failed (exit {}): {}", label, code, stderr)));
+            return Err(docker_err(format!(
+                "{} failed (exit {}): {}",
+                label, code, stderr
+            )));
         }
     }
 
@@ -554,20 +739,61 @@ async fn transfer_schema_only(
     docker_host: &Option<String>,
 ) -> TuskResult<()> {
     let has_local = try_local_pg_dump().await;
-    let label = if has_local { "local pg_dump" } else { "Docker-based pg_dump" };
-    emit_progress(app, clone_id, "transfer", 48, &format!("Using {} for schema...", label), None);
-
-    let dump_cmd = pg_dump_shell_cmd(has_local, pg_version, "--schema-only --no-owner --no-acl", source_url, docker_host);
+    transfer_schema_only_with(app, clone_id, source_url, container_name, database, pg_version, docker_host, has_local).await
+}
+
+#[allow(clippy::too_many_arguments)]
+async fn transfer_schema_only_with(
+    app: &AppHandle,
+    clone_id: &str,
+    source_url: &str,
+    container_name: &str,
+    database: &str,
+    pg_version: &str,
+    docker_host: &Option<String>,
+    has_local: bool,
+) -> TuskResult<()> {
+    let label = if has_local {
+        "local pg_dump"
+    } else {
+        "Docker-based pg_dump"
+    };
+    emit_progress(
+        app,
+        clone_id,
+        "transfer",
+        48,
+        &format!("Using {} for schema...", label),
+        None,
+    );
+
+    let dump_cmd = pg_dump_shell_cmd(
+        has_local,
+        pg_version,
+        "--schema-only --no-owner --no-acl",
+        source_url,
+        docker_host,
+    );
     let escaped_db = shell_escape(database);
     let host_flag = docker_host_flag(docker_host);
     let pipe_cmd = format!(
         "{} | docker {} exec -i '{}' psql -U postgres -d '{}'",
-        dump_cmd, host_flag, shell_escape(container_name), escaped_db
+        dump_cmd,
+        host_flag,
+        shell_escape(container_name),
+        escaped_db
     );
 
     run_pipe_cmd(app, clone_id, &pipe_cmd, "Schema transfer").await?;
 
-    emit_progress(app, clone_id, "transfer", 60, "Schema transferred successfully", None);
+    emit_progress(
+        app,
+        clone_id,
+        "transfer",
+        60,
+        "Schema transferred successfully",
+        None,
+    );
     Ok(())
 }
 
@@ -581,16 +807,36 @@ async fn transfer_full_clone(
     docker_host: &Option<String>,
 ) -> TuskResult<()> {
     let has_local = try_local_pg_dump().await;
-    let label = if has_local { "local pg_dump" } else { "Docker-based pg_dump" };
-    emit_progress(app, clone_id, "transfer", 48, &format!("Using {} for full clone...", label), None);
+    let label = if has_local {
+        "local pg_dump"
+    } else {
+        "Docker-based pg_dump"
+    };
+    emit_progress(
+        app,
+        clone_id,
+        "transfer",
+        48,
+        &format!("Using {} for full clone...", label),
+        None,
+    );
 
     // Use plain text format piped to psql (more reliable than -Fc | pg_restore through docker exec)
-    let dump_cmd = pg_dump_shell_cmd(has_local, pg_version, "--no-owner --no-acl", source_url, docker_host);
+    let dump_cmd = pg_dump_shell_cmd(
+        has_local,
+        pg_version,
+        "--no-owner --no-acl",
+        source_url,
+        docker_host,
+    );
     let escaped_db = shell_escape(database);
     let host_flag = docker_host_flag(docker_host);
     let pipe_cmd = format!(
         "{} | docker {} exec -i '{}' psql -U postgres -d '{}'",
-        dump_cmd, host_flag, shell_escape(container_name), escaped_db
+        dump_cmd,
+        host_flag,
+        shell_escape(container_name),
+        escaped_db
     );
 
     run_pipe_cmd(app, clone_id, &pipe_cmd, "Full clone").await?;
@@ -599,7 +845,8 @@ async fn transfer_full_clone(
     Ok(())
 }
 
-async fn transfer_sample_data(
+#[allow(clippy::too_many_arguments)]
+async fn transfer_sample_data_with(
     app: &AppHandle,
     clone_id: &str,
     source_url: &str,
@@ -608,6 +855,7 @@ async fn transfer_sample_data(
     pg_version: &str,
     sample_rows: u32,
     docker_host: &Option<String>,
+    has_local: bool,
 ) -> TuskResult<()> {
     // List tables from the target (schema already transferred)
     let target_output = docker_cmd_sync(docker_host)
@@ -622,21 +870,37 @@ async fn transfer_sample_data(
         .map_err(|e| docker_err(format!("Failed to list tables: {}", e)))?;
 
     let tables_str = String::from_utf8_lossy(&target_output.stdout);
-    let tables: Vec<&str> = tables_str.lines().filter(|l| !l.trim().is_empty()).collect();
+    let tables: Vec<&str> = tables_str
+        .lines()
+        .filter(|l| !l.trim().is_empty())
+        .collect();
     let total = tables.len();
 
     if total == 0 {
-        emit_progress(app, clone_id, "transfer", 85, "No tables to copy data for", None);
+        emit_progress(
+            app,
+            clone_id,
+            "transfer",
+            85,
+            "No tables to copy data for",
+            None,
+        );
         return Ok(());
     }
 
-    let has_local = try_local_pg_dump().await;
-
     for (i, qualified_table) in tables.iter().enumerate() {
         let pct = 65 + ((i * 20) / total.max(1)).min(20) as u8;
         emit_progress(
-            app, clone_id, "transfer", pct,
-            &format!("Copying sample data: {} ({}/{})", qualified_table, i + 1, total),
+            app,
+            clone_id,
+            "transfer",
+            pct,
+            &format!(
+                "Copying sample data: {} ({}/{})",
+                qualified_table,
+                i + 1,
+                total
+            ),
             None,
         );
 
@@ -680,17 +944,17 @@ async fn transfer_sample_data(
             source_cmd, host_flag, escaped_container, escaped_db, copy_in_sql
         );
 
-        let output = Command::new("bash")
-            .args(["-c", &pipe_cmd])
-            .output()
-            .await;
+        let output = Command::new("bash").args(["-c", &pipe_cmd]).output().await;
 
         match output {
            Ok(out) => {
                let stderr = String::from_utf8_lossy(&out.stderr).trim().to_string();
                if !stderr.is_empty() && (stderr.contains("ERROR") || stderr.contains("FATAL")) {
                    emit_progress(
-                        app, clone_id, "transfer", pct,
+                        app,
+                        clone_id,
+                        "transfer",
+                        pct,
                         &format!("Warning: {}", qualified_table),
                         Some(&stderr),
                     );
@@ -698,7 +962,10 @@ async fn transfer_sample_data(
            }
            Err(e) => {
                emit_progress(
-                    app, clone_id, "transfer", pct,
+                    app,
+                    clone_id,
+                    "transfer",
+                    pct,
                     &format!("Warning: failed to copy {}: {}", qualified_table, e),
                     None,
                 );
@@ -706,7 +973,14 @@ async fn transfer_sample_data(
        }
    }
 
-    emit_progress(app, clone_id, "transfer", 85, "Sample data transfer completed", None);
+    emit_progress(
+        app,
+        clone_id,
+        "transfer",
+        85,
+        "Sample data transfer completed",
+        None,
+    );
     Ok(())
 }
 
@@ -776,8 +1050,159 @@ pub async fn remove_container(state: State<'_, Arc<AppState>>, name: String) ->

    if !output.status.success() {
        let stderr = String::from_utf8_lossy(&output.stderr);
-        return Err(docker_err(format!("Failed to remove container: {}", stderr)));
+        return Err(docker_err(format!(
+            "Failed to remove container: {}",
+            stderr
+        )));
    }

    Ok(())
}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    // ── validate_container_name ───────────────────────────────
+
+    #[test]
+    fn container_name_valid_simple() {
+        assert!(validate_container_name("mycontainer").is_ok());
+    }
+
+    #[test]
+    fn container_name_valid_with_dots_dashes_underscores() {
+        assert!(validate_container_name("my-container_v1.2").is_ok());
+    }
+
+    #[test]
+    fn container_name_valid_starts_with_digit() {
+        assert!(validate_container_name("1container").is_ok());
+    }
+
+    #[test]
+    fn container_name_empty() {
+        assert!(validate_container_name("").is_err());
+    }
+
+    #[test]
+    fn container_name_starts_with_dash() {
+        assert!(validate_container_name("-bad").is_err());
+    }
+
+    #[test]
+    fn container_name_starts_with_dot() {
+        assert!(validate_container_name(".bad").is_err());
+    }
+
+    #[test]
+    fn container_name_starts_with_underscore() {
+        assert!(validate_container_name("_bad").is_err());
+    }
+
+    #[test]
+    fn container_name_with_spaces() {
+        assert!(validate_container_name("bad name").is_err());
+    }
+
+    #[test]
+    fn container_name_with_unicode() {
+        assert!(validate_container_name("контейнер").is_err());
+    }
+
+    #[test]
+    fn container_name_with_special_chars() {
+        assert!(validate_container_name("bad;name").is_err());
+        assert!(validate_container_name("bad/name").is_err());
+        assert!(validate_container_name("bad:name").is_err());
+        assert!(validate_container_name("bad@name").is_err());
+    }
+
+    #[test]
+    fn container_name_with_shell_injection() {
+        assert!(validate_container_name("x; rm -rf /").is_err());
+        assert!(validate_container_name("x$(whoami)").is_err());
+    }
+
+    // ── validate_pg_version ───────────────────────────────────
+
+    #[test]
+    fn pg_version_valid_major() {
+        assert!(validate_pg_version("16").is_ok());
+    }
+
+    #[test]
+    fn pg_version_valid_major_minor() {
+        assert!(validate_pg_version("16.2").is_ok());
+    }
+
+    #[test]
+    fn pg_version_valid_three_parts() {
+        assert!(validate_pg_version("17.1.0").is_ok());
+    }
+
+    #[test]
+    fn pg_version_empty() {
+        assert!(validate_pg_version("").is_err());
+    }
+
+    #[test]
+    fn pg_version_with_letters() {
+        assert!(validate_pg_version("16beta1").is_err());
+    }
+
+    #[test]
+    fn pg_version_with_injection() {
+        assert!(validate_pg_version("16; rm -rf").is_err());
+    }
+
+    #[test]
+    fn pg_version_only_dots() {
+        // Current impl allows dots-only — this documents the behavior
+        assert!(validate_pg_version("...").is_ok());
+    }
+
+    // ── shell_escape ──────────────────────────────────────────
+
+    #[test]
+    fn shell_escape_no_quotes() {
+        assert_eq!(shell_escape("hello"), "hello");
+    }
+
+    #[test]
+    fn shell_escape_with_single_quote() {
+        assert_eq!(shell_escape("it's"), "it'\\''s");
+    }
+
+    #[test]
+    fn shell_escape_multiple_quotes() {
+        assert_eq!(shell_escape("a'b'c"), "a'\\''b'\\''c");
+    }
+
+    // ── shell_escape_double ───────────────────────────────────
+
+    #[test]
+    fn shell_escape_double_no_special() {
+        assert_eq!(shell_escape_double("hello"), "hello");
+    }
+
+    #[test]
+    fn shell_escape_double_with_backslash() {
+        assert_eq!(shell_escape_double(r"a\b"), r"a\\b");
+    }
+
+    #[test]
+    fn shell_escape_double_with_dollar() {
+        assert_eq!(shell_escape_double("$HOME"), "\\$HOME");
+    }
+
+    #[test]
+    fn shell_escape_double_with_backtick() {
+        assert_eq!(shell_escape_double("`whoami`"), "\\`whoami\\`");
+    }
+
+    #[test]
+    fn shell_escape_double_with_double_quote() {
+        assert_eq!(shell_escape_double(r#"say "hi""#), r#"say \"hi\""#);
+    }
+}
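The `shell_escape` and `shell_escape_double` implementations under test are not part of this diff; a minimal sketch that satisfies the assertions above (an assumption about the real helpers, which may differ) looks like:

// Hypothetical reconstruction from the tests; not the repo's actual code.

// For single-quoted contexts: close the quote, emit a literal ',
// reopen the quote (the classic POSIX '\'' dance).
fn shell_escape(s: &str) -> String {
    s.replace('\'', "'\\''")
}

// For double-quoted contexts: backslash, dollar, backtick, and the
// double quote itself are the characters the shell still interprets.
fn shell_escape_double(s: &str) -> String {
    let mut out = String::with_capacity(s.len());
    for c in s.chars() {
        if matches!(c, '\\' | '$' | '`' | '"') {
            out.push('\\');
        }
        out.push(c);
    }
    out
}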
|
@@ -46,12 +46,10 @@ pub async fn create_snapshot(
    if params.include_dependencies {
        for fk in &fk_rows {
-            for (schema, table) in &params.tables.iter().map(|t| (t.schema.clone(), t.table.clone())).collect::<Vec<_>>() {
-                if &fk.schema == schema && &fk.table == table {
-                    let parent = (fk.ref_schema.clone(), fk.ref_table.clone());
-                    if !target_tables.contains(&parent) {
-                        target_tables.push(parent);
-                    }
-                }
-            }
+            if target_tables.iter().any(|(s, t)| s == &fk.schema && t == &fk.table) {
+                let parent = (fk.ref_schema.clone(), fk.ref_table.clone());
+                if !target_tables.contains(&parent) {
+                    target_tables.push(parent);
+                }
+            }
        }
    }
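This hunk is the commit message's "per-iteration Vec allocation" fix: the old inner loop rebuilt a Vec of (schema, table) pairs from params.tables on every FK row, while the new code scans target_tables directly. A simplified sketch of the pattern, with types assumed rather than taken from the repo:

// Sketch only: (schema, table) pairs modeled as plain String tuples.
fn add_fk_parents(
    // (child schema, child table, parent schema, parent table) per FK row
    fk_rows: &[(String, String, String, String)],
    target_tables: &mut Vec<(String, String)>,
) {
    for (schema, table, ref_schema, ref_table) in fk_rows {
        // No fresh Vec per iteration: scan the existing target list in place.
        if target_tables.iter().any(|(s, t)| s == schema && t == table) {
            let parent = (ref_schema.clone(), ref_table.clone());
            if !target_tables.contains(&parent) {
                target_tables.push(parent);
            }
        }
    }
}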
@@ -60,11 +58,18 @@ pub async fn create_snapshot(
    // FK-based topological sort
    let fk_edges: Vec<(String, String, String, String)> = fk_rows
        .iter()
-        .map(|fk| (fk.schema.clone(), fk.table.clone(), fk.ref_schema.clone(), fk.ref_table.clone()))
+        .map(|fk| {
+            (
+                fk.schema.clone(),
+                fk.table.clone(),
+                fk.ref_schema.clone(),
+                fk.ref_table.clone(),
+            )
+        })
        .collect();
    let sorted_tables = topological_sort_tables(&fk_edges, &target_tables);

-    let mut tx = (&pool).begin().await.map_err(TuskError::Database)?;
+    let mut tx = pool.begin().await.map_err(TuskError::Database)?;
    sqlx::query("SET TRANSACTION READ ONLY")
        .execute(&mut *tx)
        .await
@@ -107,7 +112,11 @@ pub async fn create_snapshot(

    let data_rows: Vec<Vec<Value>> = rows
        .iter()
-        .map(|row| (0..columns.len()).map(|i| pg_value_to_json(row, i)).collect())
+        .map(|row| {
+            (0..columns.len())
+                .map(|i| pg_value_to_json(row, i))
+                .collect()
+        })
        .collect();

    let row_count = data_rows.len() as u64;
@@ -207,7 +216,7 @@ pub async fn restore_snapshot(
    let snapshot: Snapshot = serde_json::from_str(&data)?;

    let pool = state.get_pool(&params.connection_id).await?;
-    let mut tx = (&pool).begin().await.map_err(TuskError::Database)?;
+    let mut tx = pool.begin().await.map_err(TuskError::Database)?;

    sqlx::query("SET CONSTRAINTS ALL DEFERRED")
        .execute(&mut *tx)
@@ -83,16 +83,6 @@ pub struct ValidationRule {
    pub error: Option<String>,
}

-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct ValidationReport {
-    pub rules: Vec<ValidationRule>,
-    pub total_rules: usize,
-    pub passed: usize,
-    pub failed: usize,
-    pub errors: usize,
-    pub execution_time_ms: u128,
-}
-
// --- Wave 2: Data Generator ---

#[derive(Debug, Clone, Serialize, Deserialize)]
@@ -165,9 +155,12 @@ pub struct SlowQuery {
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum IndexRecommendationType {
-    CreateIndex,
-    DropIndex,
-    ReplaceIndex,
+    #[serde(rename = "create_index")]
+    Create,
+    #[serde(rename = "drop_index")]
+    Drop,
+    #[serde(rename = "replace_index")]
+    Replace,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
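One subtlety in the rename: with `rename_all = "snake_case"` alone, `Create` would now serialize as `"create"`, so the explicit `#[serde(rename = "create_index")]` attributes are what keep the wire format identical to the old `CreateIndex` variants. A quick round-trip check, sketched under the assumption that serde_json is available in dev-dependencies:

// Hypothetical test; mirrors the enum as defined in the diff above.
#[test]
fn index_recommendation_type_wire_format_unchanged() {
    let json = serde_json::to_string(&IndexRecommendationType::Create).unwrap();
    assert_eq!(json, "\"create_index\"");
    let back: IndexRecommendationType =
        serde_json::from_str("\"drop_index\"").unwrap();
    assert!(matches!(back, IndexRecommendationType::Drop));
}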
@@ -3,7 +3,6 @@ use crate::models::ai::AiSettings;
use serde::{Deserialize, Serialize};
use sqlx::PgPool;
use std::collections::HashMap;
-use std::path::PathBuf;
use std::time::{Duration, Instant};
use tokio::sync::{watch, RwLock};
@@ -22,7 +21,6 @@ pub struct SchemaCacheEntry {

pub struct AppState {
    pub pools: RwLock<HashMap<String, PgPool>>,
-    pub config_path: RwLock<Option<PathBuf>>,
    pub read_only: RwLock<HashMap<String, bool>>,
    pub db_flavors: RwLock<HashMap<String, DbFlavor>>,
    pub schema_cache: RwLock<HashMap<String, SchemaCacheEntry>>,
@@ -39,7 +37,6 @@ impl AppState {
        let (mcp_shutdown_tx, _) = watch::channel(false);
        Self {
            pools: RwLock::new(HashMap::new()),
-            config_path: RwLock::new(None),
            read_only: RwLock::new(HashMap::new()),
            db_flavors: RwLock::new(HashMap::new()),
            schema_cache: RwLock::new(HashMap::new()),
@@ -81,6 +78,8 @@ impl AppState {

    pub async fn set_schema_cache(&self, connection_id: String, schema_text: String) {
        let mut cache = self.schema_cache.write().await;
+        // Evict stale entries to prevent unbounded memory growth
+        cache.retain(|_, entry| entry.cached_at.elapsed() < SCHEMA_CACHE_TTL);
        cache.insert(
            connection_id,
            SchemaCacheEntry {
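The retain-on-write folds eviction into a path that already holds the write lock, so no background sweeper task is needed. The pattern in isolation, where SCHEMA_CACHE_TTL's value and the entry shape are assumptions for the sketch:

use std::collections::HashMap;
use std::time::{Duration, Instant};

const SCHEMA_CACHE_TTL: Duration = Duration::from_secs(300); // assumed TTL

struct CacheEntry {
    cached_at: Instant,
    schema_text: String,
}

fn set_entry(cache: &mut HashMap<String, CacheEntry>, id: String, text: String) {
    // O(n) sweep per write: cheap for a handful of connections, and it
    // bounds the map even when connection ids churn and never repeat.
    cache.retain(|_, e| e.cached_at.elapsed() < SCHEMA_CACHE_TTL);
    cache.insert(id, CacheEntry { cached_at: Instant::now(), schema_text: text });
}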