feat: add unified Settings sheet, MCP indicator, and Docker host config

- Add AppSettingsSheet (gear icon in Toolbar) with MCP, Docker, and AI sections
- MCP Server: toggle on/off, port config, status badge, endpoint URL with copy
- Docker: local/remote daemon selector with remote URL input
- AI: moved Ollama settings into the unified sheet
- MCP status probes actual TCP port for reliable running detection
- Docker commands respect configurable docker host (-H flag) for remote daemons
- MCP server supports graceful shutdown via tokio watch channel
- Settings persisted to app_settings.json alongside existing config files
- StatusBar shows MCP indicator (green/gray dot) with tooltip

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
commit e76a96deb8
parent 20b00e55b0
Date: 2026-02-16 09:04:12 +03:00
14 changed files with 800 additions and 42 deletions
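
For reference, a minimal sketch of what the new app_settings.json would contain by default, derived from the Default impls in models/settings.rs further down (the DockerHost enum serializes to lowercase). The literal below is illustrative, not taken from the commit; it needs the serde_json crate to run.

// Sketch only: expected default contents of app_settings.json.
fn main() {
    let expected = r#"{
  "mcp": { "enabled": true, "port": 9427 },
  "docker": { "host": "local", "remote_url": null }
}"#;
    let v: serde_json::Value = serde_json::from_str(expected).expect("valid JSON");
    assert_eq!(v["mcp"]["port"], 9427);
    assert_eq!(v["docker"]["host"], "local");
}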

@@ -9,6 +9,15 @@ use std::sync::Arc;
use tauri::{AppHandle, Emitter, Manager, State};
use tokio::process::Command;
async fn docker_cmd(state: &AppState) -> Command {
let host = state.docker_host.read().await;
let mut cmd = Command::new("docker");
if let Some(ref h) = *host {
cmd.args(["-H", h]);
}
cmd
}
fn docker_err(msg: impl Into<String>) -> TuskError {
TuskError::Docker(msg.into())
}
@@ -61,8 +70,9 @@ fn shell_escape(s: &str) -> String {
}
#[tauri::command]
pub async fn check_docker() -> TuskResult<DockerStatus> {
let output = Command::new("docker")
pub async fn check_docker(state: State<'_, Arc<AppState>>) -> TuskResult<DockerStatus> {
let output = docker_cmd(&state)
.await
.args(["version", "--format", "{{.Server.Version}}"])
.output()
.await;
@@ -99,8 +109,9 @@ pub async fn check_docker() -> TuskResult<DockerStatus> {
}
#[tauri::command]
pub async fn list_tusk_containers() -> TuskResult<Vec<TuskContainer>> {
let output = Command::new("docker")
pub async fn list_tusk_containers(state: State<'_, Arc<AppState>>) -> TuskResult<Vec<TuskContainer>> {
let output = docker_cmd(&state)
.await
.args([
"ps",
"-a",
@@ -189,15 +200,63 @@ pub async fn clone_to_docker(
.map_err(|e| docker_err(format!("Clone task panicked: {}", e)))?
}
/// Build a docker Command respecting the remote host setting
fn docker_cmd_sync(docker_host: &Option<String>) -> Command {
let mut cmd = Command::new("docker");
if let Some(ref h) = docker_host {
cmd.args(["-H", h]);
}
cmd
}
async fn check_docker_internal(docker_host: &Option<String>) -> TuskResult<DockerStatus> {
let output = docker_cmd_sync(docker_host)
.args(["version", "--format", "{{.Server.Version}}"])
.output()
.await;
match output {
Ok(out) => {
if out.status.success() {
let version = String::from_utf8_lossy(&out.stdout).trim().to_string();
Ok(DockerStatus {
installed: true,
daemon_running: true,
version: Some(version),
error: None,
})
} else {
let stderr = String::from_utf8_lossy(&out.stderr).trim().to_string();
let daemon_running = !stderr.contains("Cannot connect")
&& !stderr.contains("connection refused");
Ok(DockerStatus {
installed: true,
daemon_running,
version: None,
error: Some(stderr),
})
}
}
Err(_) => Ok(DockerStatus {
installed: false,
daemon_running: false,
version: None,
error: Some("Docker CLI not found. Please install Docker.".to_string()),
}),
}
}
async fn do_clone(
app: &AppHandle,
_state: &Arc<AppState>,
state: &Arc<AppState>,
params: &CloneToDockerParams,
clone_id: &str,
) -> TuskResult<CloneResult> {
let docker_host = state.docker_host.read().await.clone();
// Step 1: Check Docker
emit_progress(app, clone_id, "checking", 5, "Checking Docker availability...", None);
let status = check_docker().await?;
let status = check_docker_internal(&docker_host).await?;
if !status.installed || !status.daemon_running {
let msg = status
.error
@@ -219,7 +278,7 @@ async fn do_clone(
let pg_password = params.postgres_password.as_deref().unwrap_or("tusk");
let image = format!("postgres:{}", params.pg_version);
let create_output = Command::new("docker")
let create_output = docker_cmd_sync(&docker_host)
.args([
"run", "-d",
"--name", &params.container_name,
@@ -245,12 +304,12 @@ async fn do_clone(
// Step 4: Wait for PostgreSQL to be ready
emit_progress(app, clone_id, "waiting", 30, "Waiting for PostgreSQL to be ready...", None);
wait_for_pg_ready(&params.container_name, 30).await?;
wait_for_pg_ready(&docker_host, &params.container_name, 30).await?;
emit_progress(app, clone_id, "waiting", 35, "PostgreSQL is ready", None);
// Step 5: Create target database
emit_progress(app, clone_id, "database", 35, &format!("Creating database '{}'...", params.source_database), None);
let create_db_output = Command::new("docker")
let create_db_output = docker_cmd_sync(&docker_host)
.args([
"exec", &params.container_name,
"psql", "-U", "postgres", "-c",
@@ -282,18 +341,18 @@ async fn do_clone(
match params.clone_mode {
CloneMode::SchemaOnly => {
emit_progress(app, clone_id, "transfer", 45, "Dumping schema...", None);
transfer_schema_only(app, clone_id, &source_url, &params.container_name, &params.source_database, &params.pg_version).await?;
transfer_schema_only(app, clone_id, &source_url, &params.container_name, &params.source_database, &params.pg_version, &docker_host).await?;
}
CloneMode::FullClone => {
emit_progress(app, clone_id, "transfer", 45, "Performing full database clone...", None);
transfer_full_clone(app, clone_id, &source_url, &params.container_name, &params.source_database, &params.pg_version).await?;
transfer_full_clone(app, clone_id, &source_url, &params.container_name, &params.source_database, &params.pg_version, &docker_host).await?;
}
CloneMode::SampleData => {
emit_progress(app, clone_id, "transfer", 45, "Dumping schema...", None);
transfer_schema_only(app, clone_id, &source_url, &params.container_name, &params.source_database, &params.pg_version).await?;
transfer_schema_only(app, clone_id, &source_url, &params.container_name, &params.source_database, &params.pg_version, &docker_host).await?;
emit_progress(app, clone_id, "transfer", 65, "Copying sample data...", None);
let sample_rows = params.sample_rows.unwrap_or(1000);
transfer_sample_data(app, clone_id, &source_url, &params.container_name, &params.source_database, &params.pg_version, sample_rows).await?;
transfer_sample_data(app, clone_id, &source_url, &params.container_name, &params.source_database, &params.pg_version, sample_rows, &docker_host).await?;
}
}
@@ -354,7 +413,7 @@ async fn find_free_port() -> TuskResult<u16> {
Ok(port)
}
async fn wait_for_pg_ready(container_name: &str, timeout_secs: u64) -> TuskResult<()> {
async fn wait_for_pg_ready(docker_host: &Option<String>, container_name: &str, timeout_secs: u64) -> TuskResult<()> {
let start = std::time::Instant::now();
let timeout = std::time::Duration::from_secs(timeout_secs);
@@ -363,7 +422,7 @@ async fn wait_for_pg_ready(container_name: &str, timeout_secs: u64) -> TuskResul
return Err(docker_err("PostgreSQL did not become ready in time"));
}
let output = Command::new("docker")
let output = docker_cmd_sync(docker_host)
.args(["exec", container_name, "pg_isready", "-U", "postgres"])
.output()
.await;
@@ -387,15 +446,24 @@ async fn try_local_pg_dump() -> bool {
.unwrap_or(false)
}
/// Build the docker host flag string for shell commands
fn docker_host_flag(docker_host: &Option<String>) -> String {
match docker_host {
Some(h) => format!("-H '{}'", shell_escape(h)),
None => String::new(),
}
}
/// Build the pg_dump portion of a shell command
fn pg_dump_shell_cmd(has_local: bool, pg_version: &str, extra_args: &str, source_url: &str) -> String {
fn pg_dump_shell_cmd(has_local: bool, pg_version: &str, extra_args: &str, source_url: &str, docker_host: &Option<String>) -> String {
let escaped_url = shell_escape(source_url);
if has_local {
format!("pg_dump {} '{}'", extra_args, escaped_url)
} else {
let host_flag = docker_host_flag(docker_host);
format!(
"docker run --rm --network=host postgres:{} pg_dump {} '{}'",
pg_version, extra_args, escaped_url
"docker {} run --rm --network=host postgres:{} pg_dump {} '{}'",
host_flag, pg_version, extra_args, escaped_url
)
}
}
@@ -468,16 +536,18 @@ async fn transfer_schema_only(
container_name: &str,
database: &str,
pg_version: &str,
docker_host: &Option<String>,
) -> TuskResult<()> {
let has_local = try_local_pg_dump().await;
let label = if has_local { "local pg_dump" } else { "Docker-based pg_dump" };
emit_progress(app, clone_id, "transfer", 48, &format!("Using {} for schema...", label), None);
let dump_cmd = pg_dump_shell_cmd(has_local, pg_version, "--schema-only --no-owner --no-acl", source_url);
let dump_cmd = pg_dump_shell_cmd(has_local, pg_version, "--schema-only --no-owner --no-acl", source_url, docker_host);
let escaped_db = shell_escape(database);
let host_flag = docker_host_flag(docker_host);
let pipe_cmd = format!(
"{} | docker exec -i '{}' psql -U postgres -d '{}'",
dump_cmd, shell_escape(container_name), escaped_db
"{} | docker {} exec -i '{}' psql -U postgres -d '{}'",
dump_cmd, host_flag, shell_escape(container_name), escaped_db
);
run_pipe_cmd(app, clone_id, &pipe_cmd, "Schema transfer").await?;
@@ -493,17 +563,19 @@ async fn transfer_full_clone(
container_name: &str,
database: &str,
pg_version: &str,
docker_host: &Option<String>,
) -> TuskResult<()> {
let has_local = try_local_pg_dump().await;
let label = if has_local { "local pg_dump" } else { "Docker-based pg_dump" };
emit_progress(app, clone_id, "transfer", 48, &format!("Using {} for full clone...", label), None);
// Use plain text format piped to psql (more reliable than -Fc | pg_restore through docker exec)
let dump_cmd = pg_dump_shell_cmd(has_local, pg_version, "--no-owner --no-acl", source_url);
let dump_cmd = pg_dump_shell_cmd(has_local, pg_version, "--no-owner --no-acl", source_url, docker_host);
let escaped_db = shell_escape(database);
let host_flag = docker_host_flag(docker_host);
let pipe_cmd = format!(
"{} | docker exec -i '{}' psql -U postgres -d '{}'",
dump_cmd, shell_escape(container_name), escaped_db
"{} | docker {} exec -i '{}' psql -U postgres -d '{}'",
dump_cmd, host_flag, shell_escape(container_name), escaped_db
);
run_pipe_cmd(app, clone_id, &pipe_cmd, "Full clone").await?;
@@ -520,9 +592,10 @@ async fn transfer_sample_data(
database: &str,
pg_version: &str,
sample_rows: u32,
docker_host: &Option<String>,
) -> TuskResult<()> {
// List tables from the target (schema already transferred)
let target_output = Command::new("docker")
let target_output = docker_cmd_sync(docker_host)
.args([
"exec", container_name,
"psql", "-U", "postgres", "-d", database,
@@ -573,19 +646,20 @@ async fn transfer_sample_data(
let escaped_container = shell_escape(container_name);
let escaped_db = shell_escape(database);
let host_flag = docker_host_flag(docker_host);
let source_cmd = if has_local {
format!("psql '{}' -c \"{}\"", escaped_url, copy_out_sql)
} else {
let image = format!("postgres:{}", pg_version);
format!(
"docker run --rm --network=host {} psql '{}' -c \"{}\"",
image, escaped_url, copy_out_sql
"docker {} run --rm --network=host {} psql '{}' -c \"{}\"",
host_flag, image, escaped_url, copy_out_sql
)
};
let pipe_cmd = format!(
"set -o pipefail; {} | docker exec -i '{}' psql -U postgres -d '{}' -c \"{}\"",
source_cmd, escaped_container, escaped_db, copy_in_sql
"set -o pipefail; {} | docker {} exec -i '{}' psql -U postgres -d '{}' -c \"{}\"",
source_cmd, host_flag, escaped_container, escaped_db, copy_in_sql
);
let output = Command::new("bash")
@@ -635,8 +709,9 @@ fn save_connection_config(app: &AppHandle, config: &ConnectionConfig) -> TuskRes
}
#[tauri::command]
pub async fn start_container(name: String) -> TuskResult<()> {
let output = Command::new("docker")
pub async fn start_container(state: State<'_, Arc<AppState>>, name: String) -> TuskResult<()> {
let output = docker_cmd(&state)
.await
.args(["start", &name])
.output()
.await
@@ -651,8 +726,9 @@ pub async fn start_container(name: String) -> TuskResult<()> {
}
#[tauri::command]
pub async fn stop_container(name: String) -> TuskResult<()> {
let output = Command::new("docker")
pub async fn stop_container(state: State<'_, Arc<AppState>>, name: String) -> TuskResult<()> {
let output = docker_cmd(&state)
.await
.args(["stop", &name])
.output()
.await
@@ -667,8 +743,9 @@ pub async fn stop_container(name: String) -> TuskResult<()> {
}
#[tauri::command]
pub async fn remove_container(name: String) -> TuskResult<()> {
let output = Command::new("docker")
pub async fn remove_container(state: State<'_, Arc<AppState>>, name: String) -> TuskResult<()> {
let output = docker_cmd(&state)
.await
.args(["rm", "-f", &name])
.output()
.await
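
As an illustration of how these helpers compose (a sketch under stated assumptions, not code from the commit): with a remote daemon configured, docker_host_flag splices an -H flag into the shell pipelines built above. shell_escape is re-implemented here only so the snippet runs standalone; the real one lives in this file.

// Sketch: how the -H flag ends up in the transfer pipelines when a remote
// daemon is configured. shell_escape below is a minimal stand-in (assumption),
// not the implementation from this file.
fn shell_escape(s: &str) -> String {
    s.replace('\'', r"'\''")
}

fn docker_host_flag(docker_host: &Option<String>) -> String {
    match docker_host {
        Some(h) => format!("-H '{}'", shell_escape(h)),
        None => String::new(),
    }
}

fn main() {
    let remote = Some("ssh://deploy@db-host".to_string());
    // A schema transfer against a remote daemon would run roughly:
    //   pg_dump --schema-only ... | docker -H 'ssh://deploy@db-host' exec -i 'tusk-pg' psql -U postgres -d 'app'
    // (container and database names here are illustrative)
    println!("docker {} exec -i 'tusk-pg' psql -U postgres -d 'app'", docker_host_flag(&remote));
    // With the default local daemon the flag collapses to an empty string.
    assert_eq!(docker_host_flag(&None), "");
}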

@@ -9,3 +9,4 @@ pub mod management;
pub mod queries;
pub mod saved_queries;
pub mod schema;
pub mod settings;

@@ -0,0 +1,116 @@
use crate::error::{TuskError, TuskResult};
use crate::mcp;
use crate::models::settings::{AppSettings, DockerHost, McpStatus};
use crate::state::AppState;
use std::fs;
use std::sync::Arc;
use tauri::{AppHandle, Manager, State};
fn get_settings_path(app: &AppHandle) -> TuskResult<std::path::PathBuf> {
let dir = app
.path()
.app_data_dir()
.map_err(|e| TuskError::Custom(e.to_string()))?;
fs::create_dir_all(&dir)?;
Ok(dir.join("app_settings.json"))
}
#[tauri::command]
pub async fn get_app_settings(app: AppHandle) -> TuskResult<AppSettings> {
let path = get_settings_path(&app)?;
if !path.exists() {
return Ok(AppSettings::default());
}
let data = fs::read_to_string(&path)?;
let settings: AppSettings = serde_json::from_str(&data)?;
Ok(settings)
}
#[tauri::command]
pub async fn save_app_settings(
app: AppHandle,
state: State<'_, Arc<AppState>>,
settings: AppSettings,
) -> TuskResult<()> {
let path = get_settings_path(&app)?;
let data = serde_json::to_string_pretty(&settings)?;
fs::write(&path, data)?;
// Apply docker host setting
{
let mut docker_host = state.docker_host.write().await;
*docker_host = match settings.docker.host {
DockerHost::Remote => settings.docker.remote_url.clone(),
DockerHost::Local => None,
};
}
// Apply MCP setting: restart or stop
let is_running = *state.mcp_running.read().await;
if settings.mcp.enabled {
if is_running {
// Stop existing MCP server first
let _ = state.mcp_shutdown_tx.send(true);
// Give it a moment to shut down
tokio::time::sleep(std::time::Duration::from_millis(200)).await;
*state.mcp_running.write().await = false;
}
// Start new MCP server
let connections_path = app
.path()
.app_data_dir()
.map_err(|e| TuskError::Custom(e.to_string()))?
.join("connections.json");
let mcp_state = state.inner().clone();
let port = settings.mcp.port;
let shutdown_rx = state.mcp_shutdown_tx.subscribe();
tokio::spawn(async move {
*mcp_state.mcp_running.write().await = true;
if let Err(e) =
mcp::start_mcp_server(mcp_state.clone(), connections_path, port, shutdown_rx).await
{
log::error!("MCP server error: {}", e);
}
*mcp_state.mcp_running.write().await = false;
});
} else if is_running {
// Stop MCP server
let _ = state.mcp_shutdown_tx.send(true);
*state.mcp_running.write().await = false;
}
Ok(())
}
#[tauri::command]
pub async fn get_mcp_status(app: AppHandle) -> TuskResult<McpStatus> {
// Read settings from file for enabled/port
let settings = {
let path = get_settings_path(&app)?;
if path.exists() {
let data = fs::read_to_string(&path)?;
serde_json::from_str::<AppSettings>(&data).unwrap_or_default()
} else {
AppSettings::default()
}
};
// Probe the actual port to determine if MCP is running
let running = tokio::time::timeout(
std::time::Duration::from_millis(500),
tokio::net::TcpStream::connect(format!("127.0.0.1:{}", settings.mcp.port)),
)
.await
.map(|r| r.is_ok())
.unwrap_or(false);
Ok(McpStatus {
enabled: settings.mcp.enabled,
port: settings.mcp.port,
running,
})
}

@@ -5,6 +5,7 @@ mod models;
mod state;
mod utils;
use models::settings::{AppSettings, DockerHost};
use state::AppState;
use std::sync::Arc;
use tauri::Manager;
@@ -24,12 +25,52 @@ pub fn run() {
.expect("failed to resolve app data dir")
.join("connections.json");
tauri::async_runtime::spawn(async move {
if let Err(e) = mcp::start_mcp_server(state, connections_path, 9427).await {
log::error!("MCP server error: {}", e);
}
// Read app settings
let settings_path = app
.path()
.app_data_dir()
.expect("failed to resolve app data dir")
.join("app_settings.json");
let settings = if settings_path.exists() {
std::fs::read_to_string(&settings_path)
.ok()
.and_then(|data| serde_json::from_str::<AppSettings>(&data).ok())
.unwrap_or_default()
} else {
AppSettings::default()
};
// Apply docker host from settings
let docker_host = match settings.docker.host {
DockerHost::Remote => settings.docker.remote_url.clone(),
DockerHost::Local => None,
};
let mcp_enabled = settings.mcp.enabled;
let mcp_port = settings.mcp.port;
// Set docker host synchronously (state is fresh, no contention)
let state_for_setup = state.clone();
tauri::async_runtime::block_on(async {
*state_for_setup.docker_host.write().await = docker_host;
});
if mcp_enabled {
let shutdown_rx = state.mcp_shutdown_tx.subscribe();
let mcp_state = state.clone();
tauri::async_runtime::spawn(async move {
*mcp_state.mcp_running.write().await = true;
if let Err(e) =
mcp::start_mcp_server(mcp_state.clone(), connections_path, mcp_port, shutdown_rx)
.await
{
log::error!("MCP server error: {}", e);
}
*mcp_state.mcp_running.write().await = false;
});
}
Ok(())
})
.invoke_handler(tauri::generate_handler![
@@ -107,6 +148,10 @@ pub fn run() {
commands::docker::start_container,
commands::docker::stop_container,
commands::docker::remove_container,
// settings
commands::settings::get_app_settings,
commands::settings::save_app_settings,
commands::settings::get_mcp_status,
])
.run(tauri::generate_context!())
.expect("error while running tauri application");

@@ -13,6 +13,7 @@ use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
use std::sync::Arc;
use tokio::sync::watch;
// --- Tool parameter types ---
@@ -217,6 +218,7 @@ pub async fn start_mcp_server(
state: Arc<AppState>,
connections_path: PathBuf,
port: u16,
mut shutdown_rx: watch::Receiver<bool>,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let service = StreamableHttpService::new(
move || Ok(TuskMcpServer::new(state.clone(), connections_path.clone())),
@@ -230,7 +232,14 @@ pub async fn start_mcp_server(
log::info!("MCP server listening on http://{}/mcp", addr);
axum::serve(listener, router).await?;
tokio::select! {
res = axum::serve(listener, router) => {
res?;
}
_ = shutdown_rx.changed() => {
log::info!("MCP server stopped by shutdown signal");
}
}
Ok(())
}
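
A minimal standalone sketch of the same shutdown pattern, with a sleep standing in for axum::serve; names and timings are illustrative only.

// Sketch of the graceful-shutdown pattern used above: a watch channel whose
// sender lives in AppState (mcp_shutdown_tx) and whose receiver is raced
// against the server future in tokio::select!.
use std::time::Duration;
use tokio::sync::watch;

async fn serve_until_shutdown(mut shutdown_rx: watch::Receiver<bool>) {
    tokio::select! {
        // Stand-in for `axum::serve(listener, router)`.
        _ = tokio::time::sleep(Duration::from_secs(3600)) => {}
        _ = shutdown_rx.changed() => {
            println!("server stopped by shutdown signal");
        }
    }
}

#[tokio::main]
async fn main() {
    let (shutdown_tx, shutdown_rx) = watch::channel(false);
    let server = tokio::spawn(serve_until_shutdown(shutdown_rx));
    // Sending a new value wakes every receiver's `changed()` future.
    let _ = shutdown_tx.send(true);
    server.await.unwrap();
}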

@@ -7,3 +7,4 @@ pub mod management;
pub mod query_result;
pub mod saved_queries;
pub mod schema;
pub mod settings;

@@ -0,0 +1,60 @@
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AppSettings {
pub mcp: McpSettings,
pub docker: DockerSettings,
}
impl Default for AppSettings {
fn default() -> Self {
Self {
mcp: McpSettings::default(),
docker: DockerSettings::default(),
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct McpSettings {
pub enabled: bool,
pub port: u16,
}
impl Default for McpSettings {
fn default() -> Self {
Self {
enabled: true,
port: 9427,
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DockerSettings {
pub host: DockerHost,
pub remote_url: Option<String>,
}
impl Default for DockerSettings {
fn default() -> Self {
Self {
host: DockerHost::Local,
remote_url: None,
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
pub enum DockerHost {
Local,
Remote,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct McpStatus {
pub enabled: bool,
pub port: u16,
pub running: bool,
}

@@ -2,7 +2,8 @@ use serde::{Deserialize, Serialize};
use sqlx::PgPool;
use std::collections::HashMap;
use std::path::PathBuf;
use tokio::sync::RwLock;
use std::time::{Duration, Instant};
use tokio::sync::{watch, RwLock};
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
@@ -11,20 +12,37 @@ pub enum DbFlavor {
Greenplum,
}
#[derive(Clone)]
pub struct SchemaCacheEntry {
pub schema_text: String,
pub cached_at: Instant,
}
pub struct AppState {
pub pools: RwLock<HashMap<String, PgPool>>,
pub config_path: RwLock<Option<PathBuf>>,
pub read_only: RwLock<HashMap<String, bool>>,
pub db_flavors: RwLock<HashMap<String, DbFlavor>>,
pub schema_cache: RwLock<HashMap<String, SchemaCacheEntry>>,
pub mcp_shutdown_tx: watch::Sender<bool>,
pub mcp_running: RwLock<bool>,
pub docker_host: RwLock<Option<String>>,
}
const SCHEMA_CACHE_TTL: Duration = Duration::from_secs(300); // 5 minutes
impl AppState {
pub fn new() -> Self {
let (mcp_shutdown_tx, _) = watch::channel(false);
Self {
pools: RwLock::new(HashMap::new()),
config_path: RwLock::new(None),
read_only: RwLock::new(HashMap::new()),
db_flavors: RwLock::new(HashMap::new()),
schema_cache: RwLock::new(HashMap::new()),
mcp_shutdown_tx,
mcp_running: RwLock::new(false),
docker_host: RwLock::new(None),
}
}
@@ -37,4 +55,31 @@ impl AppState {
let map = self.db_flavors.read().await;
map.get(id).copied().unwrap_or(DbFlavor::PostgreSQL)
}
pub async fn get_schema_cache(&self, connection_id: &str) -> Option<String> {
let cache = self.schema_cache.read().await;
cache.get(connection_id).and_then(|entry| {
if entry.cached_at.elapsed() < SCHEMA_CACHE_TTL {
Some(entry.schema_text.clone())
} else {
None
}
})
}
pub async fn set_schema_cache(&self, connection_id: String, schema_text: String) {
let mut cache = self.schema_cache.write().await;
cache.insert(
connection_id,
SchemaCacheEntry {
schema_text,
cached_at: Instant::now(),
},
);
}
pub async fn invalidate_schema_cache(&self, connection_id: &str) {
let mut cache = self.schema_cache.write().await;
cache.remove(connection_id);
}
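
Intended usage of the new schema cache, as a rough sketch against the AppState defined above; the caller function and its names are hypothetical and not part of this commit.

// Hypothetical caller: reuse the cached schema text for up to five minutes,
// rebuild it otherwise. Invalidation after DDL would call invalidate_schema_cache.
async fn schema_for_ai_prompt(state: &AppState, connection_id: &str) -> String {
    if let Some(cached) = state.get_schema_cache(connection_id).await {
        return cached; // entry younger than SCHEMA_CACHE_TTL
    }
    // Stand-in for the real schema introspection query.
    let fresh = format!("-- schema for connection {}", connection_id);
    state
        .set_schema_cache(connection_id.to_string(), fresh.clone())
        .await;
    fresh
}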
}