From 204795a74f608d94398aac024519772e457fda45 Mon Sep 17 00:00:00 2001 From: Jean-Gabriel Gill-Couture Date: Wed, 17 Dec 2025 16:43:37 -0500 Subject: [PATCH] feat(failoverPostgres): Its alive! We can now deploy a multisite postgres instance. The public hostname is still hardcoded, we will have to fix that but the rest is good enough --- examples/multisite_postgres/env_example.sh | 3 + examples/multisite_postgres/src/main.rs | 17 +- examples/public_postgres/src/main.rs | 12 +- harmony/src/domain/topology/failover.rs | 21 ++- .../topology/k8s_anywhere/k8s_anywhere.rs | 154 ++++++++++++++---- .../domain/topology/k8s_anywhere/postgres.rs | 41 +++-- harmony/src/domain/topology/router.rs | 14 ++ harmony/src/modules/network/failover.rs | 4 + harmony/src/modules/postgresql/capability.rs | 27 +-- harmony/src/modules/postgresql/failover.rs | 47 +++--- .../src/modules/postgresql/score_public.rs | 32 ++-- 11 files changed, 258 insertions(+), 114 deletions(-) create mode 100644 examples/multisite_postgres/env_example.sh diff --git a/examples/multisite_postgres/env_example.sh b/examples/multisite_postgres/env_example.sh new file mode 100644 index 0000000..2ef957c --- /dev/null +++ b/examples/multisite_postgres/env_example.sh @@ -0,0 +1,3 @@ +export HARMONY_FAILOVER_TOPOLOGY_K8S_PRIMARY="context=default/api-your-openshift-cluster:6443/kube:admin" +export HARMONY_FAILOVER_TOPOLOGY_K8S_REPLICA="context=someuser/somecluster" +export RUST_LOG="harmony=debug" diff --git a/examples/multisite_postgres/src/main.rs b/examples/multisite_postgres/src/main.rs index bcb02c1..7ae4beb 100644 --- a/examples/multisite_postgres/src/main.rs +++ b/examples/multisite_postgres/src/main.rs @@ -1,21 +1,18 @@ use harmony::{ inventory::Inventory, - modules::postgresql::{ - K8sPostgreSQLScore, PublicPostgreSQLScore, capability::PostgreSQLConfig, - }, + modules::postgresql::{PublicPostgreSQLScore, capability::PostgreSQLConfig}, topology::{FailoverTopology, K8sAnywhereTopology}, }; #[tokio::main] async fn main() { + // env_logger::init(); let postgres = PublicPostgreSQLScore { - postgres_score: K8sPostgreSQLScore { - config: PostgreSQLConfig { - cluster_name: "harmony-postgres-example".to_string(), // Override default name - namespace: "harmony-public-postgres".to_string(), - ..Default::default() // Use harmony defaults, they are based on CNPG's default values : - // "default" namespace, 1 instance, 1Gi storage - }, + config: PostgreSQLConfig { + cluster_name: "harmony-postgres-example".to_string(), // Override default name + namespace: "harmony-public-postgres".to_string(), + ..Default::default() // Use harmony defaults, they are based on CNPG's default values : + // "default" namespace, 1 instance, 1Gi storage }, hostname: "postgrestest.sto1.nationtech.io".to_string(), }; diff --git a/examples/public_postgres/src/main.rs b/examples/public_postgres/src/main.rs index 16927d1..029080e 100644 --- a/examples/public_postgres/src/main.rs +++ b/examples/public_postgres/src/main.rs @@ -10,13 +10,11 @@ use harmony::{ #[tokio::main] async fn main() { let postgres = PublicPostgreSQLScore { - postgres_score: K8sPostgreSQLScore { - config: PostgreSQLConfig { - cluster_name: "harmony-postgres-example".to_string(), // Override default name - namespace: "harmony-public-postgres".to_string(), - ..Default::default() // Use harmony defaults, they are based on CNPG's default values : - // 1 instance, 1Gi storage - }, + config: PostgreSQLConfig { + cluster_name: "harmony-postgres-example".to_string(), // Override default name + namespace: 
"harmony-public-postgres".to_string(), + ..Default::default() // Use harmony defaults, they are based on CNPG's default values : + // 1 instance, 1Gi storage }, hostname: "postgrestest.sto1.nationtech.io".to_string(), }; diff --git a/harmony/src/domain/topology/failover.rs b/harmony/src/domain/topology/failover.rs index 43c2ac6..0388222 100644 --- a/harmony/src/domain/topology/failover.rs +++ b/harmony/src/domain/topology/failover.rs @@ -9,13 +9,30 @@ pub struct FailoverTopology { } #[async_trait] -impl Topology for FailoverTopology { +impl Topology for FailoverTopology { fn name(&self) -> &str { "FailoverTopology" } async fn ensure_ready(&self) -> Result { - todo!() + let primary_outcome = self.primary.ensure_ready().await?; + let replica_outcome = self.replica.ensure_ready().await?; + + match (primary_outcome, replica_outcome) { + (PreparationOutcome::Noop, PreparationOutcome::Noop) => Ok(PreparationOutcome::Noop), + (p, r) => { + let mut details = Vec::new(); + if let PreparationOutcome::Success { details: d } = p { + details.push(format!("Primary: {}", d)); + } + if let PreparationOutcome::Success { details: d } = r { + details.push(format!("Replica: {}", d)); + } + Ok(PreparationOutcome::Success { + details: details.join(", "), + }) + } + } } } diff --git a/harmony/src/domain/topology/k8s_anywhere/k8s_anywhere.rs b/harmony/src/domain/topology/k8s_anywhere/k8s_anywhere.rs index c3c6fa3..4e0337b 100644 --- a/harmony/src/domain/topology/k8s_anywhere/k8s_anywhere.rs +++ b/harmony/src/domain/topology/k8s_anywhere/k8s_anywhere.rs @@ -106,6 +106,16 @@ impl K8sclient for K8sAnywhereTopology { #[async_trait] impl TlsRouter for K8sAnywhereTopology { + async fn get_wildcard_domain(&self) -> Result, String> { + todo!() + } + + /// Returns the port that this router exposes externally. + async fn get_router_port(&self) -> u16 { + // TODO un-hardcode this :) + 443 + } + async fn install_route(&self, route: TlsRoute) -> Result<(), String> { let distro = self .get_k8s_distribution() @@ -682,7 +692,14 @@ impl K8sAnywhereTopology { return Ok(Some(K8sState { client: Arc::new(client), source: K8sSource::Kubeconfig, - message: format!("Loaded k8s client from kubeconfig {kubeconfig}"), + message: format!( + "Loaded k8s client from kubeconfig {kubeconfig} using context {}", + self.config + .k8s_context + .as_ref() + .map(|s| s.clone()) + .unwrap_or_default() + ), })); } None => { @@ -934,20 +951,23 @@ pub struct K8sAnywhereConfig { impl K8sAnywhereConfig { /// Reads an environment variable `env_var` and parses its content : /// Comma-separated `key=value` pairs, e.g., - /// `kubeconfig=/path/to/primary.kubeconfig,context_name=primary-ctx` + /// `kubeconfig=/path/to/primary.kubeconfig,context=primary-ctx` /// /// Then creates a K8sAnywhereConfig from it local installs disabled (`use_local_k3d=false`, /// `autoinstall=false`, `use_system_kubeconfig=false`). /// `harmony_profile` is read from `HARMONY_PROFILE` env or defaults to `"dev"`. /// + /// If no kubeconfig path is provided it will fall back to system kubeconfig + /// /// Panics if `env_var` is missing or malformed. 
pub fn remote_k8s_from_env_var(env_var: &str) -> Self { Self::remote_k8s_from_env_var_with_profile(env_var, "HARMONY_PROFILE") } pub fn remote_k8s_from_env_var_with_profile(env_var: &str, profile_env_var: &str) -> Self { + debug!("Looking for env var named : {env_var}"); let env_var_value = std::env::var(env_var) - .map_err(|_| format!("Missing required env var {env_var}")) + .map_err(|e| format!("Missing required env var {env_var} : {e}")) .unwrap(); info!("Initializing remote k8s from env var value : {env_var_value}"); @@ -959,16 +979,25 @@ impl K8sAnywhereConfig { if kv.len() == 2 { match kv[0].trim() { "kubeconfig" => kubeconfig = Some(kv[1].trim().to_string()), - "context_name" => k8s_context = Some(kv[1].trim().to_string()), + "context" => k8s_context = Some(kv[1].trim().to_string()), _ => {} } } } + debug!("Found in {env_var} : kubeconfig {kubeconfig:?} and context {k8s_context:?}"); + + let use_system_kubeconfig = kubeconfig.is_none(); + + if let Some(kubeconfig_value) = std::env::var("KUBECONFIG").ok().map(|v| v.to_string()) { + kubeconfig.get_or_insert(kubeconfig_value); + } + info!("Loading k8s environment with kubeconfig {kubeconfig:?} and context {k8s_context:?}"); + K8sAnywhereConfig { kubeconfig, k8s_context, - use_system_kubeconfig: false, + use_system_kubeconfig, autoinstall: false, use_local_k3d: false, harmony_profile: std::env::var(profile_env_var).unwrap_or_else(|_| "dev".to_string()), @@ -1124,15 +1153,23 @@ mod tests { std::env::remove_var(&profile_var); } } + (config_var, profile_var) } + /// Runs a test in a separate thread to avoid polluting the process environment. + fn run_in_isolated_env(f: F) + where + F: FnOnce() + Send + 'static, + { + let handle = std::thread::spawn(f); + handle.join().expect("Test thread panicked"); + } + #[test] fn test_remote_k8s_from_env_var_full() { - let (config_var, profile_var) = setup_env_vars( - Some("kubeconfig=/foo.kc,context_name=bar"), - Some("testprof"), - ); + let (config_var, profile_var) = + setup_env_vars(Some("kubeconfig=/foo.kc,context=bar"), Some("testprof")); let cfg = K8sAnywhereConfig::remote_k8s_from_env_var_with_profile(&config_var, &profile_var); @@ -1159,39 +1196,88 @@ mod tests { #[test] fn test_remote_k8s_from_env_var_only_context() { - let (config_var, profile_var) = setup_env_vars(Some("context_name=bar"), None); + run_in_isolated_env(|| { + unsafe { + std::env::remove_var("KUBECONFIG"); + } + let (config_var, profile_var) = setup_env_vars(Some("context=bar"), None); - let cfg = - K8sAnywhereConfig::remote_k8s_from_env_var_with_profile(&config_var, &profile_var); + let cfg = + K8sAnywhereConfig::remote_k8s_from_env_var_with_profile(&config_var, &profile_var); - assert_eq!(cfg.kubeconfig, None); - assert_eq!(cfg.k8s_context.as_deref(), Some("bar")); + assert_eq!(cfg.kubeconfig, None); + assert_eq!(cfg.k8s_context.as_deref(), Some("bar")); + }); } #[test] fn test_remote_k8s_from_env_var_unknown_key_trim() { - let (config_var, profile_var) = setup_env_vars( - Some(" unknown=bla , kubeconfig= /foo.kc ,context_name= bar "), - None, - ); + run_in_isolated_env(|| { + unsafe { + std::env::remove_var("KUBECONFIG"); + } + let (config_var, profile_var) = setup_env_vars( + Some(" unknown=bla , kubeconfig= /foo.kc ,context= bar "), + None, + ); - let cfg = - K8sAnywhereConfig::remote_k8s_from_env_var_with_profile(&config_var, &profile_var); + let cfg = + K8sAnywhereConfig::remote_k8s_from_env_var_with_profile(&config_var, &profile_var); - assert_eq!(cfg.kubeconfig.as_deref(), Some("/foo.kc")); - 
assert_eq!(cfg.k8s_context.as_deref(), Some("bar")); + assert_eq!(cfg.kubeconfig.as_deref(), Some("/foo.kc")); + assert_eq!(cfg.k8s_context.as_deref(), Some("bar")); + }); } #[test] fn test_remote_k8s_from_env_var_empty_malformed() { - let (config_var, profile_var) = setup_env_vars(Some("malformed,no=,equal"), None); + run_in_isolated_env(|| { + unsafe { + std::env::remove_var("KUBECONFIG"); + } + let (config_var, profile_var) = setup_env_vars(Some("malformed,no=,equal"), None); - let cfg = - K8sAnywhereConfig::remote_k8s_from_env_var_with_profile(&config_var, &profile_var); + let cfg = + K8sAnywhereConfig::remote_k8s_from_env_var_with_profile(&config_var, &profile_var); - // Unknown/malformed ignored, defaults to None - assert_eq!(cfg.kubeconfig, None); - assert_eq!(cfg.k8s_context, None); + // Unknown/malformed ignored, defaults to None + assert_eq!(cfg.kubeconfig, None); + assert_eq!(cfg.k8s_context, None); + }); + } + + #[test] + fn test_remote_k8s_from_env_var_kubeconfig_fallback() { + run_in_isolated_env(|| { + unsafe { + std::env::set_var("KUBECONFIG", "/fallback/path"); + } + let (config_var, profile_var) = setup_env_vars(Some("context=bar"), None); + + let cfg = + K8sAnywhereConfig::remote_k8s_from_env_var_with_profile(&config_var, &profile_var); + + assert_eq!(cfg.kubeconfig.as_deref(), Some("/fallback/path")); + assert_eq!(cfg.k8s_context.as_deref(), Some("bar")); + }); + } + + #[test] + fn test_remote_k8s_from_env_var_kubeconfig_no_fallback_if_provided() { + run_in_isolated_env(|| { + unsafe { + std::env::set_var("KUBECONFIG", "/fallback/path"); + } + let (config_var, profile_var) = + setup_env_vars(Some("kubeconfig=/primary/path,context=bar"), None); + + let cfg = + K8sAnywhereConfig::remote_k8s_from_env_var_with_profile(&config_var, &profile_var); + + // Primary path should take precedence + assert_eq!(cfg.kubeconfig.as_deref(), Some("/primary/path")); + assert_eq!(cfg.k8s_context.as_deref(), Some("bar")); + }); } #[test] @@ -1202,12 +1288,18 @@ mod tests { } #[test] - fn test_remote_k8s_from_env_var_harmony_profile_default() { - let (config_var, profile_var) = setup_env_vars(Some("kubeconfig=/foo"), None); + fn test_remote_k8s_from_env_var_context_key() { + let (config_var, profile_var) = setup_env_vars( + Some("context=default/api-sto1-harmony-mcd:6443/kube:admin"), + None, + ); let cfg = K8sAnywhereConfig::remote_k8s_from_env_var_with_profile(&config_var, &profile_var); - assert_eq!(cfg.harmony_profile, "dev"); + assert_eq!( + cfg.k8s_context.as_deref(), + Some("default/api-sto1-harmony-mcd:6443/kube:admin") + ); } } diff --git a/harmony/src/domain/topology/k8s_anywhere/postgres.rs b/harmony/src/domain/topology/k8s_anywhere/postgres.rs index 9af9810..7051a1b 100644 --- a/harmony/src/domain/topology/k8s_anywhere/postgres.rs +++ b/harmony/src/domain/topology/k8s_anywhere/postgres.rs @@ -29,19 +29,21 @@ impl PostgreSQL for K8sAnywhereTopology { /// Extracts PostgreSQL-specific replication certs (PEM format) from a deployed primary cluster. /// Abstracts away storage/retrieval details (e.g., secrets, files). 
- async fn get_replication_certs(&self, cluster_name: &str) -> Result { + async fn get_replication_certs(&self, config: &PostgreSQLConfig) -> Result { + let cluster_name = &config.cluster_name; + let namespace = &config.namespace; let k8s_client = self.k8s_client().await.map_err(|e| e.to_string())?; let replication_secret_name = format!("{cluster_name}-replication"); let replication_secret = k8s_client - .get_resource::(&replication_secret_name, None) + .get_resource::(&replication_secret_name, Some(namespace)) .await .map_err(|e| format!("Failed to get {replication_secret_name}: {e}"))? .ok_or_else(|| format!("Replication secret '{replication_secret_name}' not found"))?; let ca_secret_name = format!("{cluster_name}-ca"); let ca_secret = k8s_client - .get_resource::(&ca_secret_name, None) + .get_resource::(&ca_secret_name, Some(namespace)) .await .map_err(|e| format!("Failed to get {ca_secret_name}: {e}"))? .ok_or_else(|| format!("CA secret '{ca_secret_name}' not found"))?; @@ -79,12 +81,15 @@ impl PostgreSQL for K8sAnywhereTopology { } /// Gets the internal/private endpoint (e.g., k8s service FQDN:5432) for the cluster. - async fn get_endpoint(&self, cluster_name: &str) -> Result { + async fn get_endpoint(&self, config: &PostgreSQLConfig) -> Result { + let cluster_name = &config.cluster_name; + let namespace = &config.namespace; + let k8s_client = self.k8s_client().await.map_err(|e| e.to_string())?; let service_name = format!("{cluster_name}-rw"); let service = k8s_client - .get_resource::(&service_name, None) + .get_resource::(&service_name, Some(namespace)) .await .map_err(|e| format!("Failed to get service '{service_name}': {e}"))? .ok_or_else(|| { @@ -104,17 +109,17 @@ impl PostgreSQL for K8sAnywhereTopology { Ok(PostgreSQLEndpoint { host, port: 5432 }) } - /// Gets the public/externally routable endpoint if configured (e.g., OKD Route:443 for TLS passthrough). - /// Returns None if no public endpoint (internal-only cluster). - /// UNSTABLE: This is opinionated for initial multisite use cases. Networking abstraction is complex - /// (cf. k8s Ingress -> Gateway API evolution); may move to higher-order Networking/PostgreSQLNetworking trait. - async fn get_public_endpoint( - &self, - cluster_name: &str, - ) -> Result, String> { - // TODO: Implement OpenShift Route lookup targeting '{cluster_name}-rw' service on port 5432 with TLS passthrough - // For now, return None assuming internal-only access or manual route configuration - info!("Public endpoint lookup not implemented for '{cluster_name}', returning None"); - Ok(None) - } + // /// Gets the public/externally routable endpoint if configured (e.g., OKD Route:443 for TLS passthrough). + // /// Returns None if no public endpoint (internal-only cluster). + // /// UNSTABLE: This is opinionated for initial multisite use cases. Networking abstraction is complex + // /// (cf. k8s Ingress -> Gateway API evolution); may move to higher-order Networking/PostgreSQLNetworking trait. 
+ // async fn get_public_endpoint( + // &self, + // cluster_name: &str, + // ) -> Result, String> { + // // TODO: Implement OpenShift Route lookup targeting '{cluster_name}-rw' service on port 5432 with TLS passthrough + // // For now, return None assuming internal-only access or manual route configuration + // info!("Public endpoint lookup not implemented for '{cluster_name}', returning None"); + // Ok(None) + // } } diff --git a/harmony/src/domain/topology/router.rs b/harmony/src/domain/topology/router.rs index f5db5bf..26aa572 100644 --- a/harmony/src/domain/topology/router.rs +++ b/harmony/src/domain/topology/router.rs @@ -110,4 +110,18 @@ pub trait TlsRouter: Send + Sync { /// Example: OKD Route{ host, to: backend:target_port, tls: {passthrough} }; /// HAProxy frontend→backend \"postgres-upstream\". async fn install_route(&self, config: TlsRoute) -> Result<(), String>; + + /// Gets the base domain that can be used to deploy applications that will be automatically + /// routed to this cluster. + /// + /// For example, if we have *.apps.nationtech.io pointing to a public load balancer, then this + /// function would return + /// + /// ``` + /// Some(String::new("apps.nationtech.io")) + /// ``` + async fn get_wildcard_domain(&self) -> Result, String>; + + /// Returns the port that this router exposes externally. + async fn get_router_port(&self) -> u16; } diff --git a/harmony/src/modules/network/failover.rs b/harmony/src/modules/network/failover.rs index 897abbd..9c99c5e 100644 --- a/harmony/src/modules/network/failover.rs +++ b/harmony/src/modules/network/failover.rs @@ -5,6 +5,10 @@ use crate::topology::{FailoverTopology, TlsRoute, TlsRouter}; #[async_trait] impl TlsRouter for FailoverTopology { + async fn get_wildcard_domain(&self) -> Result, String> {todo!()} + + /// Returns the port that this router exposes externally. + async fn get_router_port(&self) -> u16 {todo!()} async fn install_route(&self, config: TlsRoute) -> Result<(), String> { warn!( "Failover topology TlsRouter capability currently defers to the primary only. Make sure to check this is OK for you. The Replica Topology WILL NOT be affected here" diff --git a/harmony/src/modules/postgresql/capability.rs b/harmony/src/modules/postgresql/capability.rs index a2d6a91..2de12a8 100644 --- a/harmony/src/modules/postgresql/capability.rs +++ b/harmony/src/modules/postgresql/capability.rs @@ -9,19 +9,19 @@ pub trait PostgreSQL: Send + Sync { /// Extracts PostgreSQL-specific replication certs (PEM format) from a deployed primary cluster. /// Abstracts away storage/retrieval details (e.g., secrets, files). - async fn get_replication_certs(&self, cluster_name: &str) -> Result; + async fn get_replication_certs(&self, config: &PostgreSQLConfig) -> Result; /// Gets the internal/private endpoint (e.g., k8s service FQDN:5432) for the cluster. - async fn get_endpoint(&self, cluster_name: &str) -> Result; + async fn get_endpoint(&self, config: &PostgreSQLConfig) -> Result; - /// Gets the public/externally routable endpoint if configured (e.g., OKD Route:443 for TLS passthrough). - /// Returns None if no public endpoint (internal-only cluster). - /// UNSTABLE: This is opinionated for initial multisite use cases. Networking abstraction is complex - /// (cf. k8s Ingress -> Gateway API evolution); may move to higher-order Networking/PostgreSQLNetworking trait. 
- async fn get_public_endpoint( - &self, - cluster_name: &str, - ) -> Result, String>; + // /// Gets the public/externally routable endpoint if configured (e.g., OKD Route:443 for TLS passthrough). + // /// Returns None if no public endpoint (internal-only cluster). + // /// UNSTABLE: This is opinionated for initial multisite use cases. Networking abstraction is complex + // /// (cf. k8s Ingress -> Gateway API evolution); may move to higher-order Networking/PostgreSQLNetworking trait. + // async fn get_public_endpoint( + // &self, + // cluster_name: &str, + // ) -> Result, String>; } #[derive(Clone, Debug, Serialize)] @@ -34,6 +34,13 @@ pub struct PostgreSQLConfig { /// settings incompatible with the default CNPG behavior. pub namespace: String, } +impl PostgreSQLConfig { + pub fn with_namespace(&self, namespace: &str) -> PostgreSQLConfig { + let mut new = self.clone(); + new.namespace = namespace.to_string(); + new + } +} impl Default for PostgreSQLConfig { fn default() -> Self { diff --git a/harmony/src/modules/postgresql/failover.rs b/harmony/src/modules/postgresql/failover.rs index 1df5008..10fd654 100644 --- a/harmony/src/modules/postgresql/failover.rs +++ b/harmony/src/modules/postgresql/failover.rs @@ -3,6 +3,7 @@ use log::debug; use log::info; use std::collections::HashMap; +use crate::topology::TlsRouter; use crate::{ modules::postgresql::capability::{ BootstrapConfig, BootstrapStrategy, ExternalClusterConfig, PostgreSQL, @@ -13,7 +14,7 @@ use crate::{ }; #[async_trait] -impl PostgreSQL for FailoverTopology { +impl PostgreSQL for FailoverTopology { async fn deploy(&self, config: &PostgreSQLConfig) -> Result { info!( "Starting deployment of failover topology '{}'", @@ -39,20 +40,19 @@ impl PostgreSQL for FailoverTopology { info!("Retrieving replication certificates for primary '{primary_cluster_name}'"); - let certs = self - .primary - .get_replication_certs(&primary_cluster_name) - .await?; + let certs = self.primary.get_replication_certs(&primary_config).await?; info!("Replication certificates retrieved successfully"); info!("Retrieving public endpoint for primary '{primary_cluster_name}"); - let endpoint = self - .primary - .get_public_endpoint(&primary_cluster_name) - .await? - .ok_or_else(|| "No public endpoint configured on primary cluster".to_string())?; + // TODO we should be getting the public endpoint for a service by calling a method on + // TlsRouter capability. 
+ // Something along the lines of `TlsRouter::get_hostname_for_service(...).await?;` + let endpoint = PostgreSQLEndpoint { + host: "postgrestest.sto1.nationtech.io".to_string(), + port: self.primary.get_router_port().await, + }; info!( "Public endpoint '{}:{}' retrieved for primary", @@ -110,18 +110,21 @@ impl PostgreSQL for FailoverTopology { Ok(primary_cluster_name) } - async fn get_replication_certs(&self, cluster_name: &str) -> Result { - self.primary.get_replication_certs(cluster_name).await - } - - async fn get_endpoint(&self, cluster_name: &str) -> Result { - self.primary.get_endpoint(cluster_name).await - } - - async fn get_public_endpoint( + async fn get_replication_certs( &self, - cluster_name: &str, - ) -> Result, String> { - self.primary.get_public_endpoint(cluster_name).await + config: &PostgreSQLConfig, + ) -> Result { + self.primary.get_replication_certs(config).await } + + async fn get_endpoint(&self, config: &PostgreSQLConfig) -> Result { + self.primary.get_endpoint(config).await + } + + // async fn get_public_endpoint( + // &self, + // cluster_name: &str, + // ) -> Result, String> { + // self.primary.get_public_endpoint(cluster_name).await + // } } diff --git a/harmony/src/modules/postgresql/score_public.rs b/harmony/src/modules/postgresql/score_public.rs index 1ebba70..44298cf 100644 --- a/harmony/src/modules/postgresql/score_public.rs +++ b/harmony/src/modules/postgresql/score_public.rs @@ -6,9 +6,9 @@ use crate::data::Version; use crate::domain::topology::router::{TlsRoute, TlsRouter}; use crate::interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}; use crate::inventory::Inventory; -use crate::modules::postgresql::K8sPostgreSQLScore; +use crate::modules::postgresql::capability::{PostgreSQL, PostgreSQLConfig}; use crate::score::Score; -use crate::topology::{K8sclient, Topology}; +use crate::topology::Topology; /// Deploys a public PostgreSQL cluster: CNPG + TLS passthrough route for RW endpoint. /// For failover/multisite: exposes single-instance or small HA Postgres publicly. @@ -23,7 +23,7 @@ use crate::topology::{K8sclient, Topology}; #[derive(Debug, Clone, Serialize)] pub struct PublicPostgreSQLScore { /// Inner non-public Postgres cluster config. - pub postgres_score: K8sPostgreSQLScore, + pub config: PostgreSQLConfig, /// Public hostname for RW TLS passthrough (port 443 → cluster-rw:5432). 
pub hostname: String, } @@ -31,24 +31,24 @@ pub struct PublicPostgreSQLScore { impl PublicPostgreSQLScore { pub fn new(namespace: &str, hostname: &str) -> Self { Self { - postgres_score: K8sPostgreSQLScore::new(namespace), + config: PostgreSQLConfig::default().with_namespace(namespace), hostname: hostname.to_string(), } } } -impl Score for PublicPostgreSQLScore { +impl Score for PublicPostgreSQLScore { fn create_interpret(&self) -> Box> { - let rw_backend = format!("{}-rw", self.postgres_score.config.cluster_name); + let rw_backend = format!("{}-rw", self.config.cluster_name); let tls_route = TlsRoute { - namespace: self.postgres_score.config.namespace.clone(), + namespace: self.config.namespace.clone(), hostname: self.hostname.clone(), backend: rw_backend, target_port: 5432, }; Box::new(PublicPostgreSQLInterpret { - postgres_score: self.postgres_score.clone(), + config: self.config.clone(), tls_route, }) } @@ -56,7 +56,7 @@ impl Score for PublicPostg fn name(&self) -> String { format!( "PublicPostgreSQLScore({}:{})", - self.postgres_score.config.namespace, self.hostname + self.config.namespace, self.hostname ) } } @@ -64,12 +64,14 @@ impl Score for PublicPostg /// Custom interpret: deploy Postgres then install public TLS route. #[derive(Debug, Clone)] struct PublicPostgreSQLInterpret { - postgres_score: K8sPostgreSQLScore, + config: PostgreSQLConfig, tls_route: TlsRoute, } #[async_trait] -impl Interpret for PublicPostgreSQLInterpret { +impl Interpret + for PublicPostgreSQLInterpret +{ fn get_name(&self) -> InterpretName { InterpretName::Custom("PublicPostgreSQLInterpret") } @@ -82,9 +84,11 @@ impl Interpret for PublicP fn get_children(&self) -> Vec { todo!() } - async fn execute(&self, inventory: &Inventory, topo: &T) -> Result { + async fn execute(&self, _inventory: &Inventory, topo: &T) -> Result { // Deploy CNPG cluster first (creates -rw service) - self.postgres_score.interpret(inventory, topo).await?; + topo.deploy(&self.config) + .await + .map_err(|e| InterpretError::new(e))?; // Expose RW publicly via TLS passthrough topo.install_route(self.tls_route.clone()) @@ -93,7 +97,7 @@ impl Interpret for PublicP Ok(Outcome::success(format!( "Public CNPG cluster '{}' deployed with TLS passthrough route '{}'", - self.postgres_score.config.cluster_name.clone(), + self.config.cluster_name.clone(), self.tls_route.hostname ))) }
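
Follow-up sketch for the hardcoded public hostname called out in the commit message and in the TODO inside FailoverTopology::deploy ("we should be getting the public endpoint for a service by calling a method on TlsRouter capability"): the new TlsRouter::get_wildcard_domain() and get_router_port() methods could be combined to derive the primary's public Postgres endpoint. This is only a sketch under assumptions this patch does not make — get_wildcard_domain() is still todo!() on both topologies, and the "<cluster_name>.<wildcard_domain>" naming scheme is hypothetical:

    // Sketch: derive the public endpoint from the primary's TlsRouter capability
    // instead of the hardcoded "postgrestest.sto1.nationtech.io".
    // Assumes the route host follows "<cluster_name>.<wildcard_domain>", which
    // this patch does not guarantee.
    let wildcard = self
        .primary
        .get_wildcard_domain()
        .await?
        .ok_or_else(|| "Primary router has no wildcard domain configured".to_string())?;

    let endpoint = PostgreSQLEndpoint {
        host: format!("{primary_cluster_name}.{wildcard}"),
        port: self.primary.get_router_port().await,
    };

If the route installed by PublicPostgreSQLScore used the same naming scheme, the hostname hardcoded in the multisite example could then be dropped.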