diff --git a/harmony/src/domain/topology/installable.rs b/harmony/src/domain/topology/installable.rs
index 8d8178c..0e81448 100644
--- a/harmony/src/domain/topology/installable.rs
+++ b/harmony/src/domain/topology/installable.rs
@@ -4,6 +4,9 @@ use crate::{interpret::InterpretError, inventory::Inventory};
 
 #[async_trait]
 pub trait Installable<T: Topology>: Send + Sync {
+    /// Configure this component from the topology before it is installed.
+    fn configure(&self, inventory: &Inventory, topology: &T) -> Result<(), InterpretError>;
+
     async fn ensure_installed(
         &self,
         inventory: &Inventory,
diff --git a/harmony/src/domain/topology/k8s_anywhere.rs b/harmony/src/domain/topology/k8s_anywhere.rs
index 6742b5a..61a0642 100644
--- a/harmony/src/domain/topology/k8s_anywhere.rs
+++ b/harmony/src/domain/topology/k8s_anywhere.rs
@@ -35,6 +35,7 @@ enum K8sSource {
 
 pub struct K8sAnywhereTopology {
     k8s_state: OnceCell<Option<K8sState>>,
     tenant_manager: OnceCell<K8sTenantManager>,
+    tenant_manager_config: OnceCell<TenantConfig>,
     config: K8sAnywhereConfig,
 }
@@ -60,6 +61,7 @@ impl K8sAnywhereTopology {
         Self {
             k8s_state: OnceCell::new(),
             tenant_manager: OnceCell::new(),
+            tenant_manager_config: OnceCell::new(),
             config: K8sAnywhereConfig::from_env(),
         }
     }
@@ -68,6 +70,7 @@ impl K8sAnywhereTopology {
         Self {
             k8s_state: OnceCell::new(),
             tenant_manager: OnceCell::new(),
+            tenant_manager_config: OnceCell::new(),
             config,
         }
     }
@@ -182,7 +185,7 @@ impl K8sAnywhereTopology {
         self.tenant_manager
             .get_or_try_init(async || -> Result<K8sTenantManager, InterpretError> {
                 let k8s_client = self.k8s_client().await?;
-                Ok(K8sTenantManager::new(k8s_client))
+                Ok(K8sTenantManager::new(k8s_client, TenantConfig::default()))
             })
             .await
             .unwrap();
@@ -272,4 +275,8 @@ impl TenantManager for K8sAnywhereTopology {
             .provision_tenant(config)
             .await
     }
+
+    fn get_tenant_config(&self) -> Option<TenantConfig> {
+        self.tenant_manager_config.get().cloned()
+    }
 }
diff --git a/harmony/src/domain/topology/oberservability/monitoring.rs b/harmony/src/domain/topology/oberservability/monitoring.rs
index ed7e936..f65e159 100644
--- a/harmony/src/domain/topology/oberservability/monitoring.rs
+++ b/harmony/src/domain/topology/oberservability/monitoring.rs
@@ -27,6 +27,7 @@ impl<S: AlertSender + Installable<T>, T: Topology> Interpret<T> for AlertingInte
         inventory: &Inventory,
         topology: &T,
     ) -> Result<Outcome, InterpretError> {
+        self.sender.configure(inventory, topology)?;
         for receiver in self.receivers.iter() {
             receiver.install(&self.sender).await?;
         }
diff --git a/harmony/src/domain/topology/tenant/k8s.rs b/harmony/src/domain/topology/tenant/k8s.rs
index a03e8d7..45f4530 100644
--- a/harmony/src/domain/topology/tenant/k8s.rs
+++ b/harmony/src/domain/topology/tenant/k8s.rs
@@ -25,6 +25,7 @@ use super::{TenantConfig, TenantManager};
 #[derive(new)]
 pub struct K8sTenantManager {
     k8s_client: Arc<K8sClient>,
+    k8s_tenant_config: TenantConfig,
 }
 
 impl K8sTenantManager {
@@ -324,4 +325,7 @@ impl TenantManager for K8sTenantManager {
         );
         Ok(())
     }
+    fn get_tenant_config(&self) -> Option<TenantConfig> {
+        Some(self.k8s_tenant_config.clone())
+    }
 }
diff --git a/harmony/src/domain/topology/tenant/manager.rs b/harmony/src/domain/topology/tenant/manager.rs
index 0df380d..0e0d426 100644
--- a/harmony/src/domain/topology/tenant/manager.rs
+++ b/harmony/src/domain/topology/tenant/manager.rs
@@ -15,4 +15,7 @@ pub trait TenantManager {
     /// # Arguments
     /// * `config`: The desired configuration for the new tenant.
     async fn provision_tenant(&self, config: &TenantConfig) -> Result<(), ExecutorError>;
+
+    /// Returns the tenant configuration for this manager, if one is set.
+    fn get_tenant_config(&self) -> Option<TenantConfig>;
 }
diff --git a/harmony/src/modules/monitoring/kube_prometheus/helm/config.rs b/harmony/src/modules/monitoring/kube_prometheus/helm/config.rs
index ecbf8d8..3aede84 100644
--- a/harmony/src/modules/monitoring/kube_prometheus/helm/config.rs
+++ b/harmony/src/modules/monitoring/kube_prometheus/helm/config.rs
@@ -1,13 +1,12 @@
 use serde::Serialize;
 
-use crate::modules::monitoring::{
-    alert_rule::prometheus_alert_rule::AlertManagerRuleGroup,
-    kube_prometheus::types::{AlertManagerAdditionalPromRules, AlertManagerChannelConfig},
-};
+use crate::modules::monitoring::kube_prometheus::types::{
+    AlertManagerAdditionalPromRules, AlertManagerChannelConfig,
+};
 
 #[derive(Debug, Clone, Serialize)]
 pub struct KubePrometheusConfig {
-    pub namespace: String,
+    pub namespace: Option<String>,
     pub default_rules: bool,
     pub windows_monitoring: bool,
     pub alert_manager: bool,
@@ -30,7 +29,7 @@
 impl KubePrometheusConfig {
     pub fn new() -> Self {
         Self {
-            namespace: "monitoring".into(),
+            namespace: None,
             default_rules: true,
             windows_monitoring: false,
             alert_manager: true,
diff --git a/harmony/src/modules/monitoring/kube_prometheus/helm/kube_prometheus_helm_chart.rs b/harmony/src/modules/monitoring/kube_prometheus/helm/kube_prometheus_helm_chart.rs
index 843a677..9616c4b 100644
--- a/harmony/src/modules/monitoring/kube_prometheus/helm/kube_prometheus_helm_chart.rs
+++ b/harmony/src/modules/monitoring/kube_prometheus/helm/kube_prometheus_helm_chart.rs
@@ -184,8 +184,9 @@ prometheus:
     values.push_str(&alert_manager_additional_rules_yaml);
 
     debug!("full values.yaml: \n {:#}", values);
+
     HelmChartScore {
-        namespace: Some(NonBlankString::from_str(&config.namespace).unwrap()),
+        namespace: Some(NonBlankString::from_str(&config.namespace.clone().unwrap()).unwrap()),
         release_name: NonBlankString::from_str("kube-prometheus").unwrap(),
         chart_name: NonBlankString::from_str(
             "oci://ghcr.io/prometheus-community/charts/kube-prometheus-stack",
diff --git a/harmony/src/modules/monitoring/kube_prometheus/helm_prometheus_alert_score.rs b/harmony/src/modules/monitoring/kube_prometheus/helm_prometheus_alert_score.rs
index 8844309..a47fa92 100644
--- a/harmony/src/modules/monitoring/kube_prometheus/helm_prometheus_alert_score.rs
+++ b/harmony/src/modules/monitoring/kube_prometheus/helm_prometheus_alert_score.rs
@@ -8,6 +8,7 @@ use crate::{
     topology::{
         HelmCommand, Topology,
         oberservability::monitoring::{AlertReceiver, AlertRule, AlertingInterpret},
+        tenant::TenantManager,
     },
 };
 
@@ -17,12 +18,10 @@ pub struct HelmPrometheusAlertingScore {
     pub rules: Vec<Box<dyn AlertRule<Prometheus>>>,
 }
 
-impl<T: Topology + HelmCommand> Score<T> for HelmPrometheusAlertingScore {
+impl<T: Topology + HelmCommand + TenantManager> Score<T> for HelmPrometheusAlertingScore {
     fn create_interpret(&self) -> Box<dyn Interpret<T>> {
         Box::new(AlertingInterpret {
-            sender: Prometheus {
-                config: Arc::new(Mutex::new(KubePrometheusConfig::new())),
-            },
+            sender: Prometheus::new(),
             receivers: self.receivers.clone(),
             rules: self.rules.clone(),
         })
diff --git a/harmony/src/modules/monitoring/kube_prometheus/prometheus.rs b/harmony/src/modules/monitoring/kube_prometheus/prometheus.rs
index 148f91c..acd9dae 100644
--- a/harmony/src/modules/monitoring/kube_prometheus/prometheus.rs
+++ b/harmony/src/modules/monitoring/kube_prometheus/prometheus.rs
@@ -10,9 +10,10 @@ use crate::{
     modules::monitoring::alert_rule::prometheus_alert_rule::AlertManagerRuleGroup,
     score,
     topology::{
-        HelmCommand, Topology,
+        HelmCommand, K8sAnywhereTopology, Topology,
         installable::Installable,
         oberservability::monitoring::{AlertReceiver, AlertRule, AlertSender},
+        tenant::TenantManager,
     },
 };
@@ -33,7 +34,12 @@ impl AlertSender for Prometheus {
 }
 
 #[async_trait]
-impl<T: Topology + HelmCommand> Installable<T> for Prometheus {
+impl<T: Topology + HelmCommand + TenantManager> Installable<T> for Prometheus {
+    fn configure(&self, _inventory: &Inventory, topology: &T) -> Result<(), InterpretError> {
+        self.configure_with_topology(topology);
+        Ok(())
+    }
+
     async fn ensure_installed(
         &self,
         inventory: &Inventory,
@@ -50,6 +56,20 @@ pub struct Prometheus {
 }
 
 impl Prometheus {
+    pub fn new() -> Self {
+        Self {
+            config: Arc::new(Mutex::new(KubePrometheusConfig::new())),
+        }
+    }
+
+    /// Resolve the target namespace from the topology's tenant config,
+    /// falling back to the previous hardcoded default, "monitoring".
+    pub fn configure_with_topology<T: TenantManager>(&self, topology: &T) {
+        let ns = topology.get_tenant_config().map(|cfg| cfg.name.clone())
+            .unwrap_or_else(|| "monitoring".to_string());
+        self.config.lock().unwrap().namespace = Some(ns);
+    }
+
     pub async fn install_receiver(
         &self,
         prometheus_receiver: &dyn PrometheusReceiver,
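
Note: the following is a minimal, standalone sketch of the configure-before-install flow this diff introduces, not harmony code. `TenantTopology` and `Sender` are hypothetical stand-ins for `K8sAnywhereTopology` and `Prometheus`; only the namespace-defaulting rule mirrors `configure_with_topology` above.

use std::sync::{Arc, Mutex};

#[derive(Clone, Debug)]
struct TenantConfig {
    name: String,
}

// Stand-in for the new `TenantManager::get_tenant_config` accessor.
trait TenantManager {
    fn get_tenant_config(&self) -> Option<TenantConfig>;
}

// Hypothetical topology: tenant-scoped when `tenant` is `Some`.
struct TenantTopology {
    tenant: Option<TenantConfig>,
}

impl TenantManager for TenantTopology {
    fn get_tenant_config(&self) -> Option<TenantConfig> {
        self.tenant.clone()
    }
}

// Stand-in for `Prometheus`, whose target namespace is now unset until
// `configure` runs.
struct Sender {
    namespace: Arc<Mutex<Option<String>>>,
}

impl Sender {
    // Same defaulting rule as `configure_with_topology`: use the tenant
    // name when present, otherwise fall back to "monitoring".
    fn configure_with_topology<T: TenantManager>(&self, topology: &T) {
        let ns = topology
            .get_tenant_config()
            .map(|cfg| cfg.name)
            .unwrap_or_else(|| "monitoring".to_string());
        *self.namespace.lock().unwrap() = Some(ns);
    }
}

fn main() {
    let sender = Sender {
        namespace: Arc::new(Mutex::new(None)),
    };

    // Tenant-scoped topology: the chart lands in the tenant's namespace.
    let tenant_scoped = TenantTopology {
        tenant: Some(TenantConfig { name: "team-a".into() }),
    };
    sender.configure_with_topology(&tenant_scoped);
    assert_eq!(sender.namespace.lock().unwrap().as_deref(), Some("team-a"));

    // No tenant: the previous hardcoded default is preserved.
    let bare = TenantTopology { tenant: None };
    sender.configure_with_topology(&bare);
    assert_eq!(sender.namespace.lock().unwrap().as_deref(), Some("monitoring"));
}

Running it exercises both branches: a tenant-scoped topology installs into the tenant's namespace, while a topology without a tenant keeps the old "monitoring" default.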