diff --git a/examples/monitoring/src/main.rs b/examples/monitoring/src/main.rs
index da9c1c1..989b1ec 100644
--- a/examples/monitoring/src/main.rs
+++ b/examples/monitoring/src/main.rs
@@ -7,7 +7,13 @@ use harmony::{
     monitoring::{
         alert_channel::discord_alert_channel::DiscordWebhook,
         alert_rule::prometheus_alert_rule::AlertManagerRuleGroup,
-        kube_prometheus::helm_prometheus_alert_score::HelmPrometheusAlertingScore,
+        kube_prometheus::{
+            helm_prometheus_alert_score::HelmPrometheusAlertingScore,
+            types::{
+                HTTPScheme, MatchExpression, Operator, Selector, ServiceMonitor,
+                ServiceMonitorEndpoint,
+            },
+        },
     },
     prometheus::alerts::{
         infra::dell_server::{
diff --git a/examples/monitoring_with_tenant/src/main.rs b/examples/monitoring_with_tenant/src/main.rs
index 080cea7..ec80542 100644
--- a/examples/monitoring_with_tenant/src/main.rs
+++ b/examples/monitoring_with_tenant/src/main.rs
@@ -1,3 +1,5 @@
+use std::collections::HashMap;
+
 use harmony::{
     data::Id,
     inventory::Inventory,
@@ -6,7 +8,13 @@ use harmony::{
     monitoring::{
         alert_channel::discord_alert_channel::DiscordWebhook,
         alert_rule::prometheus_alert_rule::AlertManagerRuleGroup,
-        kube_prometheus::helm_prometheus_alert_score::HelmPrometheusAlertingScore,
+        kube_prometheus::{
+            helm_prometheus_alert_score::HelmPrometheusAlertingScore,
+            types::{
+                HTTPScheme, MatchExpression, Operator, Selector, ServiceMonitor,
+                ServiceMonitorEndpoint,
+            },
+        },
     },
     prometheus::alerts::k8s::pvc::high_pvc_fill_rate_over_two_days,
     tenant::TenantScore,
@@ -44,9 +52,31 @@ async fn main() {
     let additional_rules =
         AlertManagerRuleGroup::new("pvc-alerts", vec![high_pvc_fill_rate_over_two_days_alert]);
 
+    let service_monitor_endpoint = ServiceMonitorEndpoint {
+        port: Some("80".to_string()),
+        path: "/metrics".to_string(),
+        scheme: HTTPScheme::HTTP,
+        ..Default::default()
+    };
+
+    let service_monitor = ServiceMonitor {
+        name: "test-service-monitor".to_string(),
+        selector: Selector {
+            match_labels: HashMap::new(),
+            match_expressions: vec![MatchExpression {
+                key: "test".to_string(),
+                operator: Operator::In,
+                values: vec!["test-service".to_string()],
+            }],
+        },
+        endpoints: vec![service_monitor_endpoint],
+        ..Default::default()
+    };
+
     let alerting_score = HelmPrometheusAlertingScore {
         receivers: vec![Box::new(discord_receiver)],
         rules: vec![Box::new(additional_rules)],
+        service_monitors: vec![service_monitor],
     };
     let mut maestro = Maestro::::initialize(
         Inventory::autoload(),
diff --git a/harmony/src/domain/topology/installable.rs b/harmony/src/domain/topology/installable.rs
index 0e81448..72b7b31 100644
--- a/harmony/src/domain/topology/installable.rs
+++ b/harmony/src/domain/topology/installable.rs
@@ -4,7 +4,7 @@ use crate::{interpret::InterpretError, inventory::Inventory};
 
 #[async_trait]
 pub trait Installable<T: Topology>: Send + Sync {
-    fn configure(&self, inventory: &Inventory, topology: &T) -> Result<(), InterpretError>;
+    async fn configure(&self, inventory: &Inventory, topology: &T) -> Result<(), InterpretError>;
 
     async fn ensure_installed(
         &self,
diff --git a/harmony/src/domain/topology/k8s_anywhere.rs b/harmony/src/domain/topology/k8s_anywhere.rs
index cb5cd76..5eebd1d 100644
--- a/harmony/src/domain/topology/k8s_anywhere.rs
+++ b/harmony/src/domain/topology/k8s_anywhere.rs
@@ -39,7 +39,6 @@ pub struct K8sAnywhereTopology {
     k8s_state: Arc>>,
     tenant_manager: Arc<OnceCell<K8sTenantManager>>,
     config: Arc<K8sAnywhereConfig>,
-    tenant_manager_config: OnceCell<TenantConfig>,
 }
 
 #[async_trait]
@@ -74,7 +73,6 @@ impl K8sAnywhereTopology {
             k8s_state: Arc::new(OnceCell::new()),
             tenant_manager: Arc::new(OnceCell::new()),
             config: Arc::new(K8sAnywhereConfig::from_env()),
-            tenant_manager_config: OnceCell::new(),
         }
     }
 
@@ -83,7 +81,6 @@ impl K8sAnywhereTopology {
             k8s_state: Arc::new(OnceCell::new()),
             tenant_manager: Arc::new(OnceCell::new()),
             config: Arc::new(config),
-            tenant_manager_config: OnceCell::new(),
         }
     }
 
@@ -199,16 +196,10 @@ impl K8sAnywhereTopology {
                 let k8s_client = self.k8s_client().await?;
                 Ok(K8sTenantManager::new(k8s_client))
             })
-            .await
-            .unwrap();
+            .await?;
         Ok(())
     }
 
-    async fn store_tenant_config(&self, config: TenantConfig) {
-        self.tenant_manager_config
-            .get_or_init(|| async { config })
-            .await;
-    }
 
     fn get_k8s_tenant_manager(&self) -> Result<&K8sTenantManager, ExecutorError> {
         match self.tenant_manager.get() {
@@ -289,13 +280,15 @@ impl HelmCommand for K8sAnywhereTopology {}
 #[async_trait]
 impl TenantManager for K8sAnywhereTopology {
     async fn provision_tenant(&self, config: &TenantConfig) -> Result<(), ExecutorError> {
-        self.store_tenant_config(config.clone()).await;
         self.get_k8s_tenant_manager()?
             .provision_tenant(config)
            .await
     }
 
-    fn get_tenant_config(&self) -> Option<TenantConfig> {
-        self.tenant_manager_config.get().cloned()
+    async fn get_tenant_config(&self) -> Option<TenantConfig> {
+        self.get_k8s_tenant_manager()
+            .ok()?
+            .get_tenant_config()
+            .await
     }
 }
diff --git a/harmony/src/domain/topology/oberservability/monitoring.rs b/harmony/src/domain/topology/oberservability/monitoring.rs
index f65e159..6d60c7a 100644
--- a/harmony/src/domain/topology/oberservability/monitoring.rs
+++ b/harmony/src/domain/topology/oberservability/monitoring.rs
@@ -27,7 +27,7 @@ impl<S: AlertSender + Installable<T>, T: Topology> Interpret<T> for AlertingInte
         inventory: &Inventory,
         topology: &T,
     ) -> Result {
-        self.sender.configure(inventory, topology)?;
+        self.sender.configure(inventory, topology).await?;
         for receiver in self.receivers.iter() {
             receiver.install(&self.sender).await?;
         }
diff --git a/harmony/src/domain/topology/tenant/k8s.rs b/harmony/src/domain/topology/tenant/k8s.rs
index 36cf9f0..723c0d9 100644
--- a/harmony/src/domain/topology/tenant/k8s.rs
+++ b/harmony/src/domain/topology/tenant/k8s.rs
@@ -5,7 +5,6 @@ use crate::{
     topology::k8s::{ApplyStrategy, K8sClient},
 };
 use async_trait::async_trait;
-use derive_new::new;
 use k8s_openapi::{
     api::{
         core::v1::{LimitRange, Namespace, ResourceQuota},
@@ -19,12 +18,23 @@ use kube::Resource;
 use log::{debug, info, warn};
 use serde::de::DeserializeOwned;
 use serde_json::json;
+use tokio::sync::OnceCell;
 
 use super::{TenantConfig, TenantManager};
 
-#[derive(new, Clone, Debug)]
+#[derive(Clone, Debug)]
 pub struct K8sTenantManager {
     k8s_client: Arc<K8sClient>,
+    k8s_tenant_config: Arc<OnceCell<TenantConfig>>,
+}
+
+impl K8sTenantManager {
+    pub fn new(client: Arc<K8sClient>) -> Self {
+        Self {
+            k8s_client: client,
+            k8s_tenant_config: Arc::new(OnceCell::new()),
+        }
+    }
 }
 
 impl K8sTenantManager {
@@ -147,7 +157,7 @@ impl K8sTenantManager {
             "spec": {
                 "limits": [
                     {
-                        "type": "Container", 
+                        "type": "Container",
                         "default": {
                             "cpu": "500m",
                             "memory": "500Mi"
@@ -391,6 +401,9 @@ impl K8sTenantManager {
         Ok(network_policy)
     }
 
+    fn store_config(&self, config: &TenantConfig) {
+        let _ = self.k8s_tenant_config.set(config.clone());
+    }
 }
 #[async_trait]
 impl TenantManager for K8sTenantManager {
@@ -419,9 +432,10 @@ impl TenantManager for K8sTenantManager {
             "Success provisionning K8s tenant id {} name {}",
             config.id, config.name
         );
+        self.store_config(config);
         Ok(())
     }
-    fn get_tenant_config(&self) -> Option<TenantConfig> {
-        todo!()
+    async fn get_tenant_config(&self) -> Option<TenantConfig> {
+        self.k8s_tenant_config.get().cloned()
     }
 }
diff --git a/harmony/src/domain/topology/tenant/manager.rs b/harmony/src/domain/topology/tenant/manager.rs
index 0e0d426..d7c75ce 100644
--- a/harmony/src/domain/topology/tenant/manager.rs
+++ b/harmony/src/domain/topology/tenant/manager.rs
@@ -16,5 +16,5 @@ pub trait TenantManager {
     /// * `config`: The desired configuration for the new tenant.
     async fn provision_tenant(&self, config: &TenantConfig) -> Result<(), ExecutorError>;
 
-    fn get_tenant_config(&self) -> Option<TenantConfig>;
+    async fn get_tenant_config(&self) -> Option<TenantConfig>;
 }
diff --git a/harmony/src/modules/monitoring/kube_prometheus/helm/kube_prometheus_helm_chart.rs b/harmony/src/modules/monitoring/kube_prometheus/helm/kube_prometheus_helm_chart.rs
index 6158f5f..3fd773f 100644
--- a/harmony/src/modules/monitoring/kube_prometheus/helm/kube_prometheus_helm_chart.rs
+++ b/harmony/src/modules/monitoring/kube_prometheus/helm/kube_prometheus_helm_chart.rs
@@ -11,7 +11,9 @@ use std::{
 use crate::modules::{
     helm::chart::HelmChartScore,
     monitoring::kube_prometheus::types::{
-        AlertGroup, AlertManager, AlertManagerAdditionalPromRules, AlertManagerConfig, AlertManagerRoute, AlertManagerSpec, AlertManagerValues, ConfigReloader, Limits, PrometheusConfig, Requests, Resources
+        AlertGroup, AlertManager, AlertManagerAdditionalPromRules, AlertManagerConfig,
+        AlertManagerRoute, AlertManagerSpec, AlertManagerValues, ConfigReloader, Limits,
+        PrometheusConfig, Requests, Resources,
     },
 };
 
diff --git a/harmony/src/modules/monitoring/kube_prometheus/prometheus.rs b/harmony/src/modules/monitoring/kube_prometheus/prometheus.rs
index 0216957..fdf2057 100644
--- a/harmony/src/modules/monitoring/kube_prometheus/prometheus.rs
+++ b/harmony/src/modules/monitoring/kube_prometheus/prometheus.rs
@@ -35,8 +35,8 @@ impl AlertSender for Prometheus {
 
 #[async_trait]
 impl Installable for Prometheus {
-    fn configure(&self, _inventory: &Inventory, topology: &T) -> Result<(), InterpretError> {
-        self.configure_with_topology(topology);
+    async fn configure(&self, _inventory: &Inventory, topology: &T) -> Result<(), InterpretError> {
+        self.configure_with_topology(topology).await;
         Ok(())
     }
 
@@ -62,9 +62,10 @@ impl Prometheus {
         }
     }
 
-    pub fn configure_with_topology(&self, topology: &T) {
+    pub async fn configure_with_topology(&self, topology: &T) {
         let ns = topology
             .get_tenant_config()
+            .await
             .map(|cfg| cfg.name.clone())
             .unwrap_or_else(|| "monitoring".to_string());
         error!("This must be refactored, see comments in pr #74");
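
With this diff, `TenantManager::get_tenant_config` (and `Installable::configure` with it) becomes async, so every call site has to await it. A minimal call-site sketch, assuming the `TenantManager` trait is in scope (imports elided) and using an illustrative helper name that is not part of the diff:

// Sketch only: resolving a namespace from the tenant config, mirroring
// Prometheus::configure_with_topology above. `resolve_tenant_namespace` is a
// hypothetical helper; `cfg.name` and the "monitoring" fallback come from the diff.
async fn resolve_tenant_namespace<T: TenantManager>(topology: &T) -> String {
    topology
        .get_tenant_config()
        .await // now async: the provisioned config lives in K8sTenantManager's OnceCell
        .map(|cfg| cfg.name)
        .unwrap_or_else(|| "monitoring".to_string())
}

The fallback matters because `K8sTenantManager` only stores the config after a successful `provision_tenant`, so callers that run before provisioning still get a usable default namespace.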