diff --git a/examples/ntfy/src/main.rs b/examples/ntfy/src/main.rs
index f359a61..fc04e6e 100644
--- a/examples/ntfy/src/main.rs
+++ b/examples/ntfy/src/main.rs
@@ -14,6 +14,7 @@ async fn main() {
     maestro.register_all(vec![Box::new(NtfyScore {
         namespace: "monitoring".to_string(),
+        host: "localhost".to_string(),
     })]);

     harmony_cli::init(maestro, None).await.unwrap();
 }
diff --git a/harmony/src/modules/application/features/continuous_delivery.rs b/harmony/src/modules/application/features/continuous_delivery.rs
index 39513ab..7bcfb38 100644
--- a/harmony/src/modules/application/features/continuous_delivery.rs
+++ b/harmony/src/modules/application/features/continuous_delivery.rs
@@ -159,8 +159,8 @@ impl<
         info!("Pushed new helm chart {helm_chart}");

         error!("TODO Make building image configurable/skippable");
-        let image = self.application.build_push_oci_image().await?;
-        info!("Pushed new docker image {image}");
+        // let image = self.application.build_push_oci_image().await?;
+        // info!("Pushed new docker image {image}");

         info!("Installing ContinuousDelivery feature");
         // TODO this is a temporary hack for demo purposes, the deployment target should be driven
diff --git a/harmony/src/modules/application/features/monitoring.rs b/harmony/src/modules/application/features/monitoring.rs
index c89fa38..61429ea 100644
--- a/harmony/src/modules/application/features/monitoring.rs
+++ b/harmony/src/modules/application/features/monitoring.rs
@@ -1,6 +1,6 @@
 use async_trait::async_trait;
 use base64::{Engine as _, engine::general_purpose};
-use log::info;
+use log::{debug, info};

 use crate::{
@@ -9,7 +9,10 @@ use crate::{
         monitoring::{
             alert_channel::webhook_receiver::WebhookReceiver,
             application_monitoring::k8s_application_monitoring_score::ApplicationPrometheusMonitoringScore,
-            kube_prometheus::types::{NamespaceSelector, ServiceMonitor},
+            kube_prometheus::{
+                helm_prometheus_alert_score::HelmPrometheusAlertingScore,
+                types::{NamespaceSelector, ServiceMonitor},
+            },
             ntfy::ntfy::NtfyScore,
         },
     },
@@ -28,11 +31,13 @@ impl Applicatio
         info!("Ensuring monitoring is available for application");

         let ntfy = NtfyScore {
-            namespace: topology
-                .get_tenant_config()
-                .await
-                .expect("couldn't get tenant config")
-                .name,
+            // namespace: topology
+            //     .get_tenant_config()
+            //     .await
+            //     .expect("couldn't get tenant config")
+            //     .name,
+            namespace: "harmonydemo-staging".to_string(),
+            host: "localhost".to_string(),
         };
         ntfy.create_interpret()
             .execute(&Inventory::empty(), topology)
@@ -48,11 +53,13 @@ impl Applicatio
             ))
         );

+        debug!("ntfy_default_auth_header: {ntfy_default_auth_header}");
+
         let ntfy_default_auth_param = general_purpose::STANDARD
             .encode(ntfy_default_auth_header)
-            .rsplit("=")
-            .collect::<Vec<&str>>()[0]
-            .to_string();
+            .replace("=", "");
+
+        debug!("ntfy_default_auth_param: {ntfy_default_auth_param}");

         let ntfy_receiver = WebhookReceiver {
             name: "ntfy-webhook".to_string(),
@@ -60,7 +67,7 @@ impl Applicatio
             url::Url::parse(
                 format!(
                     "http://ntfy.{}.svc.cluster.local/rust-web-app?auth={ntfy_default_auth_param}",
-                    topology.get_tenant_config().await.expect("couldn't get tenant config").name
+                    "harmonydemo-staging".to_string()
                 )
                 .as_str(),
             )
@@ -73,7 +80,16 @@ impl Applicatio
             any: true,
             match_names: vec![],
         });
-        let alerting_score = ApplicationPrometheusMonitoringScore {
+
+        service_monitor.name = "rust-webapp".to_string();
+
+        // let alerting_score = ApplicationPrometheusMonitoringScore {
+        //     receivers: vec![Box::new(ntfy_receiver)],
+        //     rules: vec![],
+        //     service_monitors: vec![service_monitor],
+        // };
+
+        let alerting_score = HelmPrometheusAlertingScore {
             receivers: vec![Box::new(ntfy_receiver)],
             rules: vec![],
             service_monitors: vec![service_monitor],
diff --git a/harmony/src/modules/monitoring/application_monitoring/k8s_application_monitoring_score.rs b/harmony/src/modules/monitoring/application_monitoring/k8s_application_monitoring_score.rs
index 29e2893..f4a6c1b 100644
--- a/harmony/src/modules/monitoring/application_monitoring/k8s_application_monitoring_score.rs
+++ b/harmony/src/modules/monitoring/application_monitoring/k8s_application_monitoring_score.rs
@@ -24,7 +24,10 @@ impl Score for ApplicationPrometheusMonitoringScore {
     fn create_interpret(&self) -> Box<dyn Interpret<T>> {
-        let config = Arc::new(Mutex::new(PrometheusConfig::new()));
+        let mut prom_config = PrometheusConfig::new();
+        prom_config.alert_manager = true;
+
+        let config = Arc::new(Mutex::new(prom_config));
         config
             .try_lock()
             .expect("couldn't lock config")
diff --git a/harmony/src/modules/monitoring/kube_prometheus/helm/config.rs b/harmony/src/modules/monitoring/kube_prometheus/helm/config.rs
index 3f273c6..041e5f0 100644
--- a/harmony/src/modules/monitoring/kube_prometheus/helm/config.rs
+++ b/harmony/src/modules/monitoring/kube_prometheus/helm/config.rs
@@ -38,15 +38,15 @@ impl KubePrometheusConfig {
             node_exporter: false,
             prometheus: true,
             kubernetes_service_monitors: true,
-            kubernetes_api_server: false,
+            kubernetes_api_server: true,
             kubelet: true,
-            kube_controller_manager: false,
-            kube_etcd: false,
-            kube_proxy: false,
+            kube_controller_manager: true,
+            kube_etcd: true,
+            kube_proxy: true,
             kube_state_metrics: true,
             prometheus_operator: true,
-            core_dns: false,
-            kube_scheduler: false,
+            core_dns: true,
+            kube_scheduler: true,
             alert_receiver_configs: vec![],
             alert_rules: vec![],
             additional_service_monitors: vec![],
diff --git a/harmony/src/modules/monitoring/kube_prometheus/helm/kube_prometheus_helm_chart.rs b/harmony/src/modules/monitoring/kube_prometheus/helm/kube_prometheus_helm_chart.rs
index 14d9f5f..22b2f7a 100644
--- a/harmony/src/modules/monitoring/kube_prometheus/helm/kube_prometheus_helm_chart.rs
+++ b/harmony/src/modules/monitoring/kube_prometheus/helm/kube_prometheus_helm_chart.rs
@@ -70,12 +70,12 @@ pub fn kube_prometheus_helm_chart_score(
         r#"
 global:
   rbac:
-    create: false
+    create: true
 prometheus:
   enabled: {prometheus}
   prometheusSpec:
     resources:
-      requests: 
+      requests:
         cpu: 100m
         memory: 500Mi
       limits:
@@ -121,7 +121,7 @@ defaultRules:
 windowsMonitoring:
   enabled: {windows_monitoring}
   resources:
-    requests: 
+    requests:
       cpu: 100m
       memory: 150Mi
     limits:
@@ -130,13 +130,13 @@ windowsMonitoring:
 grafana:
   enabled: {grafana}
   resources:
-    requests: 
+    requests:
       cpu: 100m
       memory: 150Mi
     limits:
       cpu: 200m
       memory: 250Mi
-  initChownData: 
+  initChownData:
     resources:
       requests:
         cpu: 10m
@@ -157,7 +157,7 @@ kubernetesServiceMonitors:
 kubeApiServer:
   enabled: {kubernetes_api_server}
   resources:
-    requests: 
+    requests:
       cpu: 100m
       memory: 150Mi
     limits:
@@ -166,7 +166,7 @@ kubeApiServer:
 kubelet:
   enabled: {kubelet}
   resources:
-    requests: 
+    requests:
       cpu: 100m
       memory: 150Mi
     limits:
@@ -175,7 +175,7 @@ kubelet:
 kubeControllerManager:
   enabled: {kube_controller_manager}
   resources:
-    requests: 
+    requests:
       cpu: 100m
       memory: 150Mi
     limits:
@@ -184,7 +184,7 @@ kubeControllerManager:
 coreDns:
   enabled: {core_dns}
   resources:
-    requests: 
+    requests:
       cpu: 100m
       memory: 150Mi
     limits:
@@ -193,7 +193,7 @@ coreDns:
 kubeEtcd:
   enabled: {kube_etcd}
   resources:
-    requests: 
+    requests:
       cpu: 100m
       memory: 150Mi
     limits:
@@ -202,7 +202,7 @@ kubeEtcd:
 kubeScheduler:
   enabled: {kube_scheduler}
   resources:
-    requests: 
+    requests:
       cpu: 100m
       memory: 150Mi
     limits:
@@ -211,7 +211,7 @@ kubeScheduler:
 kubeProxy:
   enabled: {kube_proxy}
   resources:
-    requests: 
+    requests:
       cpu: 100m
       memory: 150Mi
     limits:
@@ -221,7 +221,7 @@ kubeStateMetrics:
   enabled: {kube_state_metrics}
 kube-state-metrics:
   resources:
-    requests: 
+    requests:
       cpu: 100m
       memory: 150Mi
     limits:
@@ -230,7 +230,7 @@ kube-state-metrics:
 nodeExporter:
   enabled: {node_exporter}
   resources:
-    requests: 
+    requests:
       cpu: 100m
       memory: 150Mi
     limits:
@@ -238,16 +238,16 @@ nodeExporter:
       memory: 250Mi
 prometheus-node-exporter:
   resources:
-    requests: 
+    requests:
       cpu: 100m
       memory: 150Mi
     limits:
       cpu: 200m
       memory: 250Mi
 prometheusOperator:
-  enabled: false
+  enabled: true
   resources:
-    requests: 
+    requests:
       cpu: 100m
       memory: 150Mi
     limits:
@@ -255,7 +255,7 @@ prometheusOperator:
       memory: 200Mi
   prometheusConfigReloader:
     resources:
-      requests: 
+      requests:
         cpu: 100m
         memory: 150Mi
       limits:
@@ -267,7 +267,7 @@ prometheusOperator:
       limits:
         cpu: 10m
         memory: 100Mi
-      requests: 
+      requests:
         cpu: 10m
         memory: 100Mi
     patch:
@@ -275,7 +275,7 @@ prometheusOperator:
       limits:
         cpu: 10m
         memory: 100Mi
-      requests: 
+      requests:
         cpu: 10m
         memory: 100Mi
 "#,
diff --git a/harmony/src/modules/monitoring/kube_prometheus/helm_prometheus_alert_score.rs b/harmony/src/modules/monitoring/kube_prometheus/helm_prometheus_alert_score.rs
index 0ab5f1b..c9a0c04 100644
--- a/harmony/src/modules/monitoring/kube_prometheus/helm_prometheus_alert_score.rs
+++ b/harmony/src/modules/monitoring/kube_prometheus/helm_prometheus_alert_score.rs
@@ -28,7 +28,7 @@ impl Score for HelmPrometheusAlert
             .expect("couldn't lock config")
             .additional_service_monitors = self.service_monitors.clone();
         Box::new(AlertingInterpret {
-            sender: KubePrometheus::new(),
+            sender: KubePrometheus { config },
             receivers: self.receivers.clone(),
             rules: self.rules.clone(),
         })
diff --git a/harmony/src/modules/monitoring/ntfy/helm/ntfy_helm_chart.rs b/harmony/src/modules/monitoring/ntfy/helm/ntfy_helm_chart.rs
index db7d9c4..076a8a3 100644
--- a/harmony/src/modules/monitoring/ntfy/helm/ntfy_helm_chart.rs
+++ b/harmony/src/modules/monitoring/ntfy/helm/ntfy_helm_chart.rs
@@ -3,7 +3,7 @@ use std::str::FromStr;

 use crate::modules::helm::chart::{HelmChartScore, HelmRepository};

-pub fn ntfy_helm_chart_score(namespace: String) -> HelmChartScore {
+pub fn ntfy_helm_chart_score(namespace: String, host: String) -> HelmChartScore {
     let values = format!(
         r#"
 replicaCount: 1
@@ -28,12 +28,12 @@ service:
   port: 80

 ingress:
-  enabled: false
+  enabled: true
   # annotations:
   #   kubernetes.io/ingress.class: nginx
   #   kubernetes.io/tls-acme: "true"
   hosts:
-    - host: ntfy.host.com
+    - host: {host}
       paths:
         - path: /
           pathType: ImplementationSpecific
diff --git a/harmony/src/modules/monitoring/ntfy/ntfy.rs b/harmony/src/modules/monitoring/ntfy/ntfy.rs
index 98d4fff..773b0ad 100644
--- a/harmony/src/modules/monitoring/ntfy/ntfy.rs
+++ b/harmony/src/modules/monitoring/ntfy/ntfy.rs
@@ -17,6 +17,7 @@ use crate::{
 #[derive(Debug, Clone, Serialize)]
 pub struct NtfyScore {
     pub namespace: String,
+    pub host: String,
 }

 impl<T: Topology> Score<T> for NtfyScore {
@@ -126,7 +127,7 @@ impl Interpret for NtfyInterpret {
         inventory: &Inventory,
         topology: &T,
     ) -> Result<Outcome, InterpretError> {
-        ntfy_helm_chart_score(self.score.namespace.clone())
+        ntfy_helm_chart_score(self.score.namespace.clone(), self.score.host.clone())
             .create_interpret()
             .execute(inventory, topology)
             .await?;
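
Note on the auth-param change in monitoring.rs above: standard base64 output is padded with trailing "=" characters, and the old `.rsplit("=").collect::<Vec<&str>>()[0]` picks the segment after the last "=", which is the empty string whenever padding is present. `.replace("=", "")` strips the padding while keeping the payload. A minimal sketch of the difference, assuming the same `base64` crate API the file already imports (the sample credentials are made up):

    use base64::{Engine as _, engine::general_purpose};

    fn main() {
        // 13 input bytes -> the encoded form ends with "==" padding.
        let encoded = general_purpose::STANDARD.encode("user:password");
        assert!(encoded.ends_with("=="));

        // Old approach: with trailing padding, the first segment produced
        // by rsplit("=") is empty, so indexing [0] yields "".
        let old = encoded.rsplit("=").collect::<Vec<&str>>()[0].to_string();
        assert_eq!(old, "");

        // New approach: drop every "=", keeping the payload intact
        // (the standard base64 alphabet uses "=" only for padding).
        let new = encoded.replace("=", "");
        assert_eq!(new, encoded.trim_end_matches('='));
    }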