feat: deploys cluster monitoring stack from monitoring score on k8sanywhere topology
All checks were successful
Run Check Script / check (push) Successful in 1m46s
Run Check Script / check (pull_request) Successful in 1m47s

Willem 2025-06-11 15:06:39 -04:00
parent 238d1f85e2
commit 613def5e0b
10 changed files with 154 additions and 98 deletions


@@ -0,0 +1,12 @@
[package]
name = "webhook_sender"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
[dependencies]
harmony = { version = "0.1.0", path = "../../harmony" }
harmony_cli = { version = "0.1.0", path = "../../harmony_cli" }
tokio.workspace = true
url.workspace = true


@@ -0,0 +1,23 @@
use harmony::{
inventory::Inventory,
maestro::Maestro,
modules::monitoring::monitoring_alerting::MonitoringAlertingScore,
topology::{K8sAnywhereTopology, oberservability::K8sMonitorConfig},
};
#[tokio::main]
async fn main() {
let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
Inventory::autoload(),
K8sAnywhereTopology::new(),
)
.await
.unwrap();
let monitoring = MonitoringAlertingScore {
alert_channel_configs: None,
};
maestro.register_all(vec![Box::new(monitoring)]);
harmony_cli::init(maestro, None).await.unwrap();
}
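
For comparison, a hedged sketch of wiring an alert channel into the score. It assumes DiscordWebhookAlertChannel (added later in this commit) implements AlertChannelConfig; its exact module path is not shown in this diff.

    // Hypothetical wiring inside main(); the DiscordWebhookAlertChannel
    // import path and a real webhook URL are assumptions, not shown here.
    let discord = DiscordWebhookAlertChannel {
        webhook_url: url::Url::parse("https://discord.com/api/webhooks/123/abc").unwrap(),
    };
    let monitoring = MonitoringAlertingScore {
        alert_channel_configs: Some(vec![Box::new(discord)]),
    };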


@@ -10,7 +10,10 @@ use crate::{
interpret::{InterpretError, Outcome},
inventory::Inventory,
maestro::Maestro,
modules::{k3d::K3DInstallationScore, monitoring::kube_prometheus::kube_prometheus_helm_chart_score::kube_prometheus_helm_chart_score},
modules::{
k3d::K3DInstallationScore,
monitoring::kube_prometheus::kube_prometheus_helm_chart_score::kube_prometheus_helm_chart_score,
},
topology::LocalhostTopology,
};
@@ -193,9 +196,7 @@ impl K8sAnywhereTopology {
self.k8s_monitor
.get_or_try_init(async || -> Result<K8sMonitor, String> {
let config = K8sMonitorConfig {
chart: kube_prometheus_helm_chart_score(),
};
let config = K8sMonitorConfig::cluster_monitor();
Ok(K8sMonitor { config })
})
.await
@@ -302,12 +303,14 @@ impl TenantManager for K8sAnywhereTopology {
}
#[async_trait]
impl Monitor for K8sAnywhereTopology {
async fn provision_monitor(
async fn provision_monitor<T: Topology + HelmCommand>(
&self,
inventory: &Inventory,
topology: &T,
alert_receivers: Option<Vec<Box<dyn AlertChannelConfig>>>,
) -> Result<Outcome, InterpretError> {
self.get_k8s_monitor()?
.provision_monitor(alert_receivers)
.provision_monitor(inventory, topology, alert_receivers)
.await
}


@@ -3,14 +3,13 @@ use std::sync::Arc;
use async_trait::async_trait;
use serde::Serialize;
use crate::score::Score;
use crate::topology::HelmCommand;
use crate::{
interpret::{InterpretError, Outcome},
inventory::Inventory,
modules::{helm::chart::HelmChartInterpret, monitoring::kube_prometheus::{
config::KubePrometheusConfig,
kube_prometheus_helm_chart_score::kube_prometheus_helm_chart_score,
}},
topology::{K8sAnywhereTopology, Topology},
topology::Topology,
};
use super::{
@@ -25,19 +24,23 @@ pub struct K8sMonitor {
#[async_trait]
impl Monitor for K8sMonitor {
async fn provision_monitor(
async fn provision_monitor<T: Topology + HelmCommand>(
&self,
alert_receivers: Option<Vec<Box<dyn AlertChannelConfig>>>,
inventory: &Inventory,
topology: &T,
alert_channels: Option<Vec<Box<dyn AlertChannelConfig>>>,
) -> Result<Outcome, InterpretError> {
if let Some(receivers) = alert_receivers {
let alert_channels = self.build_alert_channels(receivers).await?;
if let Some(channels) = alert_channels {
let alert_channels = self.build_alert_channels(channels).await?;
for channel in alert_channels {
channel.register_alert_channel().await?;
}
}
let chart = self.config.chart.clone();
//
chart.create_interpret();
chart
.create_interpret()
.execute(inventory, topology)
.await?;
Ok(Outcome::success("installed monitor".to_string()))
}


@@ -1,12 +1,23 @@
use serde::Serialize;
use crate::modules::helm::chart::HelmChartScore;
use crate::modules::{
helm::chart::HelmChartScore,
monitoring::kube_prometheus::kube_prometheus_helm_chart_score::kube_prometheus_helm_chart_score,
};
pub mod monitoring;
pub mod k8s;
pub mod monitoring;
#[derive(Debug, Clone, Serialize)]
pub struct K8sMonitorConfig {
//probably need to do something better here
pub chart: HelmChartScore,
}
impl K8sMonitorConfig {
pub fn cluster_monitor() -> Self {
Self {
chart: kube_prometheus_helm_chart_score(),
}
}
}
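
The new cluster_monitor() constructor is thin sugar over the public chart field; both lines below build the same default config:

    let a = K8sMonitorConfig::cluster_monitor();
    let b = K8sMonitorConfig { chart: kube_prometheus_helm_chart_score() };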


@@ -1,13 +1,12 @@
use async_trait::async_trait;
use dyn_clone::DynClone;
use serde::Serialize;
use std::fmt::Debug;
use std::sync::Arc;
use crate::executors::ExecutorError;
use crate::interpret::InterpretError;
use crate::inventory::Inventory;
use crate::topology::HelmCommand;
use crate::{interpret::Outcome, topology::Topology};
/// Represents an entity responsible for collecting and organizing observability data
@@ -16,8 +15,10 @@ use crate::{interpret::Outcome, topology::Topology};
/// monitoring data, enabling consistent processing regardless of the underlying data source.
#[async_trait]
pub trait Monitor {
async fn provision_monitor(
async fn provision_monitor<T: Topology + HelmCommand>(
&self,
inventory: &Inventory,
topology: &T,
alert_receivers: Option<Vec<Box<dyn AlertChannelConfig>>>,
) -> Result<Outcome, InterpretError>;
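
For downstream implementors, a minimal sketch of satisfying the new generic signature, using a hypothetical NoopMonitor that ignores its inputs:

    struct NoopMonitor;

    #[async_trait]
    impl Monitor for NoopMonitor {
        async fn provision_monitor<T: Topology + HelmCommand>(
            &self,
            inventory: &Inventory,
            topology: &T,
            alert_receivers: Option<Vec<Box<dyn AlertChannelConfig>>>,
        ) -> Result<Outcome, InterpretError> {
            // A real monitor would register alert channels and install its
            // chart against `topology`; this stub just reports success.
            let _ = (inventory, topology, alert_receivers);
            Ok(Outcome::success("noop monitor".to_string()))
        }
    }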


@@ -1,7 +1,5 @@
use url::Url;
#[derive(Debug, Clone)]
pub struct DiscordWebhookAlertChannel {
pub webhook_url: Url,


@@ -160,67 +160,65 @@ prometheus:
repository: None,
}
}
// let alertmanager_config = alert_manager_yaml_builder(&config);
// values.push_str(&alertmanager_config);
//
// fn alert_manager_yaml_builder(config: &KubePrometheusConfig) -> String {
// let mut receivers = String::new();
// let mut routes = String::new();
// let mut global_configs = String::new();
// let alert_manager = config.alert_manager;
// for alert_channel in &config.alert_channel {
// match alert_channel {
// AlertChannel::Discord { name, .. } => {
// let (receiver, route) = discord_alert_builder(name);
// info!("discord receiver: {} \nroute: {}", receiver, route);
// receivers.push_str(&receiver);
// routes.push_str(&route);
// }
// AlertChannel::Slack {
// slack_channel,
// webhook_url,
// } => {
// let (receiver, route) = slack_alert_builder(slack_channel);
// info!("slack receiver: {} \nroute: {}", receiver, route);
// receivers.push_str(&receiver);
//
// routes.push_str(&route);
// let global_config = format!(
// r#"
// global:
// slack_api_url: {webhook_url}"#
// );
//
// global_configs.push_str(&global_config);
// }
// AlertChannel::Smpt { .. } => todo!(),
// }
// }
// info!("after alert receiver: {}", receivers);
// info!("after alert routes: {}", routes);
//
// let alertmanager_config = format!(
// r#"
//alertmanager:
// enabled: {alert_manager}
// config: {global_configs}
// route:
// group_by: ['job']
// group_wait: 30s
// group_interval: 5m
// repeat_interval: 12h
// routes:
//{routes}
// receivers:
// - name: 'null'
//{receivers}"#
// );
//
// info!("alert manager config: {}", alertmanager_config);
// alertmanager_config
// }
//fn discord_alert_builder(release_name: &String) -> (String, String) {
// let discord_receiver_name = format!("Discord-{}", release_name);
@@ -234,7 +232,7 @@ prometheus:
// let route = format!(
// r#"
// - receiver: '{discord_receiver_name}'
// matchers:
// - alertname!=Watchdog
// continue: true"#,
// );
@@ -255,7 +253,7 @@ prometheus:
// let route = format!(
// r#"
// - receiver: '{slack_receiver_name}'
// matchers:
// - alertname!=Watchdog
// continue: true"#,
// );


@@ -77,11 +77,9 @@
// }
//}
//#[async_trait]
//pub trait PrometheusAlertChannel {
// fn get_alert_channel_global_settings(&self) -> Option<AlertManagerChannelGlobalConfigs>;
// fn get_alert_channel_route(&self) -> AlertManagerChannelRoute;
// fn get_alert_channel_receiver(&self) -> AlertManagerChannelReceiver;
//}


@@ -1,11 +1,16 @@
use async_trait::async_trait;
use serde::{Serialize, Serializer, ser::SerializeStruct};
use std::{fmt::Debug, sync::Arc};
use crate::{data::{Id, Version}, interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, inventory::Inventory, score::Score, topology::{
oberservability::monitoring::{AlertChannelConfig, Monitor},HelmCommand, Topology
}};
use serde::Serialize;
use crate::{
data::{Id, Version},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::{
HelmCommand, Topology,
oberservability::monitoring::{AlertChannelConfig, Monitor},
},
};
#[derive(Debug, Clone, Serialize)]
pub struct MonitoringAlertingScore {
@@ -37,7 +42,13 @@ impl<T: Topology + HelmCommand + Monitor> Interpret<T> for MonitoringAlertingInt
inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
topology.provision_monitor(self.score.alert_channel_configs.clone()).await
topology
.provision_monitor(
inventory,
topology,
self.score.alert_channel_configs.clone(),
)
.await
}
fn get_name(&self) -> InterpretName {
@@ -56,5 +67,3 @@ impl<T: Topology + HelmCommand + Monitor> Interpret<T> for MonitoringAlertingInt
todo!()
}
}