feat: deploys cluster monitoring stack from monitoring score on k8sanywhere topology
All checks were successful
Run Check Script / check (push) Successful in 1m46s
Run Check Script / check (pull_request) Successful in 1m47s

This commit is contained in:
Willem 2025-06-11 15:06:39 -04:00
parent 238d1f85e2
commit 613def5e0b
10 changed files with 154 additions and 98 deletions

View File

@ -0,0 +1,12 @@
# Manifest for the `webhook_sender` example crate.
[package]
name = "webhook_sender"
edition = "2024"
# Version, readme, and license are inherited from the workspace root manifest.
version.workspace = true
readme.workspace = true
license.workspace = true
# Path dependencies on the sibling `harmony` crates within this workspace.
[dependencies]
harmony = { version = "0.1.0", path = "../../harmony" }
harmony_cli = { version = "0.1.0", path = "../../harmony_cli" }
tokio.workspace = true
url.workspace = true

View File

@ -0,0 +1,23 @@
use harmony::{
    inventory::Inventory,
    maestro::Maestro,
    modules::monitoring::monitoring_alerting::MonitoringAlertingScore,
    topology::K8sAnywhereTopology,
};

/// Example binary: deploys the cluster monitoring stack on a
/// `K8sAnywhereTopology` by registering a `MonitoringAlertingScore`
/// with the maestro, then handing control to the harmony CLI.
///
/// Note: the original imported `oberservability::K8sMonitorConfig`
/// but never used it; the dead import has been removed.
#[tokio::main]
async fn main() {
    // Initialize the maestro against the auto-detected inventory and
    // topology. Failure here is fatal for this example binary, so panic
    // with context instead of a bare unwrap.
    let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
        Inventory::autoload(),
        K8sAnywhereTopology::new(),
    )
    .await
    .expect("failed to initialize maestro for K8sAnywhereTopology");

    // No alert channels configured: provision the monitoring stack only.
    let monitoring = MonitoringAlertingScore {
        alert_channel_configs: None,
    };
    maestro.register_all(vec![Box::new(monitoring)]);

    // Run the harmony CLI event loop to execute the registered scores.
    harmony_cli::init(maestro, None)
        .await
        .expect("harmony CLI run failed");
}

View File

@ -10,7 +10,10 @@ use crate::{
interpret::{InterpretError, Outcome}, interpret::{InterpretError, Outcome},
inventory::Inventory, inventory::Inventory,
maestro::Maestro, maestro::Maestro,
modules::{k3d::K3DInstallationScore, monitoring::kube_prometheus::kube_prometheus_helm_chart_score::kube_prometheus_helm_chart_score}, modules::{
k3d::K3DInstallationScore,
monitoring::kube_prometheus::kube_prometheus_helm_chart_score::kube_prometheus_helm_chart_score,
},
topology::LocalhostTopology, topology::LocalhostTopology,
}; };
@ -193,9 +196,7 @@ impl K8sAnywhereTopology {
self.k8s_monitor self.k8s_monitor
.get_or_try_init(async || -> Result<K8sMonitor, String> { .get_or_try_init(async || -> Result<K8sMonitor, String> {
let config = K8sMonitorConfig { let config = K8sMonitorConfig::cluster_monitor();
chart: kube_prometheus_helm_chart_score(),
};
Ok(K8sMonitor { config }) Ok(K8sMonitor { config })
}) })
.await .await
@ -302,12 +303,14 @@ impl TenantManager for K8sAnywhereTopology {
} }
#[async_trait] #[async_trait]
impl Monitor for K8sAnywhereTopology { impl Monitor for K8sAnywhereTopology {
async fn provision_monitor( async fn provision_monitor<T: Topology + HelmCommand>(
&self, &self,
inventory: &Inventory,
topology: &T,
alert_receivers: Option<Vec<Box<dyn AlertChannelConfig>>>, alert_receivers: Option<Vec<Box<dyn AlertChannelConfig>>>,
) -> Result<Outcome, InterpretError> { ) -> Result<Outcome, InterpretError> {
self.get_k8s_monitor()? self.get_k8s_monitor()?
.provision_monitor(alert_receivers) .provision_monitor(inventory, topology, alert_receivers)
.await .await
} }

View File

@ -3,14 +3,13 @@ use std::sync::Arc;
use async_trait::async_trait; use async_trait::async_trait;
use serde::Serialize; use serde::Serialize;
use crate::score::Score;
use crate::topology::HelmCommand;
use crate::{ use crate::{
interpret::{InterpretError, Outcome}, interpret::{InterpretError, Outcome},
inventory::Inventory, inventory::Inventory,
modules::{helm::chart::HelmChartInterpret, monitoring::kube_prometheus::{ topology::Topology,
config::KubePrometheusConfig,
kube_prometheus_helm_chart_score::kube_prometheus_helm_chart_score,
}},
topology::{K8sAnywhereTopology, Topology},
}; };
use super::{ use super::{
@ -25,19 +24,23 @@ pub struct K8sMonitor {
#[async_trait] #[async_trait]
impl Monitor for K8sMonitor { impl Monitor for K8sMonitor {
async fn provision_monitor( async fn provision_monitor<T: Topology + HelmCommand>(
&self, &self,
alert_receivers: Option<Vec<Box<dyn AlertChannelConfig>>>, inventory: &Inventory,
topology: &T,
alert_channels: Option<Vec<Box<dyn AlertChannelConfig>>>,
) -> Result<Outcome, InterpretError> { ) -> Result<Outcome, InterpretError> {
if let Some(receivers) = alert_receivers { if let Some(channels) = alert_channels {
let alert_channels = self.build_alert_channels(receivers).await?; let alert_channels = self.build_alert_channels(channels).await?;
for channel in alert_channels { for channel in alert_channels {
channel.register_alert_channel().await?; channel.register_alert_channel().await?;
} }
} }
let chart = self.config.chart.clone(); let chart = self.config.chart.clone();
// chart
chart.create_interpret(); .create_interpret()
.execute(inventory, topology)
.await?;
Ok(Outcome::success("installed monitor".to_string())) Ok(Outcome::success("installed monitor".to_string()))
} }

View File

@ -1,12 +1,23 @@
use serde::Serialize; use serde::Serialize;
use crate::modules::helm::chart::HelmChartScore; use crate::modules::{
helm::chart::HelmChartScore,
monitoring::kube_prometheus::kube_prometheus_helm_chart_score::kube_prometheus_helm_chart_score,
};
pub mod monitoring;
pub mod k8s; pub mod k8s;
pub mod monitoring;
#[derive(Debug, Clone, Serialize)] #[derive(Debug, Clone, Serialize)]
pub struct K8sMonitorConfig { pub struct K8sMonitorConfig {
//probably need to do something better here //probably need to do something better here
pub chart: HelmChartScore, pub chart: HelmChartScore,
} }
impl K8sMonitorConfig {
pub fn cluster_monitor() -> Self {
Self {
chart: kube_prometheus_helm_chart_score(),
}
}
}

View File

@ -1,13 +1,12 @@
use async_trait::async_trait; use async_trait::async_trait;
use dyn_clone::DynClone; use dyn_clone::DynClone;
use serde::Serialize;
use std::fmt::Debug; use std::fmt::Debug;
use std::sync::Arc;
use crate::executors::ExecutorError; use crate::executors::ExecutorError;
use crate::interpret::InterpretError; use crate::interpret::InterpretError;
use crate::inventory::Inventory; use crate::inventory::Inventory;
use crate::topology::HelmCommand;
use crate::{interpret::Outcome, topology::Topology}; use crate::{interpret::Outcome, topology::Topology};
/// Represents an entity responsible for collecting and organizing observability data /// Represents an entity responsible for collecting and organizing observability data
@ -16,8 +15,10 @@ use crate::{interpret::Outcome, topology::Topology};
/// monitoring data, enabling consistent processing regardless of the underlying data source. /// monitoring data, enabling consistent processing regardless of the underlying data source.
#[async_trait] #[async_trait]
pub trait Monitor { pub trait Monitor {
async fn provision_monitor( async fn provision_monitor<T: Topology + HelmCommand>(
&self, &self,
inventory: &Inventory,
topology: &T,
alert_receivers: Option<Vec<Box<dyn AlertChannelConfig>>>, alert_receivers: Option<Vec<Box<dyn AlertChannelConfig>>>,
) -> Result<Outcome, InterpretError>; ) -> Result<Outcome, InterpretError>;

View File

@ -1,7 +1,5 @@
use url::Url; use url::Url;
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct DiscordWebhookAlertChannel { pub struct DiscordWebhookAlertChannel {
pub webhook_url: Url, pub webhook_url: Url,

View File

@ -160,67 +160,65 @@ prometheus:
repository: None, repository: None,
} }
} }
// let alertmanager_config = alert_manager_yaml_builder(&config); // let alertmanager_config = alert_manager_yaml_builder(&config);
// values.push_str(&alertmanager_config); // values.push_str(&alertmanager_config);
// //
// fn alert_manager_yaml_builder(config: &KubePrometheusConfig) -> String { // fn alert_manager_yaml_builder(config: &KubePrometheusConfig) -> String {
// let mut receivers = String::new(); // let mut receivers = String::new();
// let mut routes = String::new(); // let mut routes = String::new();
// let mut global_configs = String::new(); // let mut global_configs = String::new();
// let alert_manager = config.alert_manager; // let alert_manager = config.alert_manager;
// for alert_channel in &config.alert_channel { // for alert_channel in &config.alert_channel {
// match alert_channel { // match alert_channel {
// AlertChannel::Discord { name, .. } => { // AlertChannel::Discord { name, .. } => {
// let (receiver, route) = discord_alert_builder(name); // let (receiver, route) = discord_alert_builder(name);
// info!("discord receiver: {} \nroute: {}", receiver, route); // info!("discord receiver: {} \nroute: {}", receiver, route);
// receivers.push_str(&receiver); // receivers.push_str(&receiver);
// routes.push_str(&route); // routes.push_str(&route);
// } // }
// AlertChannel::Slack { // AlertChannel::Slack {
// slack_channel, // slack_channel,
// webhook_url, // webhook_url,
// } => { // } => {
// let (receiver, route) = slack_alert_builder(slack_channel); // let (receiver, route) = slack_alert_builder(slack_channel);
// info!("slack receiver: {} \nroute: {}", receiver, route); // info!("slack receiver: {} \nroute: {}", receiver, route);
// receivers.push_str(&receiver); // receivers.push_str(&receiver);
// //
// routes.push_str(&route); // routes.push_str(&route);
// let global_config = format!( // let global_config = format!(
// r#" // r#"
// global: // global:
// slack_api_url: {webhook_url}"# // slack_api_url: {webhook_url}"#
// ); // );
// //
// global_configs.push_str(&global_config); // global_configs.push_str(&global_config);
// } // }
// AlertChannel::Smpt { .. } => todo!(), // AlertChannel::Smpt { .. } => todo!(),
// } // }
// } // }
// info!("after alert receiver: {}", receivers); // info!("after alert receiver: {}", receivers);
// info!("after alert routes: {}", routes); // info!("after alert routes: {}", routes);
// //
// let alertmanager_config = format!( // let alertmanager_config = format!(
// r#" // r#"
//alertmanager: //alertmanager:
// enabled: {alert_manager} // enabled: {alert_manager}
// config: {global_configs} // config: {global_configs}
// route: // route:
// group_by: ['job'] // group_by: ['job']
// group_wait: 30s // group_wait: 30s
// group_interval: 5m // group_interval: 5m
// repeat_interval: 12h // repeat_interval: 12h
// routes: // routes:
//{routes} //{routes}
// receivers: // receivers:
// - name: 'null' // - name: 'null'
//{receivers}"# //{receivers}"#
// ); // );
// //
// info!("alert manager config: {}", alertmanager_config); // info!("alert manager config: {}", alertmanager_config);
// alertmanager_config // alertmanager_config
// } // }
//fn discord_alert_builder(release_name: &String) -> (String, String) { //fn discord_alert_builder(release_name: &String) -> (String, String) {
// let discord_receiver_name = format!("Discord-{}", release_name); // let discord_receiver_name = format!("Discord-{}", release_name);

View File

@ -77,11 +77,9 @@
// } // }
//} //}
//#[async_trait] //#[async_trait]
//pub trait PrometheusAlertChannel { //pub trait PrometheusAlertChannel {
// fn get_alert_channel_global_settings(&self) -> Option<AlertManagerChannelGlobalConfigs>; // fn get_alert_channel_global_settings(&self) -> Option<AlertManagerChannelGlobalConfigs>;
// fn get_alert_channel_route(&self) -> AlertManagerChannelRoute; // fn get_alert_channel_route(&self) -> AlertManagerChannelRoute;
// fn get_alert_channel_receiver(&self) -> AlertManagerChannelReceiver; // fn get_alert_channel_receiver(&self) -> AlertManagerChannelReceiver;
//} //}

View File

@ -1,11 +1,16 @@
use async_trait::async_trait; use async_trait::async_trait;
use serde::{Serialize, Serializer, ser::SerializeStruct}; use serde::Serialize;
use std::{fmt::Debug, sync::Arc};
use crate::{data::{Id, Version}, interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, inventory::Inventory, score::Score, topology::{
oberservability::monitoring::{AlertChannelConfig, Monitor},HelmCommand, Topology
}};
use crate::{
data::{Id, Version},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::{
HelmCommand, Topology,
oberservability::monitoring::{AlertChannelConfig, Monitor},
},
};
#[derive(Debug, Clone, Serialize)] #[derive(Debug, Clone, Serialize)]
pub struct MonitoringAlertingScore { pub struct MonitoringAlertingScore {
@ -37,7 +42,13 @@ impl<T: Topology + HelmCommand + Monitor> Interpret<T> for MonitoringAlertingInt
inventory: &Inventory, inventory: &Inventory,
topology: &T, topology: &T,
) -> Result<Outcome, InterpretError> { ) -> Result<Outcome, InterpretError> {
topology.provision_monitor(self.score.alert_channel_configs.clone()).await topology
.provision_monitor(
inventory,
topology,
self.score.alert_channel_configs.clone(),
)
.await
} }
fn get_name(&self) -> InterpretName { fn get_name(&self) -> InterpretName {
@ -56,5 +67,3 @@ impl<T: Topology + HelmCommand + Monitor> Interpret<T> for MonitoringAlertingInt
todo!() todo!()
} }
} }