refactor: make Prometheus alert channel configuration extensible in line with the Open/Closed principle; adding a new alert channel type now only requires a new trait implementation, with no changes to the core logic
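
As a minimal sketch of that extension path (the PrometheusAlertChannel trait, the AlertChannel* config types, the #[typetag::serde] attribute, and the module paths are taken from this change; GenericWebhookChannel itself is a hypothetical example, not part of this commit), a new channel type would look roughly like:

use serde::{Deserialize, Serialize};
use url::Url;
use crate::{
    interpret::InterpretError,
    modules::{
        helm::chart::HelmChartScore,
        monitoring::kube_prometheus::{
            prometheus_alert_channel::PrometheusAlertChannel,
            types::{AlertChannelConfig, AlertChannelReceiver, AlertChannelRoute, WebhookConfig},
        },
    },
};

// Hypothetical channel: a plain Alertmanager webhook receiver.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GenericWebhookChannel {
    pub name: String,
    pub webhook_url: Url,
}

#[typetag::serde]
impl PrometheusAlertChannel for GenericWebhookChannel {
    fn get_alert_manager_config_contribution(&self) -> Result<AlertChannelConfig, InterpretError> {
        Ok(AlertChannelConfig {
            receiver: AlertChannelReceiver {
                name: format!("Webhook-{}", self.name),
                slack_configs: None,
                webhook_configs: Some(vec![WebhookConfig {
                    url: self.webhook_url.clone(),
                    send_resolved: true,
                }]),
            },
            route: AlertChannelRoute {
                receiver: format!("Webhook-{}", self.name),
                matchers: vec!["alertname!=Watchdog".to_string()],
                r#continue: true,
            },
            global_config: None,
        })
    }

    // A plain webhook needs no companion Helm chart, unlike the Discord channel.
    fn get_dependency_score(&self, _namespace: String) -> Option<HelmChartScore> {
        None
    }
}

MonitoringAlertingScore then picks the new type up without modification: push Box::new(GenericWebhookChannel { .. }) onto alert_channels and register the score as usual.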

This commit is contained in:
Willem 2025-05-28 12:43:00 -04:00
parent b5c6e1c99d
commit 78aadadd22
14 changed files with 5317 additions and 336 deletions

View File

@ -16,3 +16,5 @@ harmony_macros = { path = "../../harmony_macros" }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
typetag = "0.2.20"
serde = "1.0.219"

View File

@ -4,9 +4,7 @@ use harmony::{
maestro::Maestro,
modules::{
lamp::{LAMPConfig, LAMPScore},
monitoring::monitoring_alerting::{
AlertChannel, MonitoringAlertingStackScore, WebhookServiceType,
},
monitoring::{kube_prometheus::prometheus_alert_channel::{DiscordChannel, SlackChannel}, monitoring_alerting::MonitoringAlertingScore},
},
topology::{K8sAnywhereTopology, Url},
};
@ -34,28 +32,42 @@ async fn main() {
},
};
// You can choose the type of Topology you want; we suggest starting with the
// K8sAnywhereTopology, as it is the most automatic one and lets you easily deploy
// locally, to a development environment from CI, to staging, and to production, with
// settings that automatically adapt to each environment grade.
let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
let mut maestro = Maestro::<K8sAnywhereTopology>::initialize (
Inventory::autoload(),
K8sAnywhereTopology::new(),
)
.await
.unwrap();
let url = url::Url::parse("https://discord.com/api/webhooks/dummy_channel/dummy_token")
.expect("invalid URL");
let url = url::Url::parse(
"https://hooks.slack.com/services/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",
)
.expect("invalid URL");
let mut monitoring_stack_score = MonitoringAlertingStackScore::new();
let mut monitoring_stack_score = MonitoringAlertingScore::new();
monitoring_stack_score.namespace = Some(lamp_stack.config.namespace.clone());
monitoring_stack_score.alert_channel = Some(AlertChannel::WebHookUrl {
url: url,
webhook_service_type: WebhookServiceType::Discord,
});
monitoring_stack_score.alert_channels = vec![
    Box::new(SlackChannel {
        name: "alert-test".to_string(),
        webhook_url: url,
    }),
    Box::new(DiscordChannel {
        name: "discord".to_string(),
        webhook_url: url::Url::parse("https://discord.com/api/webhooks/1372994201746276462/YRn4TA9pj8ve3lfmyj1j0Yx97i92gv4U_uavt4CV4_SSIVArYUqfDzMOmzSTic2d8XSL")
            .expect("invalid URL"),
    }),
];
maestro.register_all(vec![Box::new(lamp_stack), Box::new(monitoring_stack_score)]);
//TODO: in the process of testing
//webhooks deprecated in MS Teams, August 2025
//(AlertChannel::MSTeams {
// connector: "alert-test".to_string(),
// webhook_url: url::Url::parse("").expect("invalid URL"),
//}),
maestro.register_all(vec![Box::new(monitoring_stack_score)]);
// Here we bootstrap the CLI, this gives some nice features if you need them
harmony_cli::init(maestro, None).await.unwrap();
}

View File

@ -49,3 +49,5 @@ fqdn = { version = "0.4.6", features = [
"serde",
] }
temp-dir = "0.1.14"
typetag = "0.2.20"
dyn-clone = "1.0.19"

View File

@ -1,14 +1,16 @@
use serde::Serialize;
use super::AlertChannel;
use super::kube_prometheus::{prometheus_alert_channel::PrometheusAlertChannel, types::AlertManagerValues};
#[derive(Debug, Clone, Serialize)]
pub struct KubePrometheusConfig {
pub struct KubePrometheusChartConfig {
pub namespace: String,
pub default_rules: bool,
pub windows_monitoring: bool,
pub alert_manager: bool,
pub alert_manager_values: AlertManagerValues,
pub node_exporter: bool,
pub prometheus: bool,
pub grafana: bool,
@ -22,16 +24,17 @@ pub struct KubePrometheusConfig {
pub kube_proxy: bool,
pub kube_state_metrics: bool,
pub prometheus_operator: bool,
pub alert_channel: Vec<AlertChannel>,
pub alert_channels: Vec<Box<dyn PrometheusAlertChannel>>,
}
impl KubePrometheusConfig {
impl KubePrometheusChartConfig {
pub fn new() -> Self {
Self {
namespace: "monitoring".into(),
default_rules: true,
windows_monitoring: false,
alert_manager: true,
alert_channel: Vec::new(),
alert_manager_values: AlertManagerValues::default(),
alert_channels: Vec::new(),
grafana: true,
node_exporter: false,
prometheus: true,

View File

@ -1,47 +1,32 @@
use std::str::FromStr;
use non_blank_string_rs::NonBlankString;
use url::Url;
use crate::modules::helm::chart::HelmChartScore;
use super::AlertChannel;
use super::config::KubePrometheusConfig;
fn get_discord_alert_manager_score(config: &KubePrometheusConfig) -> Option<HelmChartScore> {
let (url, name) = config.alert_channel.iter().find_map(|channel| {
if let AlertChannel::Discord { webhook_url, name } = channel {
Some((webhook_url, name))
} else {
None
}
})?;
pub fn discord_alert_manager_score(name: String, webhook: Url, namespace: String) -> HelmChartScore {
let url = webhook;
let values = format!(
r#"
environment:
- name: "DISCORD_WEBHOOK"
value: "{url}"
"#,
);
Some(HelmChartScore {
namespace: Some(NonBlankString::from_str(&config.namespace).unwrap()),
HelmChartScore {
namespace: Some(NonBlankString::from_str(&namespace).unwrap()),
release_name: NonBlankString::from_str(&name).unwrap(),
chart_name: NonBlankString::from_str("oci://hub.nationtech.io/library/alertmanager-discord")
.unwrap(),
chart_name: NonBlankString::from_str(
"oci://hub.nationtech.io/library/alertmanager-discord",
)
.unwrap(),
chart_version: None,
values_overrides: None,
values_yaml: Some(values.to_string()),
create_namespace: true,
install_only: true,
repository: None,
})
}
pub fn discord_alert_manager_score(config: &KubePrometheusConfig) -> HelmChartScore {
if let Some(chart) = get_discord_alert_manager_score(config) {
chart
} else {
panic!("Expected discord alert manager helm chart");
}
}

View File

@ -1,17 +1,10 @@
use crate::modules::monitoring::kube_prometheus::types::{
AlertChannelReceiver, AlertChannelRoute, AlertManager, AlertManagerConfig,
AlertManagerRoute, AlertManagerValues,
};
use crate::modules::{
helm::chart::HelmChartScore,
monitoring::{config::KubePrometheusConfig, kube_prometheus::traits::AlertEndpoint},
};
use crate::modules::{helm::chart::HelmChartScore, monitoring::config::KubePrometheusChartConfig};
use log::info;
use non_blank_string_rs::NonBlankString;
use serde_yaml::{self};
use std::str::FromStr;
pub fn kube_prometheus_helm_chart_score(config: &KubePrometheusConfig) -> HelmChartScore {
pub fn kube_prometheus_helm_chart_score(config: &KubePrometheusChartConfig) -> HelmChartScore {
//TODO this should be made into a rule with default formatting that can be easily passed as a vec
//to the overrides or something; leaving the user to deal with formatting here seems bad
let default_rules = config.default_rules.to_string();
@ -29,6 +22,7 @@ pub fn kube_prometheus_helm_chart_score(config: &KubePrometheusConfig) -> HelmCh
let node_exporter = config.node_exporter.to_string();
let prometheus_operator = config.prometheus_operator.to_string();
let prometheus = config.prometheus.to_string();
let alert_manager_values = config.alert_manager_values.clone();
let mut values = format!(
r#"
additionalPrometheusRulesMap:
@ -151,66 +145,12 @@ prometheus:
"#,
);
let alert_manager_config = build_alert_manager_config(&config);
let alert_manager_yaml = serde_yaml::to_string(&alert_manager_values).expect("Failed to serialize YAML");
values.push_str(&alert_manager_yaml);
fn build_alert_manager_config(config: &KubePrometheusConfig) -> AlertManagerValues {
let mut global_config = None;
let (mut receivers, mut routes): (Vec<_>, Vec<_>) = config
.alert_channel
.iter()
.map(|s| s.build_alert_receiver())
.map(|chan| {
if let Some(global) = chan.global_config {
global_config = Some(global);
}
(chan.receiver, chan.route)
})
.unzip();
receivers.push(AlertChannelReceiver {
name: "null".to_string(),
slack_configs: None,
webhook_configs: None,
});
routes.push(AlertChannelRoute {
receiver: "null".to_string(),
matchers: vec!["alertname=Watchdog".to_string()],
r#continue: false,
});
info!("after alert receiver: {:#?}", receivers);
info!("after alert routes: {:#?}", routes);
let config = AlertManagerConfig {
global: global_config,
route: AlertManagerRoute {
group_by: vec!["job".to_string()],
group_wait: "30s".to_string(),
group_interval: "5m".to_string(),
repeat_interval: "12h".to_string(),
routes,
},
receivers,
};
info!("alert manager config: {:?}", config);
AlertManagerValues {
alertmanager: AlertManager {
enabled: true,
config,
},
}
}
let yaml_config =
serde_yaml::to_string(&alert_manager_config).expect("Failed to serialize YAML");
values.push_str(&yaml_config);
info!("{}", values);
HelmChartScore {
namespace: Some(NonBlankString::from_str(&config.namespace).unwrap()),
release_name: NonBlankString::from_str("kube-prometheus").unwrap(),

View File

@ -1,4 +1,3 @@
pub mod traits;
pub mod kube_prometheus;
pub mod types;
pub mod prometheus_alert_channel;

View File

@ -0,0 +1,140 @@
use crate::{
interpret::InterpretError,
modules::{
helm::chart::HelmChartScore,
monitoring::{
discord_alert_manager::discord_alert_manager_score,
kube_prometheus::types::{
AlertChannelConfig, AlertChannelGlobalConfig, AlertChannelReceiver,
AlertChannelRoute, SlackConfig, WebhookConfig,
},
},
},
};
use dyn_clone::DynClone;
use serde::{Deserialize, Serialize};
use std::fmt::Debug;
use url::Url;
#[typetag::serde(tag = "channel_type")]
#[async_trait::async_trait]
pub trait PrometheusAlertChannel: DynClone + Debug + Send + Sync {
fn get_alert_manager_config_contribution(&self) -> Result<AlertChannelConfig, InterpretError>;
fn get_dependency_score(&self, namespace: String) -> Option<HelmChartScore>;
}
dyn_clone::clone_trait_object!(PrometheusAlertChannel);
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DiscordChannel {
pub name: String,
pub webhook_url: Url,
}
#[typetag::serde]
impl PrometheusAlertChannel for DiscordChannel {
fn get_alert_manager_config_contribution(&self) -> Result<AlertChannelConfig, InterpretError> {
let service_url = format!("http://{}-alertmanager-discord:9094", &self.name);
Ok(AlertChannelConfig {
receiver: AlertChannelReceiver {
name: format!("Discord-{}", self.name),
slack_configs: None,
webhook_configs: Some(vec![WebhookConfig {
url: url::Url::parse(&service_url)
.expect("invalid url"),
send_resolved: true,
}]),
},
route: AlertChannelRoute {
receiver: format!("Discord-{}", self.name),
matchers: vec!["alertname!=Watchdog".to_string()],
r#continue: true,
},
global_config: None,
})
}
fn get_dependency_score(&self, namespace: String) -> Option<HelmChartScore> {
Some(discord_alert_manager_score(self.name.clone(), self.webhook_url.clone(), namespace.clone()))
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SlackChannel {
pub name: String,
pub webhook_url: Url,
}
#[typetag::serde]
impl PrometheusAlertChannel for SlackChannel {
fn get_alert_manager_config_contribution(&self) -> Result<AlertChannelConfig, InterpretError> {
Ok(AlertChannelConfig {
receiver: AlertChannelReceiver {
name: format!("Slack-{}", self.name),
slack_configs: Some(vec![SlackConfig {
channel: self.name.clone(),
send_resolved: true,
title: "{{ .CommonAnnotations.title }}".to_string(),
text: ">-
*Alert:* {{ .CommonLabels.alertname }}
*Severity:* {{ .CommonLabels.severity }}
*Namespace:* {{ .CommonLabels.namespace }}
*Pod:* {{ .CommonLabels.pod }}
*ExternalURL:* {{ .ExternalURL }}
{{ range .Alerts }}
*Instance:* {{ .Labels.instance }}
*Summary:* {{ .Annotations.summary }}
*Description:* {{ .Annotations.description }}
*Starts At:* {{ .StartsAt }}
*Status:* {{ .Status }}
{{ end }}"
.to_string(),
}]),
webhook_configs: None,
},
route: AlertChannelRoute {
receiver: format!("Slack-{}", self.name),
matchers: vec!["alertname!=Watchdog".to_string()],
r#continue: true,
},
global_config: Some(AlertChannelGlobalConfig {
slack_api_url: Some(self.webhook_url.clone()),
}),
})
}
fn get_dependency_score(&self, _namespace: String) -> Option<HelmChartScore> {
None
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NullReceiver {}
impl NullReceiver {
pub fn new() -> Self {
Self {}
}
}
#[typetag::serde]
impl PrometheusAlertChannel for NullReceiver {
fn get_alert_manager_config_contribution(&self) -> Result<AlertChannelConfig, InterpretError> {
Ok(AlertChannelConfig {
receiver: AlertChannelReceiver {
name: "null".to_string(),
slack_configs: None,
webhook_configs: None,
},
route: AlertChannelRoute {
receiver: "null".to_string(),
matchers: vec!["alertname=Watchdog".to_string()],
r#continue: false,
},
global_config: None,
})
}
fn get_dependency_score(&self, _namespace: String) -> Option<HelmChartScore> {
None
}
}

View File

@ -1,92 +0,0 @@
use crate::modules::monitoring::AlertChannel;
use super::types::{AlertChannelConfig, AlertChannelGlobalConfig, AlertChannelReceiver, AlertChannelRoute, SlackConfig, WebhookConfig};
pub trait AlertEndpoint {
//fn register_webhook(&self, webhook_url: Url);
fn build_alert_receiver(&self) -> AlertChannelConfig;
}
impl AlertEndpoint for AlertChannel {
fn build_alert_receiver(&self) -> AlertChannelConfig {
match self {
AlertChannel::Discord { name, .. } => AlertChannelConfig {
receiver: AlertChannelReceiver {
name: format!("Discord-{name}"),
slack_configs: None,
webhook_configs: Some(vec![WebhookConfig {
url: url::Url::parse("http://{name}-alertmanager-discord:9094")
.expect("invalid url"),
send_resolved: true,
}]),
},
route: AlertChannelRoute {
receiver: format!("Discord-{name}"),
matchers: vec!["alertname!=Watchdog".to_string()],
r#continue: true,
},
global_config: None,
},
AlertChannel::Slack {
slack_channel,
webhook_url,
} => AlertChannelConfig {
receiver: AlertChannelReceiver {
name: format!("Slack-{slack_channel}"),
slack_configs: Some(vec![SlackConfig {
channel: slack_channel.clone(),
send_resolved: true,
title: "{{ .CommonAnnotations.title }}".to_string(),
text: ">-
*Alert:* {{ .CommonLabels.alertname }}
*Severity:* {{ .CommonLabels.severity }}
*Namespace:* {{ .CommonLabels.namespace }}
*Pod:* {{ .CommonLabels.pod }}
*ExternalURL:* {{ .ExternalURL }}
{{ range .Alerts }}
*Instance:* {{ .Labels.instance }}
*Summary:* {{ .Annotations.summary }}
*Description:* {{ .Annotations.description }}
*Starts At:* {{ .StartsAt }}
*Status:* {{ .Status }}
{{ end }}".to_string()
}]),
webhook_configs: None,
},
route: AlertChannelRoute {
receiver: format!("Slack-{slack_channel}"),
matchers: vec!["alertname!=Watchdog".to_string()],
r#continue: true,
},
global_config: Some(AlertChannelGlobalConfig {
slack_api_url: Some(webhook_url.clone()),
}),
},
AlertChannel::MSTeams {
connector, ..
} => AlertChannelConfig{
receiver: AlertChannelReceiver{
name: format!("MSTeams-{connector}"),
slack_configs: None,
webhook_configs: Some(vec![WebhookConfig{
url: url::Url::parse("http://prometheus-msteams-prometheus-msteams.monitoring.svc.cluster.local:2000/alertmanager").expect("invalid url"),
send_resolved: true,}])
},
route: AlertChannelRoute{
receiver: format!("MSTeams-{connector}"),
matchers: vec!["alertname!=Watchdog".to_string()],
r#continue: true,
},
global_config: None, },
AlertChannel::Smpt {
email_address,
service_name,
} => todo!(),
}
}
}

View File

@ -1,25 +1,25 @@
use serde::{Deserialize, Serialize};
use url::Url;
#[derive(Debug, Serialize)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AlertManagerValues {
pub alertmanager: AlertManager,
}
#[derive(Debug, Serialize)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AlertManager {
pub enabled: bool,
pub config: AlertManagerConfig,
}
#[derive(Debug)]
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct AlertChannelConfig {
pub receiver: AlertChannelReceiver,
pub route: AlertChannelRoute,
pub global_config: Option<AlertChannelGlobalConfig>,
}
#[derive(Debug, Serialize, Deserialize)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AlertChannelReceiver {
pub name: String,
#[serde(skip_serializing_if = "Option::is_none")]
@ -28,7 +28,7 @@ pub struct AlertChannelReceiver {
pub webhook_configs: Option<Vec<WebhookConfig>>,
}
#[derive(Debug, Serialize)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AlertManagerRoute {
pub group_by: Vec<String>,
pub group_wait: String,
@ -37,13 +37,13 @@ pub struct AlertManagerRoute {
pub routes: Vec<AlertChannelRoute>,
}
#[derive(Debug, Serialize, Deserialize)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AlertChannelGlobalConfig {
#[serde(skip_serializing_if = "Option::is_none")]
pub slack_api_url: Option<Url>,
}
#[derive(Debug, Serialize, Deserialize)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SlackConfig {
pub channel: String,
pub send_resolved: bool,
@ -51,13 +51,13 @@ pub struct SlackConfig {
pub text: String,
}
#[derive(Debug, Serialize, Deserialize)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WebhookConfig {
pub url: Url,
pub send_resolved: bool,
}
#[derive(Debug, Serialize, Deserialize)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AlertChannelRoute {
pub receiver: String,
pub matchers: Vec<String>,
@ -65,9 +65,30 @@ pub struct AlertChannelRoute {
pub r#continue: bool,
}
#[derive(Debug, Serialize)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AlertManagerConfig {
pub global: Option<AlertChannelGlobalConfig>,
pub route: AlertManagerRoute,
pub receivers: Vec<AlertChannelReceiver>,
}
impl AlertManagerValues {
pub fn default() -> Self {
Self {
alertmanager: AlertManager {
enabled: true,
config: AlertManagerConfig {
global: None,
route: AlertManagerRoute {
group_by: vec!["job".to_string()],
group_wait: "30s".to_string(),
group_interval: "5m".to_string(),
repeat_interval: "12h".to_string(),
routes: vec![AlertChannelRoute{ receiver: "null".to_string(), matchers: vec!["alertname=Watchdog".to_string()], r#continue: false }],
},
receivers: vec![AlertChannelReceiver{ name: "null".to_string(), slack_configs: None, webhook_configs: None }],
},
},
}
}
}

File diff suppressed because it is too large

View File

@ -1,32 +1,5 @@
use email_address::EmailAddress;
use serde::Serialize;
use url::Url;
mod config;
mod discord_alert_manager;
pub mod kube_prometheus;
pub mod monitoring_alerting;
mod prometheus_msteams;
#[derive(Debug, Clone, Serialize)]
pub enum AlertChannel {
Discord {
name: String,
webhook_url: Url,
},
Slack {
slack_channel: String,
webhook_url: Url,
},
MSTeams {
connector: String,
webhook_url: Url,
},
//TODO test and implement in helm chart
//currently does not work
Smpt {
email_address: EmailAddress,
service_name: String,
},
}

View File

@ -1,41 +1,46 @@
use async_trait::async_trait;
use email_address::EmailAddress;
use log::info;
use serde::Serialize;
use url::Url;
use crate::{
data::{Id, Version},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
modules::monitoring::kube_prometheus::types::{
AlertManager, AlertManagerConfig, AlertManagerRoute,
},
score::Score,
topology::{HelmCommand, Topology},
};
use super::{
config::KubePrometheusConfig, discord_alert_manager::discord_alert_manager_score, kube_prometheus::kube_prometheus::kube_prometheus_helm_chart_score, prometheus_msteams::prometheus_msteams_score, AlertChannel
config::KubePrometheusChartConfig,
kube_prometheus::{
kube_prometheus::kube_prometheus_helm_chart_score,
prometheus_alert_channel::{NullReceiver, PrometheusAlertChannel},
types::AlertManagerValues,
},
};
#[derive(Debug, Clone, Serialize)]
pub struct MonitoringAlertingStackScore {
pub alert_channel: Vec<AlertChannel>,
pub struct MonitoringAlertingScore {
pub alert_channels: Vec<Box<dyn PrometheusAlertChannel>>,
pub namespace: Option<String>,
}
impl MonitoringAlertingStackScore {
impl MonitoringAlertingScore {
pub fn new() -> Self {
Self {
alert_channel: Vec::new(),
alert_channels: Vec::new(),
namespace: None,
}
}
}
impl<T: Topology + HelmCommand> Score<T> for MonitoringAlertingStackScore {
impl<T: Topology + HelmCommand> Score<T> for MonitoringAlertingScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(MonitoringAlertingStackInterpret {
Box::new(MonitoringAlertingInterpret {
score: self.clone(),
})
}
@ -45,17 +50,61 @@ impl<T: Topology + HelmCommand> Score<T> for MonitoringAlertingStackScore {
}
#[derive(Debug, Clone, Serialize)]
struct MonitoringAlertingStackInterpret {
score: MonitoringAlertingStackScore,
struct MonitoringAlertingInterpret {
score: MonitoringAlertingScore,
}
impl MonitoringAlertingStackInterpret {
async fn build_kube_prometheus_helm_chart_config(&self) -> KubePrometheusConfig {
let mut config = KubePrometheusConfig::new();
impl MonitoringAlertingInterpret {
async fn build_kube_prometheus_helm_chart_config(&self) -> KubePrometheusChartConfig {
let mut config = KubePrometheusChartConfig::new();
let mut receivers = Vec::new();
let mut routes = Vec::new();
let mut global_config = None;
if let Some(ns) = &self.score.namespace {
config.namespace = ns.clone();
};
let null_channel = NullReceiver::new();
let null_channel = null_channel
.get_alert_manager_config_contribution()
.unwrap();
receivers.push(null_channel.receiver);
routes.push(null_channel.route);
for channel in self.score.alert_channels.clone() {
let alert_manager_config_contribution =
channel.get_alert_manager_config_contribution().unwrap();
receivers.push(alert_manager_config_contribution.receiver);
routes.push(alert_manager_config_contribution.route);
if let Some(global) = alert_manager_config_contribution.global_config {
global_config = Some(global);
}
}
config.alert_channel = self.score.alert_channel.clone();
info!("after alert receiver: {:#?}", receivers);
info!("after alert routes: {:#?}", routes);
let alert_manager_config = AlertManagerConfig {
global: global_config,
route: AlertManagerRoute {
group_by: vec!["job".to_string()],
group_wait: "30s".to_string(),
group_interval: "5m".to_string(),
repeat_interval: "12h".to_string(),
routes,
},
receivers,
};
info!("alert manager config: {:?}", config);
config.alert_manager_values = AlertManagerValues {
alertmanager: AlertManager {
enabled: true,
config: alert_manager_config,
},
};
config
}
@ -63,7 +112,7 @@ impl MonitoringAlertingStackInterpret {
&self,
inventory: &Inventory,
topology: &T,
config: &KubePrometheusConfig,
config: &KubePrometheusChartConfig,
) -> Result<Outcome, InterpretError> {
let helm_chart = kube_prometheus_helm_chart_score(config);
helm_chart
@ -72,39 +121,29 @@ impl MonitoringAlertingStackInterpret {
.await
}
async fn deploy_alert_channel_service<T: Topology + HelmCommand>(
async fn deploy_alert_channel_dependencies<T: Topology + HelmCommand>(
&self,
inventory: &Inventory,
topology: &T,
config: &KubePrometheusConfig,
config: &KubePrometheusChartConfig,
) -> Result<Outcome, InterpretError> {
let mut outcomes = vec![];
let mut outcomes = Vec::new();
for channel in &self.score.alert_channel {
let outcome = match channel {
AlertChannel::Discord { .. } => {
discord_alert_manager_score(config)
.create_interpret()
.execute(inventory, topology)
.await
for channel in &self.score.alert_channels {
let ns = config.namespace.clone();
if let Some(dependency_score) = channel.get_dependency_score(ns) {
match dependency_score
.create_interpret()
.execute(inventory, topology)
.await
{
Ok(outcome) => outcomes.push(outcome),
Err(e) => {
info!("failed to deploy dependency: {}", { &e });
return Err(e);
}
}
AlertChannel::Slack { .. } => Ok(Outcome::success(
"No extra configs for slack alerting".to_string(),
)),
AlertChannel::MSTeams { .. } => {
prometheus_msteams_score(config)
.create_interpret()
.execute(inventory, topology)
.await
}
AlertChannel::Smpt { .. } => {
todo!()
}
};
outcomes.push(outcome);
}
for result in outcomes {
result?;
}
}
Ok(Outcome::success("All alert channels deployed".to_string()))
@ -112,22 +151,22 @@ impl MonitoringAlertingStackInterpret {
}
#[async_trait]
impl<T: Topology + HelmCommand> Interpret<T> for MonitoringAlertingStackInterpret {
impl<T: Topology + HelmCommand> Interpret<T> for MonitoringAlertingInterpret {
async fn execute(
&self,
inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
let config = self.build_kube_prometheus_helm_chart_config().await;
info!("Built kube prometheus config");
info!("Built kube prometheus config{:?}", config);
info!("Installing kube prometheus chart");
self.deploy_kube_prometheus_helm_chart_score(inventory, topology, &config)
.await?;
info!("Installing alert channel service");
self.deploy_alert_channel_service(inventory, topology, &config)
self.deploy_alert_channel_dependencies(inventory, topology, &config)
.await?;
Ok(Outcome::success(format!(
"succesfully deployed monitoring and alerting stack"
"succesfully deployed monitoring and alerting score"
)))
}

View File

@ -1,46 +0,0 @@
use std::str::FromStr;
use non_blank_string_rs::NonBlankString;
use crate::modules::helm::chart::HelmChartScore;
use super::{config::KubePrometheusConfig, AlertChannel};
fn build_prometheus_msteams_score(config: &KubePrometheusConfig) -> Option<HelmChartScore> {
let (url, name) = config.alert_channel.iter().find_map(|channel| {
if let AlertChannel::MSTeams { webhook_url, connector } = channel {
Some((webhook_url, connector))
} else {
None
}
})?;
let values = format!(
r#"
connectors:
- default: "{url}"
"#,
);
Some(HelmChartScore {
namespace: Some(NonBlankString::from_str(&config.namespace).unwrap()),
release_name: NonBlankString::from_str(&name).unwrap(),
chart_name: NonBlankString::from_str("oci://hub.nationtech.io/library/prometheus-msteams")
.unwrap(),
chart_version: None,
values_overrides: None,
values_yaml: Some(values.to_string()),
create_namespace: true,
install_only: true,
repository: None,
})
}
pub fn prometheus_msteams_score(config: &KubePrometheusConfig) -> HelmChartScore {
if let Some(chart) = build_prometheus_msteams_score(config) {
chart
} else {
panic!("Expected discord alert manager helm chart");
}
}