Compare commits
No commits in common. "98f3f82ad5913d2401725959777fa2e3ea4a35fd" and "a19b52e6909098afb81a1b413b3850830059321c" have entirely different histories.
98f3f82ad5 ... a19b52e690

Cargo.lock (generated): 34 changed lines
@@ -1752,7 +1752,6 @@ dependencies = [
  "non-blank-string-rs",
  "opnsense-config",
  "opnsense-config-xml",
- "pretty_assertions",
  "rand 0.9.1",
  "reqwest 0.11.27",
  "russh",
@@ -1761,7 +1760,6 @@ dependencies = [
  "serde",
  "serde-value",
  "serde_json",
- "serde_with",
  "serde_yaml",
  "similar",
  "strum 0.27.1",
@@ -4082,18 +4080,6 @@ dependencies = [
  "serde_json",
 ]
 
-[[package]]
-name = "schemars"
-version = "1.0.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1375ba8ef45a6f15d83fa8748f1079428295d403d6ea991d09ab100155fbc06d"
-dependencies = [
- "dyn-clone",
- "ref-cast",
- "serde",
- "serde_json",
-]
-
 [[package]]
 name = "scopeguard"
 version = "1.2.0"
@@ -4294,36 +4280,22 @@ dependencies = [
 
 [[package]]
 name = "serde_with"
-version = "3.14.0"
+version = "3.13.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f2c45cd61fefa9db6f254525d46e392b852e0e61d9a1fd36e5bd183450a556d5"
+checksum = "bf65a400f8f66fb7b0552869ad70157166676db75ed8181f8104ea91cf9d0b42"
 dependencies = [
  "base64 0.22.1",
  "chrono",
  "hex",
  "indexmap 1.9.3",
  "indexmap 2.10.0",
- "schemars 0.9.0",
- "schemars 1.0.3",
+ "schemars",
  "serde",
  "serde_derive",
  "serde_json",
- "serde_with_macros",
  "time",
 ]
 
-[[package]]
-name = "serde_with_macros"
-version = "3.14.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "de90945e6565ce0d9a25098082ed4ee4002e047cb59892c318d66821e14bb30f"
-dependencies = [
- "darling",
- "proc-macro2",
- "quote",
- "syn",
-]
-
 [[package]]
 name = "serde_yaml"
 version = "0.9.34+deprecated"

@@ -52,4 +52,3 @@ convert_case = "0.8"
 chrono = "0.4"
 similar = "2"
 uuid = { version = "1.11", features = ["v4", "fast-rng", "macro-diagnostics"] }
-pretty_assertions = "1.4.1"

@@ -10,7 +10,7 @@ use harmony::{
     inventory::Inventory,
     maestro::Maestro,
     modules::{
-        http::StaticFilesHttpScore,
+        http::HttpScore,
         ipxe::IpxeScore,
         okd::{
             bootstrap_dhcp::OKDBootstrapDhcpScore,
@@ -126,7 +126,7 @@ async fn main() {
         harmony::modules::okd::load_balancer::OKDLoadBalancerScore::new(&topology);
 
     let tftp_score = TftpScore::new(Url::LocalFolder("./data/watchguard/tftpboot".to_string()));
-    let http_score = StaticFilesHttpScore::new(Url::LocalFolder(
+    let http_score = HttpScore::new(Url::LocalFolder(
         "./data/watchguard/pxe-http-files".to_string(),
     ));
     let ipxe_score = IpxeScore::new();

@@ -11,7 +11,7 @@ use harmony::{
     maestro::Maestro,
     modules::{
         dummy::{ErrorScore, PanicScore, SuccessScore},
-        http::StaticFilesHttpScore,
+        http::HttpScore,
         okd::{dhcp::OKDDhcpScore, dns::OKDDnsScore, load_balancer::OKDLoadBalancerScore},
         opnsense::OPNsenseShellCommandScore,
         tftp::TftpScore,
@@ -81,7 +81,7 @@ async fn main() {
     let load_balancer_score = OKDLoadBalancerScore::new(&topology);
 
     let tftp_score = TftpScore::new(Url::LocalFolder("./data/watchguard/tftpboot".to_string()));
-    let http_score = StaticFilesHttpScore::new(Url::LocalFolder(
+    let http_score = HttpScore::new(Url::LocalFolder(
         "./data/watchguard/pxe-http-files".to_string(),
     ));
     let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();

@@ -4,8 +4,7 @@ use harmony::{
     inventory::Inventory,
     maestro::Maestro,
     modules::application::{
-        ApplicationScore, RustWebFramework, RustWebapp,
-        features::{ContinuousDelivery, Monitoring},
+        ApplicationScore, RustWebFramework, RustWebapp, features::ContinuousDelivery,
     },
     topology::{K8sAnywhereTopology, Url},
 };
@@ -25,7 +24,6 @@ async fn main() {
             Box::new(ContinuousDelivery {
                 application: application.clone(),
             }),
-            Box::new(Monitoring {}),
             // TODO add monitoring, backups, multisite ha, etc
         ],
         application,

@@ -58,7 +58,3 @@ futures-util = "0.3.31"
 tokio-util = "0.7.15"
 strum = { version = "0.27.1", features = ["derive"] }
 tempfile = "3.20.0"
-serde_with = "3.14.0"
-
-[dev-dependencies]
-pretty_assertions.workspace = true

@@ -3,13 +3,11 @@ use std::{backtrace, collections::HashMap};
 use k8s_openapi::{Metadata, NamespaceResourceScope, Resource};
 use log::debug;
 use serde::Serialize;
-use serde_with::skip_serializing_none;
 use serde_yaml::Value;
 use url::Url;
 
 use crate::modules::application::features::CDApplicationConfig;
 
-#[skip_serializing_none]
 #[derive(Clone, Debug, Serialize)]
 #[serde(rename_all = "camelCase")]
 pub struct Helm {
@@ -29,11 +27,9 @@ pub struct Helm {
     pub namespace: Option<String>,
 }
 
-#[skip_serializing_none]
 #[derive(Clone, Debug, Serialize)]
 #[serde(rename_all = "camelCase")]
 pub struct Source {
-    #[serde(rename = "repoURL")]
     pub repo_url: Url,
     pub target_revision: Option<String>,
     pub chart: String,
@@ -71,7 +67,6 @@ pub struct SyncPolicy {
     pub retry: Retry,
 }
 
-#[skip_serializing_none]
 #[derive(Clone, Debug, Serialize)]
 #[serde(rename_all = "camelCase")]
 pub struct ArgoApplication {
@@ -140,7 +135,7 @@ impl From<CDApplicationConfig> for ArgoApplication {
             source: Source {
                 repo_url: Url::parse(value.helm_chart_repo_url.to_string().as_str())
                     .expect("couldn't convert to URL"),
-                target_revision: Some(value.version.to_string()),
+                target_revision: None,
                 chart: value.helm_chart_name,
                 helm: Helm {
                     pass_credentials: None,
@@ -150,7 +145,7 @@ impl From<CDApplicationConfig> for ArgoApplication {
                     value_files: vec![],
                     ignore_missing_value_files: None,
                     values: None,
-                    values_object: value.values_overrides,
+                    values_object: Some(value.values_overrides),
                     skip_crds: None,
                     skip_schema_validation: None,
                     version: None,
@@ -257,7 +252,6 @@ spec:
 
 #[cfg(test)]
 mod tests {
-    use pretty_assertions::assert_eq;
     use url::Url;
 
     use crate::modules::application::features::{
@@ -321,14 +315,24 @@ spec:
     server: https://kubernetes.default.svc
     namespace: test-ns
   source:
-    repoURL: http://test/
+    repoUrl: http://test/
+    targetRevision: null
     chart: test-chart
     helm:
+      passCredentials: null
      parameters: []
      fileParameters: []
      releaseName: test-release-neame
      valueFiles: []
+      ignoreMissingValueFiles: null
+      values: null
+      valuesObject: null
+      skipCrds: null
+      skipSchemaValidation: null
+      version: null
+      kubeVersion: null
      apiVersions: []
+      namespace: null
   syncPolicy:
     automated:
       prune: false

@@ -161,7 +161,6 @@ impl<
         let helm_chart = self.application.build_push_helm_package(&image).await?;
         info!("Pushed new helm chart {helm_chart}");
 
-        error!("TODO Make building image configurable/skippable");
         let image = self.application.build_push_oci_image().await?;
         info!("Pushed new docker image {image}");
 
@@ -195,7 +194,7 @@ impl<
                 version: Version::from("0.1.0").unwrap(),
                 helm_chart_repo_url: Url::Url(url::Url::parse("oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart/harmony-example-rust-webapp-chart").unwrap()),
                 helm_chart_name: "harmony-example-rust-webapp-chart".to_string(),
-                values_overrides: None,
+                values_overrides: Value::Null,
                 name: "harmony-demo-rust-webapp".to_string(),
                 namespace: "harmonydemo-staging".to_string(),
             })],
@@ -227,7 +226,7 @@ pub struct CDApplicationConfig {
     pub version: Version,
     pub helm_chart_repo_url: Url,
     pub helm_chart_name: String,
-    pub values_overrides: Option<Value>,
+    pub values_overrides: Value,
     pub name: String,
     pub namespace: String,
 }

@@ -1,6 +1,5 @@
 use async_trait::async_trait;
 use k8s_openapi::Resource;
-use log::error;
 use non_blank_string_rs::NonBlankString;
 use serde::Serialize;
 use std::str::FromStr;
@@ -51,7 +50,6 @@ impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for ArgoInterpret {
         inventory: &Inventory,
         topology: &T,
     ) -> Result<Outcome, InterpretError> {
-        error!("Uncomment below, only disabled for debugging");
         self.score
             .create_interpret()
             .execute(inventory, topology)

@@ -2,45 +2,18 @@ use async_trait::async_trait;
 use log::info;
 
 use crate::{
-    inventory::Inventory,
-    modules::{
-        application::{Application, ApplicationFeature},
-        monitoring::{
-            application_monitoring::k8s_application_monitoring_score::ApplicationPrometheusMonitoringScore,
-            kube_prometheus::{
-                helm_prometheus_alert_score::HelmPrometheusAlertingScore,
-                types::{NamespaceSelector, ServiceMonitor},
-            },
-        },
-    },
-    score::Score,
-    topology::{HelmCommand, Topology, tenant::TenantManager},
+    modules::application::ApplicationFeature,
+    topology::{HelmCommand, Topology},
 };
 
 #[derive(Debug, Default, Clone)]
 pub struct Monitoring {}
 
 #[async_trait]
-impl<T: Topology + HelmCommand + 'static + TenantManager> ApplicationFeature<T> for Monitoring {
-    async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
+impl<T: Topology + HelmCommand + 'static> ApplicationFeature<T> for Monitoring {
+    async fn ensure_installed(&self, _topology: &T) -> Result<(), String> {
         info!("Ensuring monitoring is available for application");
-        let mut service_monitor = ServiceMonitor::default();
-        service_monitor.namespace_selector = Some(NamespaceSelector {
-            any: true,
-            match_names: vec![],
-        });
-        let alerting_score = ApplicationPrometheusMonitoringScore {
-            receivers: vec![],
-            rules: vec![],
-            service_monitors: vec![service_monitor],
-        };
-
-        alerting_score
-            .create_interpret()
-            .execute(&Inventory::empty(), topology)
-            .await
-            .unwrap();
-        Ok(())
+        todo!("create and execute k8s prometheus score, depends on Will's work")
     }
     fn name(&self) -> String {
         "Monitoring".to_string()

@@ -10,25 +10,14 @@ use crate::{
     topology::{HttpServer, Topology, Url},
 };
 
-/// Configure an HTTP server that is provided by the Topology
-///
-/// This Score will let you easily specify a file path to be served by the HTTP server
-///
-/// For example, if you have a folder of assets at `/var/www/assets` simply do :
-///
-/// ```rust,ignore
-/// StaticFilesHttpScore {
-///     files_to_serve: url!("file:///var/www/assets"),
-/// }
-/// ```
 #[derive(Debug, new, Clone, Serialize)]
-pub struct StaticFilesHttpScore {
+pub struct HttpScore {
     files_to_serve: Url,
 }
 
-impl<T: Topology + HttpServer> Score<T> for StaticFilesHttpScore {
+impl<T: Topology + HttpServer> Score<T> for HttpScore {
     fn create_interpret(&self) -> Box<dyn Interpret<T>> {
-        Box::new(StaticFilesHttpInterpret::new(self.clone()))
+        Box::new(HttpInterpret::new(self.clone()))
     }
 
     fn name(&self) -> String {
@@ -37,12 +26,12 @@ impl<T: Topology + HttpServer> Score<T> for StaticFilesHttpScore {
 }
 
 #[derive(Debug, new, Clone)]
-pub struct StaticFilesHttpInterpret {
-    score: StaticFilesHttpScore,
+pub struct HttpInterpret {
+    score: HttpScore,
 }
 
 #[async_trait]
-impl<T: Topology + HttpServer> Interpret<T> for StaticFilesHttpInterpret {
+impl<T: Topology + HttpServer> Interpret<T> for HttpInterpret {
     async fn execute(
         &self,
         _inventory: &Inventory,

@@ -4,12 +4,9 @@ use serde_yaml::{Mapping, Value};
 
 use crate::{
     interpret::{InterpretError, Outcome},
-    modules::monitoring::{
-        kube_prometheus::{
-            prometheus::{KubePrometheus, KubePrometheusReceiver},
-            types::{AlertChannelConfig, AlertManagerChannelConfig},
-        },
-        prometheus::prometheus::{Prometheus, PrometheusReceiver},
+    modules::monitoring::kube_prometheus::{
+        prometheus::{Prometheus, PrometheusReceiver},
+        types::{AlertChannelConfig, AlertManagerChannelConfig},
     },
     topology::{Url, oberservability::monitoring::AlertReceiver},
 };
@@ -40,26 +37,6 @@ impl PrometheusReceiver for DiscordWebhook {
     }
 }
 
-#[async_trait]
-impl AlertReceiver<KubePrometheus> for DiscordWebhook {
-    async fn install(&self, sender: &KubePrometheus) -> Result<Outcome, InterpretError> {
-        sender.install_receiver(self).await
-    }
-    fn clone_box(&self) -> Box<dyn AlertReceiver<KubePrometheus>> {
-        Box::new(self.clone())
-    }
-}
-
-#[async_trait]
-impl KubePrometheusReceiver for DiscordWebhook {
-    fn name(&self) -> String {
-        self.name.clone()
-    }
-    async fn configure_receiver(&self) -> AlertManagerChannelConfig {
-        self.get_config().await
-    }
-}
-
 #[async_trait]
 impl AlertChannelConfig for DiscordWebhook {
     async fn get_config(&self) -> AlertManagerChannelConfig {

@@ -4,12 +4,9 @@ use serde_yaml::{Mapping, Value};
 
 use crate::{
     interpret::{InterpretError, Outcome},
-    modules::monitoring::{
-        kube_prometheus::{
-            prometheus::{KubePrometheus, KubePrometheusReceiver},
-            types::{AlertChannelConfig, AlertManagerChannelConfig},
-        },
-        prometheus::prometheus::{Prometheus, PrometheusReceiver},
+    modules::monitoring::kube_prometheus::{
+        prometheus::{Prometheus, PrometheusReceiver},
+        types::{AlertChannelConfig, AlertManagerChannelConfig},
     },
     topology::{Url, oberservability::monitoring::AlertReceiver},
 };
@@ -39,25 +36,6 @@ impl PrometheusReceiver for WebhookReceiver {
         self.get_config().await
     }
 }
-#[async_trait]
-impl AlertReceiver<KubePrometheus> for WebhookReceiver {
-    async fn install(&self, sender: &KubePrometheus) -> Result<Outcome, InterpretError> {
-        sender.install_receiver(self).await
-    }
-    fn clone_box(&self) -> Box<dyn AlertReceiver<KubePrometheus>> {
-        Box::new(self.clone())
-    }
-}
-
-#[async_trait]
-impl KubePrometheusReceiver for WebhookReceiver {
-    fn name(&self) -> String {
-        self.name.clone()
-    }
-    async fn configure_receiver(&self) -> AlertManagerChannelConfig {
-        self.get_config().await
-    }
-}
 
 #[async_trait]
 impl AlertChannelConfig for WebhookReceiver {

@@ -5,26 +5,13 @@ use serde::Serialize;
 
 use crate::{
     interpret::{InterpretError, Outcome},
-    modules::monitoring::{
-        kube_prometheus::{
-            prometheus::{KubePrometheus, KubePrometheusRule},
-            types::{AlertGroup, AlertManagerAdditionalPromRules},
-        },
-        prometheus::prometheus::{Prometheus, PrometheusRule},
+    modules::monitoring::kube_prometheus::{
+        prometheus::{Prometheus, PrometheusRule},
+        types::{AlertGroup, AlertManagerAdditionalPromRules},
     },
     topology::oberservability::monitoring::AlertRule,
 };
 
-#[async_trait]
-impl AlertRule<KubePrometheus> for AlertManagerRuleGroup {
-    async fn install(&self, sender: &KubePrometheus) -> Result<Outcome, InterpretError> {
-        sender.install_rule(&self).await
-    }
-    fn clone_box(&self) -> Box<dyn AlertRule<KubePrometheus>> {
-        Box::new(self.clone())
-    }
-}
-
 #[async_trait]
 impl AlertRule<Prometheus> for AlertManagerRuleGroup {
     async fn install(&self, sender: &Prometheus) -> Result<Outcome, InterpretError> {
@@ -54,25 +41,6 @@ impl PrometheusRule for AlertManagerRuleGroup {
         }
     }
 }
-#[async_trait]
-impl KubePrometheusRule for AlertManagerRuleGroup {
-    fn name(&self) -> String {
-        self.name.clone()
-    }
-    async fn configure_rule(&self) -> AlertManagerAdditionalPromRules {
-        let mut additional_prom_rules = BTreeMap::new();
-
-        additional_prom_rules.insert(
-            self.name.clone(),
-            AlertGroup {
-                groups: vec![self.clone()],
-            },
-        );
-        AlertManagerAdditionalPromRules {
-            rules: additional_prom_rules,
-        }
-    }
-}
 
 impl AlertManagerRuleGroup {
     pub fn new(name: &str, rules: Vec<PrometheusAlertRule>) -> AlertManagerRuleGroup {

@@ -1,41 +0,0 @@
-use std::sync::{Arc, Mutex};
-
-use serde::Serialize;
-
-use crate::{
-    modules::monitoring::{
-        kube_prometheus::types::ServiceMonitor,
-        prometheus::{prometheus::Prometheus, prometheus_config::PrometheusConfig},
-    },
-    score::Score,
-    topology::{
-        HelmCommand, Topology,
-        oberservability::monitoring::{AlertReceiver, AlertRule, AlertingInterpret},
-        tenant::TenantManager,
-    },
-};
-
-#[derive(Clone, Debug, Serialize)]
-pub struct ApplicationPrometheusMonitoringScore {
-    pub receivers: Vec<Box<dyn AlertReceiver<Prometheus>>>,
-    pub rules: Vec<Box<dyn AlertRule<Prometheus>>>,
-    pub service_monitors: Vec<ServiceMonitor>,
-}
-
-impl<T: Topology + HelmCommand + TenantManager> Score<T> for ApplicationPrometheusMonitoringScore {
-    fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
-        let config = Arc::new(Mutex::new(PrometheusConfig::new()));
-        config
-            .try_lock()
-            .expect("couldn't lock config")
-            .additional_service_monitors = self.service_monitors.clone();
-        Box::new(AlertingInterpret {
-            sender: Prometheus::new(),
-            receivers: self.receivers.clone(),
-            rules: self.rules.clone(),
-        })
-    }
-    fn name(&self) -> String {
-        "ApplicationPrometheusMonitoringScore".to_string()
-    }
-}

@@ -1 +0,0 @@
-pub mod k8s_application_monitoring_score;

@@ -1,28 +0,0 @@
-use non_blank_string_rs::NonBlankString;
-use std::str::FromStr;
-
-use crate::modules::helm::chart::HelmChartScore;
-
-pub fn grafana_helm_chart_score(ns: &str) -> HelmChartScore {
-    let values = format!(
-        r#"
-rbac:
-  namespaced: true
-sidecar:
-  dashboards:
-    enabled: true
-"#
-    );
-
-    HelmChartScore {
-        namespace: Some(NonBlankString::from_str(ns).unwrap()),
-        release_name: NonBlankString::from_str("grafana").unwrap(),
-        chart_name: NonBlankString::from_str("oci://ghcr.io/grafana/helm-charts/grafana").unwrap(),
-        chart_version: None,
-        values_overrides: None,
-        values_yaml: Some(values.to_string()),
-        create_namespace: true,
-        install_only: true,
-        repository: None,
-    }
-}

@@ -1 +0,0 @@
-pub mod helm_grafana;

@@ -1 +0,0 @@
-pub mod helm;

@@ -68,9 +68,6 @@ pub fn kube_prometheus_helm_chart_score(
 
     let mut values = format!(
         r#"
-global:
-  rbac:
-    create: false
 prometheus:
   enabled: {prometheus}
   prometheusSpec:
@@ -245,7 +242,7 @@ prometheus-node-exporter:
       cpu: 200m
       memory: 250Mi
 prometheusOperator:
-  enabled: false
+  enabled: {prometheus_operator}
   resources:
     requests:
       cpu: 100m

@@ -2,7 +2,7 @@ use std::sync::{Arc, Mutex};
 
 use serde::Serialize;
 
-use super::{helm::config::KubePrometheusConfig, prometheus::KubePrometheus};
+use super::{helm::config::KubePrometheusConfig, prometheus::Prometheus};
 use crate::{
     modules::monitoring::kube_prometheus::types::ServiceMonitor,
     score::Score,
@@ -15,8 +15,8 @@ use crate::{
 
 #[derive(Clone, Debug, Serialize)]
 pub struct HelmPrometheusAlertingScore {
-    pub receivers: Vec<Box<dyn AlertReceiver<KubePrometheus>>>,
-    pub rules: Vec<Box<dyn AlertRule<KubePrometheus>>>,
+    pub receivers: Vec<Box<dyn AlertReceiver<Prometheus>>>,
+    pub rules: Vec<Box<dyn AlertRule<Prometheus>>>,
     pub service_monitors: Vec<ServiceMonitor>,
 }
 
@@ -28,7 +28,7 @@ impl<T: Topology + HelmCommand + TenantManager> Score<T> for HelmPrometheusAlert
             .expect("couldn't lock config")
             .additional_service_monitors = self.service_monitors.clone();
         Box::new(AlertingInterpret {
-            sender: KubePrometheus::new(),
+            sender: Prometheus::new(),
             receivers: self.receivers.clone(),
             rules: self.rules.clone(),
         })

@@ -27,14 +27,14 @@ use super::{
 };
 
 #[async_trait]
-impl AlertSender for KubePrometheus {
+impl AlertSender for Prometheus {
     fn name(&self) -> String {
         "HelmKubePrometheus".to_string()
     }
 }
 
 #[async_trait]
-impl<T: Topology + HelmCommand + TenantManager> Installable<T> for KubePrometheus {
+impl<T: Topology + HelmCommand + TenantManager> Installable<T> for Prometheus {
     async fn configure(&self, _inventory: &Inventory, topology: &T) -> Result<(), InterpretError> {
         self.configure_with_topology(topology).await;
         Ok(())
@@ -51,11 +51,11 @@ impl<T: Topology + HelmCommand + TenantManager> Installable<T> for KubePrometheu
 }
 
 #[derive(Debug)]
-pub struct KubePrometheus {
+pub struct Prometheus {
     pub config: Arc<Mutex<KubePrometheusConfig>>,
 }
 
-impl KubePrometheus {
+impl Prometheus {
     pub fn new() -> Self {
         Self {
             config: Arc::new(Mutex::new(KubePrometheusConfig::new())),
@@ -75,7 +75,7 @@ impl KubePrometheus {
 
     pub async fn install_receiver(
         &self,
-        prometheus_receiver: &dyn KubePrometheusReceiver,
+        prometheus_receiver: &dyn PrometheusReceiver,
     ) -> Result<Outcome, InterpretError> {
         let prom_receiver = prometheus_receiver.configure_receiver().await;
         debug!(
@@ -120,12 +120,12 @@ impl KubePrometheus {
 }
 
 #[async_trait]
-pub trait KubePrometheusReceiver: Send + Sync + std::fmt::Debug {
+pub trait PrometheusReceiver: Send + Sync + std::fmt::Debug {
     fn name(&self) -> String;
     async fn configure_receiver(&self) -> AlertManagerChannelConfig;
 }
 
-impl Serialize for Box<dyn AlertReceiver<KubePrometheus>> {
+impl Serialize for Box<dyn AlertReceiver<Prometheus>> {
     fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
     where
         S: serde::Serializer,
@@ -134,19 +134,19 @@ impl Serialize for Box<dyn AlertReceiver<KubePrometheus>> {
     }
 }
 
-impl Clone for Box<dyn AlertReceiver<KubePrometheus>> {
+impl Clone for Box<dyn AlertReceiver<Prometheus>> {
     fn clone(&self) -> Self {
         self.clone_box()
     }
 }
 
 #[async_trait]
-pub trait KubePrometheusRule: Send + Sync + std::fmt::Debug {
+pub trait PrometheusRule: Send + Sync + std::fmt::Debug {
     fn name(&self) -> String;
     async fn configure_rule(&self) -> AlertManagerAdditionalPromRules;
 }
 
-impl Serialize for Box<dyn AlertRule<KubePrometheus>> {
+impl Serialize for Box<dyn AlertRule<Prometheus>> {
     fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
     where
         S: serde::Serializer,
@@ -155,7 +155,7 @@ impl Serialize for Box<dyn AlertRule<KubePrometheus>> {
     }
 }
 
-impl Clone for Box<dyn AlertRule<KubePrometheus>> {
+impl Clone for Box<dyn AlertRule<Prometheus>> {
     fn clone(&self) -> Self {
         self.clone_box()
     }

@@ -212,7 +212,7 @@ pub struct ServiceMonitor {
     pub name: String,
 
     // # Additional labels to set used for the ServiceMonitorSelector. Together with standard labels from the chart
-    pub additional_labels: Option<HashMap<String, String>>,
+    pub additional_labels: Option<Mapping>,
 
     // # Service label for use in assembling a job name of the form <label value>-<port>
     // # If no label is specified, the service name is used.
@@ -240,7 +240,7 @@ pub struct ServiceMonitor {
     // any: bool,
     // # Explicit list of namespace names to select
     // matchNames: Vec,
-    pub namespace_selector: Option<NamespaceSelector>,
+    pub namespace_selector: Option<Mapping>,
 
     // # Endpoints of the selected service to be monitored
     pub endpoints: Vec<ServiceMonitorEndpoint>,
@@ -250,13 +250,6 @@ pub struct ServiceMonitor {
     pub fallback_scrape_protocol: Option<String>,
 }
 
-#[derive(Debug, Serialize, Clone)]
-#[serde(rename_all = "camelCase")]
-pub struct NamespaceSelector {
-    pub any: bool,
-    pub match_names: Vec<String>,
-}
-
 impl Default for ServiceMonitor {
     fn default() -> Self {
         Self {

@@ -1,7 +1,4 @@
 pub mod alert_channel;
 pub mod alert_rule;
-pub mod application_monitoring;
-pub mod grafana;
 pub mod kube_prometheus;
 pub mod ntfy;
-pub mod prometheus;

@@ -1 +0,0 @@
-pub mod prometheus_helm;

@@ -1,47 +0,0 @@
-use std::str::FromStr;
-use std::sync::{Arc, Mutex};
-
-use non_blank_string_rs::NonBlankString;
-
-use crate::modules::{
-    helm::chart::HelmChartScore, monitoring::prometheus::prometheus_config::PrometheusConfig,
-};
-
-pub fn prometheus_helm_chart_score(config: Arc<Mutex<PrometheusConfig>>) -> HelmChartScore {
-    let config = config.lock().unwrap();
-    let ns = config.namespace.clone().unwrap();
-    let values = format!(
-        r#"
-rbac:
-  create: true
-kube-state-metrics:
-  enabled: false
-nodeExporter:
-  enabled: false
-alertmanager:
-  enabled: false
-pushgateway:
-  enabled: false
-server:
-  serviceAccount:
-    create: false
-  rbac:
-    create: true
-fullnameOverride: prometheus-{ns}
-"#
-    );
-    HelmChartScore {
-        namespace: Some(NonBlankString::from_str(&config.namespace.clone().unwrap()).unwrap()),
-        release_name: NonBlankString::from_str("prometheus").unwrap(),
-        chart_name: NonBlankString::from_str(
-            "oci://ghcr.io/prometheus-community/charts/prometheus",
-        )
-        .unwrap(),
-        chart_version: None,
-        values_overrides: None,
-        values_yaml: Some(values.to_string()),
-        create_namespace: true,
-        install_only: true,
-        repository: None,
-    }
-}

@@ -1,3 +0,0 @@
-pub mod helm;
-pub mod prometheus;
-pub mod prometheus_config;

@@ -1,190 +0,0 @@
-use std::sync::{Arc, Mutex};
-
-use async_trait::async_trait;
-use log::{debug, error};
-use serde::Serialize;
-
-use crate::{
-    interpret::{InterpretError, Outcome},
-    inventory::Inventory,
-    modules::monitoring::{
-        alert_rule::prometheus_alert_rule::AlertManagerRuleGroup,
-        grafana::helm::helm_grafana::grafana_helm_chart_score,
-        kube_prometheus::types::{AlertManagerAdditionalPromRules, AlertManagerChannelConfig},
-    },
-    score::Score,
-    topology::{
-        HelmCommand, Topology,
-        installable::Installable,
-        oberservability::monitoring::{AlertReceiver, AlertRule, AlertSender},
-        tenant::TenantManager,
-    },
-};
-
-use super::{
-    helm::prometheus_helm::prometheus_helm_chart_score, prometheus_config::PrometheusConfig,
-};
-
-#[derive(Debug)]
-pub struct Prometheus {
-    pub config: Arc<Mutex<PrometheusConfig>>,
-}
-
-#[async_trait]
-impl AlertSender for Prometheus {
-    fn name(&self) -> String {
-        "Prometheus".to_string()
-    }
-}
-
-impl Prometheus {
-    pub fn new() -> Self {
-        Self {
-            config: Arc::new(Mutex::new(PrometheusConfig::new())),
-        }
-    }
-    pub async fn configure_with_topology<T: TenantManager>(&self, topology: &T) {
-        let ns = topology
-            .get_tenant_config()
-            .await
-            .map(|cfg| cfg.name.clone())
-            .unwrap_or_else(|| "monitoring".to_string());
-        error!("This must be refactored, see comments in pr #74");
-        debug!("NS: {}", ns);
-        self.config.lock().unwrap().namespace = Some(ns);
-    }
-
-    pub async fn install_receiver(
-        &self,
-        prometheus_receiver: &dyn PrometheusReceiver,
-    ) -> Result<Outcome, InterpretError> {
-        let prom_receiver = prometheus_receiver.configure_receiver().await;
-        debug!(
-            "adding alert receiver to prometheus config: {:#?}",
-            &prom_receiver
-        );
-        let mut config = self.config.lock().unwrap();
-
-        config.alert_receiver_configs.push(prom_receiver);
-        let prom_receiver_name = prometheus_receiver.name();
-        debug!("installed alert receiver {}", &prom_receiver_name);
-        Ok(Outcome::success(format!(
-            "Sucessfully installed receiver {}",
-            prom_receiver_name
-        )))
-    }
-
-    pub async fn install_rule(
-        &self,
-        prometheus_rule: &AlertManagerRuleGroup,
-    ) -> Result<Outcome, InterpretError> {
-        let prometheus_rule = prometheus_rule.configure_rule().await;
-        let mut config = self.config.lock().unwrap();
-
-        config.alert_rules.push(prometheus_rule.clone());
-        Ok(Outcome::success(format!(
-            "Successfully installed alert rule: {:#?},",
-            prometheus_rule
-        )))
-    }
-
-    pub async fn install_prometheus<T: Topology + HelmCommand + Send + Sync>(
-        &self,
-        inventory: &Inventory,
-        topology: &T,
-    ) -> Result<Outcome, InterpretError> {
-        prometheus_helm_chart_score(self.config.clone())
-            .create_interpret()
-            .execute(inventory, topology)
-            .await
-    }
-    pub async fn install_grafana<T: Topology + HelmCommand + Send + Sync>(
-        &self,
-        inventory: &Inventory,
-        topology: &T,
-    ) -> Result<Outcome, InterpretError> {
-        let namespace = {
-            let config = self.config.lock().unwrap();
-            config.namespace.clone()
-        };
-
-        if let Some(ns) = namespace.as_deref() {
-            grafana_helm_chart_score(ns)
-                .create_interpret()
-                .execute(inventory, topology)
-                .await
-        } else {
-            Err(InterpretError::new(format!(
-                "could not install grafana, missing namespace",
-            )))
-        }
-    }
-}
-#[async_trait]
-impl<T: Topology + HelmCommand + TenantManager> Installable<T> for Prometheus {
-    async fn configure(&self, _inventory: &Inventory, topology: &T) -> Result<(), InterpretError> {
-        self.configure_with_topology(topology).await;
-        Ok(())
-    }
-
-    async fn ensure_installed(
-        &self,
-        inventory: &Inventory,
-        topology: &T,
-    ) -> Result<(), InterpretError> {
-        self.install_prometheus(inventory, topology).await?;
-
-        let install_grafana = {
-            let config = self.config.lock().unwrap();
-            config.grafana
-        };
-
-        if install_grafana {
-            self.install_grafana(inventory, topology).await?;
-        }
-
-        Ok(())
-    }
-}
-
-#[async_trait]
-pub trait PrometheusReceiver: Send + Sync + std::fmt::Debug {
-    fn name(&self) -> String;
-    async fn configure_receiver(&self) -> AlertManagerChannelConfig;
-}
-
-impl Serialize for Box<dyn AlertReceiver<Prometheus>> {
-    fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
-    where
-        S: serde::Serializer,
-    {
-        todo!()
-    }
-}
-
-impl Clone for Box<dyn AlertReceiver<Prometheus>> {
-    fn clone(&self) -> Self {
-        self.clone_box()
-    }
-}
-
-#[async_trait]
-pub trait PrometheusRule: Send + Sync + std::fmt::Debug {
-    fn name(&self) -> String;
-    async fn configure_rule(&self) -> AlertManagerAdditionalPromRules;
-}
-
-impl Serialize for Box<dyn AlertRule<Prometheus>> {
-    fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
-    where
-        S: serde::Serializer,
-    {
-        todo!()
-    }
-}
-
-impl Clone for Box<dyn AlertRule<Prometheus>> {
-    fn clone(&self) -> Self {
-        self.clone_box()
-    }
-}

@@ -1,34 +0,0 @@
-use crate::modules::monitoring::kube_prometheus::types::{
-    AlertManagerAdditionalPromRules, AlertManagerChannelConfig, ServiceMonitor,
-};
-
-#[derive(Debug)]
-pub struct PrometheusConfig {
-    pub namespace: Option<String>,
-    pub default_rules: bool,
-    pub alert_manager: bool,
-    pub node_exporter: bool,
-    pub kube_state_metrics: bool,
-    pub grafana: bool,
-    pub prometheus_pushgateway: bool,
-    pub alert_receiver_configs: Vec<AlertManagerChannelConfig>,
-    pub alert_rules: Vec<AlertManagerAdditionalPromRules>,
-    pub additional_service_monitors: Vec<ServiceMonitor>,
-}
-
-impl PrometheusConfig {
-    pub fn new() -> Self {
-        Self {
-            namespace: None,
-            default_rules: true,
-            alert_manager: true,
-            node_exporter: false,
-            kube_state_metrics: false,
-            grafana: true,
-            prometheus_pushgateway: false,
-            alert_receiver_configs: vec![],
-            alert_rules: vec![],
-            additional_service_monitors: vec![],
-        }
-    }
-}

@@ -22,4 +22,4 @@ tokio-util = { version = "0.7.13", features = [ "codec" ] }
 tokio-stream = "0.1.17"
 
 [dev-dependencies]
-pretty_assertions.workspace = true
+pretty_assertions = "1.4.1"