worked with will to get monitoring + ntfy demoed/tested
Some checks failed
Run Check Script / check (pull_request) Failing after -47s
This commit is contained in: parent 81bee7e12a, commit d0d80aee28
@@ -14,6 +14,7 @@ async fn main() {
 
     maestro.register_all(vec![Box::new(NtfyScore {
         namespace: "monitoring".to_string(),
+        host: "localhost".to_string(),
     })]);
     harmony_cli::init(maestro, None).await.unwrap();
 }
@@ -159,8 +159,8 @@ impl<
         info!("Pushed new helm chart {helm_chart}");
 
         error!("TODO Make building image configurable/skippable");
-        let image = self.application.build_push_oci_image().await?;
-        info!("Pushed new docker image {image}");
+        // let image = self.application.build_push_oci_image().await?;
+        // info!("Pushed new docker image {image}");
 
         info!("Installing ContinuousDelivery feature");
         // TODO this is a temporary hack for demo purposes, the deployment target should be driven
@@ -1,6 +1,6 @@
 use async_trait::async_trait;
 use base64::{Engine as _, engine::general_purpose};
-use log::info;
+use log::{debug, info};
 
 use crate::{
     inventory::Inventory,
@@ -9,7 +9,10 @@ use crate::{
     monitoring::{
         alert_channel::webhook_receiver::WebhookReceiver,
         application_monitoring::k8s_application_monitoring_score::ApplicationPrometheusMonitoringScore,
-        kube_prometheus::types::{NamespaceSelector, ServiceMonitor},
+        kube_prometheus::{
+            helm_prometheus_alert_score::HelmPrometheusAlertingScore,
+            types::{NamespaceSelector, ServiceMonitor},
+        },
         ntfy::ntfy::NtfyScore,
     },
 },
@@ -28,11 +31,13 @@ impl<T: Topology + HelmCommand + K8sclient + 'static + TenantManager> Applicatio
         info!("Ensuring monitoring is available for application");
 
         let ntfy = NtfyScore {
-            namespace: topology
-                .get_tenant_config()
-                .await
-                .expect("couldn't get tenant config")
-                .name,
+            // namespace: topology
+            //     .get_tenant_config()
+            //     .await
+            //     .expect("couldn't get tenant config")
+            //     .name,
+            namespace: "harmonydemo-staging".to_string(),
+            host: "localhost".to_string(),
         };
         ntfy.create_interpret()
             .execute(&Inventory::empty(), topology)
@@ -48,11 +53,13 @@ impl<T: Topology + HelmCommand + K8sclient + 'static + TenantManager> Applicatio
             ))
         );
 
+        debug!("ntfy_default_auth_header: {ntfy_default_auth_header}");
+
         let ntfy_default_auth_param = general_purpose::STANDARD
             .encode(ntfy_default_auth_header)
-            .rsplit("=")
-            .collect::<Vec<&str>>()[0]
-            .to_string();
+            .replace("=", "");
+
+        debug!("ntfy_default_auth_param: {ntfy_default_auth_param}");
 
         let ntfy_receiver = WebhookReceiver {
             name: "ntfy-webhook".to_string(),
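
Note on the auth-param change above: swapping `.rsplit("=")...` for `.replace("=", "")` strips the base64 '=' padding so the token can ride in a bare query-string value. A minimal sketch of the same idea, assuming the base64 crate's general_purpose engines (the header value below is hypothetical):

    use base64::{Engine as _, engine::general_purpose};

    fn main() {
        // Hypothetical header value; the real one is derived from ntfy credentials.
        let ntfy_default_auth_header = "Basic dXNlcjpwYXNz";

        // The approach in this change: encode, then drop '=' padding.
        let stripped = general_purpose::STANDARD
            .encode(ntfy_default_auth_header)
            .replace("=", "");

        // The crate's no-padding engine produces the same string directly.
        let no_pad = general_purpose::STANDARD_NO_PAD.encode(ntfy_default_auth_header);
        assert_eq!(stripped, no_pad);
        println!("{stripped}");
    }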
@@ -60,7 +67,7 @@ impl<T: Topology + HelmCommand + K8sclient + 'static + TenantManager> Applicatio
             url::Url::parse(
                 format!(
                     "http://ntfy.{}.svc.cluster.local/rust-web-app?auth={ntfy_default_auth_param}",
-                    topology.get_tenant_config().await.expect("couldn't get tenant config").name
+                    "harmonydemo-staging".to_string()
                 )
                 .as_str(),
             )
@@ -73,7 +80,16 @@ impl<T: Topology + HelmCommand + K8sclient + 'static + TenantManager> Applicatio
             any: true,
             match_names: vec![],
         });
-        let alerting_score = ApplicationPrometheusMonitoringScore {
+        service_monitor.name = "rust-webapp".to_string();
+
+        // let alerting_score = ApplicationPrometheusMonitoringScore {
+        //     receivers: vec![Box::new(ntfy_receiver)],
+        //     rules: vec![],
+        //     service_monitors: vec![service_monitor],
+        // };
+
+        let alerting_score = HelmPrometheusAlertingScore {
             receivers: vec![Box::new(ntfy_receiver)],
             rules: vec![],
             service_monitors: vec![service_monitor],
@@ -24,7 +24,10 @@ pub struct ApplicationPrometheusMonitoringScore {
 
 impl<T: Topology + HelmCommand + TenantManager> Score<T> for ApplicationPrometheusMonitoringScore {
     fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
-        let config = Arc::new(Mutex::new(PrometheusConfig::new()));
+        let mut prom_config = PrometheusConfig::new();
+        prom_config.alert_manager = true;
+
+        let config = Arc::new(Mutex::new(prom_config));
         config
             .try_lock()
             .expect("couldn't lock config")
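
The pattern in the hunk above is configure-then-share: mutate the config while it is still exclusively owned, then wrap it in Arc<Mutex<..>>. A self-contained sketch with a stand-in struct (the real PrometheusConfig has more fields than shown here):

    use std::sync::{Arc, Mutex};

    // Stand-in for PrometheusConfig; only the field this diff touches is modeled.
    #[derive(Default)]
    struct PrometheusConfig {
        alert_manager: bool,
    }

    fn main() {
        // Mutate before sharing, so no locking is needed during setup.
        let mut prom_config = PrometheusConfig::default();
        prom_config.alert_manager = true;

        let config = Arc::new(Mutex::new(prom_config));
        assert!(config.try_lock().expect("couldn't lock config").alert_manager);
    }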
@@ -38,15 +38,15 @@ impl KubePrometheusConfig {
             node_exporter: false,
             prometheus: true,
             kubernetes_service_monitors: true,
-            kubernetes_api_server: false,
+            kubernetes_api_server: true,
             kubelet: true,
-            kube_controller_manager: false,
-            kube_etcd: false,
-            kube_proxy: false,
+            kube_controller_manager: true,
+            kube_etcd: true,
+            kube_proxy: true,
             kube_state_metrics: true,
             prometheus_operator: true,
-            core_dns: false,
-            kube_scheduler: false,
+            core_dns: true,
+            kube_scheduler: true,
             alert_receiver_configs: vec![],
             alert_rules: vec![],
             additional_service_monitors: vec![],
@@ -70,12 +70,12 @@ pub fn kube_prometheus_helm_chart_score(
         r#"
 global:
   rbac:
-    create: false
+    create: true
 prometheus:
   enabled: {prometheus}
   prometheusSpec:
     resources:
       requests:
         cpu: 100m
         memory: 500Mi
       limits:
@@ -121,7 +121,7 @@ defaultRules:
 windowsMonitoring:
   enabled: {windows_monitoring}
   resources:
     requests:
       cpu: 100m
       memory: 150Mi
     limits:
@@ -130,13 +130,13 @@ windowsMonitoring:
 grafana:
   enabled: {grafana}
   resources:
     requests:
       cpu: 100m
       memory: 150Mi
     limits:
       cpu: 200m
       memory: 250Mi
   initChownData:
     resources:
       requests:
         cpu: 10m
@@ -157,7 +157,7 @@ kubernetesServiceMonitors:
 kubeApiServer:
   enabled: {kubernetes_api_server}
   resources:
     requests:
       cpu: 100m
       memory: 150Mi
     limits:
@@ -166,7 +166,7 @@ kubeApiServer:
 kubelet:
   enabled: {kubelet}
   resources:
     requests:
       cpu: 100m
       memory: 150Mi
     limits:
@@ -175,7 +175,7 @@ kubelet:
 kubeControllerManager:
   enabled: {kube_controller_manager}
   resources:
     requests:
       cpu: 100m
       memory: 150Mi
     limits:
@@ -184,7 +184,7 @@ kubeControllerManager:
 coreDns:
   enabled: {core_dns}
   resources:
     requests:
       cpu: 100m
       memory: 150Mi
     limits:
@@ -193,7 +193,7 @@ coreDns:
 kubeEtcd:
   enabled: {kube_etcd}
   resources:
     requests:
       cpu: 100m
       memory: 150Mi
     limits:
@@ -202,7 +202,7 @@ kubeEtcd:
 kubeScheduler:
   enabled: {kube_scheduler}
   resources:
     requests:
       cpu: 100m
       memory: 150Mi
     limits:
@@ -211,7 +211,7 @@ kubeScheduler:
 kubeProxy:
   enabled: {kube_proxy}
   resources:
     requests:
       cpu: 100m
       memory: 150Mi
     limits:
@@ -221,7 +221,7 @@ kubeStateMetrics:
   enabled: {kube_state_metrics}
 kube-state-metrics:
   resources:
     requests:
       cpu: 100m
       memory: 150Mi
     limits:
@@ -230,7 +230,7 @@ kube-state-metrics:
 nodeExporter:
   enabled: {node_exporter}
   resources:
     requests:
       cpu: 100m
       memory: 150Mi
     limits:
@@ -238,16 +238,16 @@ nodeExporter:
       memory: 250Mi
 prometheus-node-exporter:
   resources:
     requests:
       cpu: 100m
       memory: 150Mi
     limits:
       cpu: 200m
       memory: 250Mi
 prometheusOperator:
-  enabled: false
+  enabled: true
   resources:
     requests:
       cpu: 100m
       memory: 150Mi
     limits:
@@ -255,7 +255,7 @@ prometheusOperator:
       memory: 200Mi
   prometheusConfigReloader:
     resources:
       requests:
         cpu: 100m
         memory: 150Mi
       limits:
@@ -267,7 +267,7 @@ prometheusOperator:
       limits:
         cpu: 10m
         memory: 100Mi
       requests:
         cpu: 10m
         memory: 100Mi
     patch:
@@ -275,7 +275,7 @@ prometheusOperator:
       limits:
         cpu: 10m
         memory: 100Mi
       requests:
         cpu: 10m
         memory: 100Mi
 "#,
@@ -28,7 +28,7 @@ impl<T: Topology + HelmCommand + TenantManager> Score<T> for HelmPrometheusAlert
             .expect("couldn't lock config")
             .additional_service_monitors = self.service_monitors.clone();
         Box::new(AlertingInterpret {
-            sender: KubePrometheus::new(),
+            sender: KubePrometheus { config },
             receivers: self.receivers.clone(),
             rules: self.rules.clone(),
         })
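
Why `KubePrometheus { config }` instead of `KubePrometheus::new()`: the interpret has just written receivers and service monitors into that Arc<Mutex<..>>, and moving the same handle into the sender keeps those mutations visible to it. A rough sketch of the sharing, with illustrative types (the real KubePrometheus carries more state):

    use std::sync::{Arc, Mutex};

    // Illustrative stand-in; the real sender type lives in this repository.
    struct KubePrometheus {
        config: Arc<Mutex<Vec<String>>>,
    }

    fn main() {
        let config = Arc::new(Mutex::new(Vec::new()));
        config
            .try_lock()
            .expect("couldn't lock config")
            .push("additional-service-monitor".to_string());

        // A fresh `new()` would start from an empty config; moving the Arc in
        // preserves the entries pushed above.
        let sender = KubePrometheus { config };
        assert_eq!(sender.config.try_lock().unwrap().len(), 1);
    }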
@@ -3,7 +3,7 @@ use std::str::FromStr;
 
 use crate::modules::helm::chart::{HelmChartScore, HelmRepository};
 
-pub fn ntfy_helm_chart_score(namespace: String) -> HelmChartScore {
+pub fn ntfy_helm_chart_score(namespace: String, host: String) -> HelmChartScore {
     let values = format!(
         r#"
 replicaCount: 1
@@ -28,12 +28,12 @@ service:
   port: 80
 
 ingress:
-  enabled: false
+  enabled: true
   # annotations:
   #   kubernetes.io/ingress.class: nginx
   #   kubernetes.io/tls-acme: "true"
   hosts:
-    - host: ntfy.host.com
+    - host: {host}
       paths:
         - path: /
           pathType: ImplementationSpecific
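
The `- host: {host}` line relies on `format!` capturing the new `host` parameter by name. A self-contained sketch of that templating pattern (the real values string in ntfy_helm_chart_score carries many more keys; the function below is hypothetical):

    // Renders only the ingress fragment; names mirror the diff above.
    fn render_ingress(host: &str) -> String {
        format!(
            r#"
    ingress:
      enabled: true
      hosts:
        - host: {host}
          paths:
            - path: /
              pathType: ImplementationSpecific
    "#
        )
    }

    fn main() {
        assert!(render_ingress("localhost").contains("- host: localhost"));
    }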
@@ -17,6 +17,7 @@ use crate::{
 #[derive(Debug, Clone, Serialize)]
 pub struct NtfyScore {
     pub namespace: String,
+    pub host: String,
 }
 
 impl<T: Topology + HelmCommand + K8sclient> Score<T> for NtfyScore {
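
With the struct change above, every NtfyScore construction site must now supply `host` or it will not compile; the main.rs and monitoring hunks earlier in this commit do exactly that. A pared-down sketch, with the Serialize derive dropped so it compiles stand-alone:

    #[derive(Debug, Clone)]
    pub struct NtfyScore {
        pub namespace: String,
        pub host: String,
    }

    fn main() {
        // Omitting `host` here is now a compile error.
        let ntfy = NtfyScore {
            namespace: "monitoring".to_string(),
            host: "localhost".to_string(),
        };
        println!("{ntfy:?}");
    }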
@@ -126,7 +127,7 @@ impl<T: Topology + HelmCommand + K8sclient> Interpret<T> for NtfyInterpret {
         inventory: &Inventory,
         topology: &T,
     ) -> Result<Outcome, InterpretError> {
-        ntfy_helm_chart_score(self.score.namespace.clone())
+        ntfy_helm_chart_score(self.score.namespace.clone(), self.score.host.clone())
             .create_interpret()
             .execute(inventory, topology)
             .await?;