Worked with Will to get monitoring + ntfy demoed/tested
Some checks failed
Run Check Script / check (pull_request) Failing after -47s
parent 81bee7e12a
commit d0d80aee28
@@ -14,6 +14,7 @@ async fn main() {
     maestro.register_all(vec![Box::new(NtfyScore {
         namespace: "monitoring".to_string(),
+        host: "localhost".to_string(),
     })]);
     harmony_cli::init(maestro, None).await.unwrap();
 }
@@ -159,8 +159,8 @@ impl<
         info!("Pushed new helm chart {helm_chart}");

         error!("TODO Make building image configurable/skippable");
-        let image = self.application.build_push_oci_image().await?;
-        info!("Pushed new docker image {image}");
+        // let image = self.application.build_push_oci_image().await?;
+        // info!("Pushed new docker image {image}");

         info!("Installing ContinuousDelivery feature");
         // TODO this is a temporary hack for demo purposes, the deployment target should be driven
@@ -1,6 +1,6 @@
 use async_trait::async_trait;
 use base64::{Engine as _, engine::general_purpose};
-use log::info;
+use log::{debug, info};

 use crate::{
     inventory::Inventory,
@@ -9,7 +9,10 @@ use crate::{
     monitoring::{
         alert_channel::webhook_receiver::WebhookReceiver,
+        application_monitoring::k8s_application_monitoring_score::ApplicationPrometheusMonitoringScore,
-        kube_prometheus::types::{NamespaceSelector, ServiceMonitor},
+        kube_prometheus::{
+            helm_prometheus_alert_score::HelmPrometheusAlertingScore,
+            types::{NamespaceSelector, ServiceMonitor},
+        },
         ntfy::ntfy::NtfyScore,
     },
 },
@@ -28,11 +31,13 @@ impl<T: Topology + HelmCommand + K8sclient + 'static + TenantManager> Applicatio
         info!("Ensuring monitoring is available for application");

         let ntfy = NtfyScore {
-            namespace: topology
-                .get_tenant_config()
-                .await
-                .expect("couldn't get tenant config")
-                .name,
+            // namespace: topology
+            //     .get_tenant_config()
+            //     .await
+            //     .expect("couldn't get tenant config")
+            //     .name,
+            namespace: "harmonydemo-staging".to_string(),
+            host: "localhost".to_string(),
         };
         ntfy.create_interpret()
             .execute(&Inventory::empty(), topology)
@@ -48,11 +53,13 @@ impl<T: Topology + HelmCommand + K8sclient + 'static + TenantManager> Applicatio
             ))
         );

+        debug!("ntfy_default_auth_header: {ntfy_default_auth_header}");
+
         let ntfy_default_auth_param = general_purpose::STANDARD
             .encode(ntfy_default_auth_header)
-            .rsplit("=")
-            .collect::<Vec<&str>>()[0]
-            .to_string();
+            .replace("=", "");
+
+        debug!("ntfy_default_auth_param: {ntfy_default_auth_param}");

         let ntfy_receiver = WebhookReceiver {
             name: "ntfy-webhook".to_string(),
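
Side note on the auth-param fix above: `.rsplit("=").collect::<Vec<&str>>()[0]` takes the segment after the last '=', so for padded base64 output (anything ending in '=') it returned an empty string. Stripping the padding with `.replace("=", "")` keeps the whole token, matching the padding-free value ntfy expects in its `?auth=` query parameter. A minimal sketch of the fixed logic, with a hypothetical helper name and placeholder credentials:

    use base64::{Engine as _, engine::general_purpose};

    // Hypothetical helper: base64-encode the basic-auth header, then strip the
    // '=' padding so the token can travel in a URL query parameter.
    fn ntfy_auth_param(user: &str, pass: &str) -> String {
        let header = format!(
            "Basic {}",
            general_purpose::STANDARD.encode(format!("{user}:{pass}"))
        );
        general_purpose::STANDARD.encode(header).replace('=', "")
    }
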
@@ -60,7 +67,7 @@ impl<T: Topology + HelmCommand + K8sclient + 'static + TenantManager> Applicatio
                 url::Url::parse(
                     format!(
                         "http://ntfy.{}.svc.cluster.local/rust-web-app?auth={ntfy_default_auth_param}",
-                        topology.get_tenant_config().await.expect("couldn't get tenant config").name
+                        "harmonydemo-staging".to_string()
                     )
                     .as_str(),
                 )
@@ -73,7 +80,16 @@ impl<T: Topology + HelmCommand + K8sclient + 'static + TenantManager> Applicatio
             any: true,
             match_names: vec![],
         });
-        let alerting_score = ApplicationPrometheusMonitoringScore {
+
+        service_monitor.name = "rust-webapp".to_string();
+
+        // let alerting_score = ApplicationPrometheusMonitoringScore {
+        //     receivers: vec![Box::new(ntfy_receiver)],
+        //     rules: vec![],
+        //     service_monitors: vec![service_monitor],
+        // };
+
+        let alerting_score = HelmPrometheusAlertingScore {
             receivers: vec![Box::new(ntfy_receiver)],
             rules: vec![],
             service_monitors: vec![service_monitor],
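
One caveat on the hunk above: the added `service_monitor.name = "rust-webapp".to_string();` only compiles if the binding created earlier in this function is mutable, i.e. something like the following (assumed; the declaration sits outside this hunk):

    // Assumed declaration, not shown in this diff: must be `mut` for the
    // rename above to compile.
    let mut service_monitor = ServiceMonitor { /* fields as constructed above */ };
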
@@ -24,7 +24,10 @@ pub struct ApplicationPrometheusMonitoringScore {

 impl<T: Topology + HelmCommand + TenantManager> Score<T> for ApplicationPrometheusMonitoringScore {
     fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
-        let config = Arc::new(Mutex::new(PrometheusConfig::new()));
+        let mut prom_config = PrometheusConfig::new();
+        prom_config.alert_manager = true;
+
+        let config = Arc::new(Mutex::new(prom_config));
         config
             .try_lock()
             .expect("couldn't lock config")
@@ -38,15 +38,15 @@ impl KubePrometheusConfig {
             node_exporter: false,
             prometheus: true,
             kubernetes_service_monitors: true,
-            kubernetes_api_server: false,
+            kubernetes_api_server: true,
             kubelet: true,
-            kube_controller_manager: false,
-            kube_etcd: false,
-            kube_proxy: false,
+            kube_controller_manager: true,
+            kube_etcd: true,
+            kube_proxy: true,
             kube_state_metrics: true,
             prometheus_operator: true,
-            core_dns: false,
-            kube_scheduler: false,
+            core_dns: true,
+            kube_scheduler: true,
             alert_receiver_configs: vec![],
             alert_rules: vec![],
             additional_service_monitors: vec![],
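
These config flags are interpolated into the helm values template in the hunks below via `format!` placeholders such as `{kube_etcd}` and `{core_dns}`. A simplified sketch of that mechanism (the one-field example is hypothetical):

    // Simplified sketch: a config flag rendered into the YAML values string.
    let kube_etcd = true;
    let values = format!("kubeEtcd:\n  enabled: {kube_etcd}\n");
    assert_eq!(values, "kubeEtcd:\n  enabled: true\n");
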
@@ -70,12 +70,12 @@ pub fn kube_prometheus_helm_chart_score(
         r#"
 global:
   rbac:
-    create: false
+    create: true
 prometheus:
   enabled: {prometheus}
   prometheusSpec:
     resources:
-    requests:
+      requests:
         cpu: 100m
         memory: 500Mi
       limits:
@@ -121,7 +121,7 @@ defaultRules:
 windowsMonitoring:
   enabled: {windows_monitoring}
   resources:
-  requests:
+    requests:
       cpu: 100m
       memory: 150Mi
     limits:
@@ -130,13 +130,13 @@ windowsMonitoring:
 grafana:
   enabled: {grafana}
   resources:
-  requests:
+    requests:
       cpu: 100m
       memory: 150Mi
     limits:
       cpu: 200m
       memory: 250Mi
-initChownData:
+  initChownData:
     resources:
       requests:
         cpu: 10m
@@ -157,7 +157,7 @@ kubernetesServiceMonitors:
 kubeApiServer:
   enabled: {kubernetes_api_server}
   resources:
-  requests:
+    requests:
       cpu: 100m
       memory: 150Mi
     limits:
@@ -166,7 +166,7 @@ kubeApiServer:
 kubelet:
   enabled: {kubelet}
   resources:
-  requests:
+    requests:
       cpu: 100m
       memory: 150Mi
     limits:
@@ -175,7 +175,7 @@ kubelet:
 kubeControllerManager:
   enabled: {kube_controller_manager}
   resources:
-  requests:
+    requests:
       cpu: 100m
       memory: 150Mi
     limits:
@@ -184,7 +184,7 @@ kubeControllerManager:
 coreDns:
   enabled: {core_dns}
   resources:
-  requests:
+    requests:
       cpu: 100m
       memory: 150Mi
     limits:
@@ -193,7 +193,7 @@ coreDns:
 kubeEtcd:
   enabled: {kube_etcd}
   resources:
-  requests:
+    requests:
       cpu: 100m
       memory: 150Mi
     limits:
@@ -202,7 +202,7 @@ kubeEtcd:
 kubeScheduler:
   enabled: {kube_scheduler}
   resources:
-  requests:
+    requests:
       cpu: 100m
       memory: 150Mi
     limits:
@@ -211,7 +211,7 @@ kubeScheduler:
 kubeProxy:
   enabled: {kube_proxy}
   resources:
-  requests:
+    requests:
       cpu: 100m
       memory: 150Mi
     limits:
@@ -221,7 +221,7 @@ kubeStateMetrics:
   enabled: {kube_state_metrics}
 kube-state-metrics:
   resources:
-  requests:
+    requests:
       cpu: 100m
       memory: 150Mi
     limits:
@@ -230,7 +230,7 @@ kube-state-metrics:
 nodeExporter:
   enabled: {node_exporter}
   resources:
-  requests:
+    requests:
       cpu: 100m
       memory: 150Mi
     limits:
@@ -238,16 +238,16 @@ nodeExporter:
       memory: 250Mi
 prometheus-node-exporter:
   resources:
-  requests:
+    requests:
       cpu: 100m
       memory: 150Mi
     limits:
       cpu: 200m
       memory: 250Mi
 prometheusOperator:
-  enabled: false
+  enabled: true
   resources:
-  requests:
+    requests:
       cpu: 100m
       memory: 150Mi
     limits:
@@ -255,7 +255,7 @@ prometheusOperator:
       memory: 200Mi
   prometheusConfigReloader:
     resources:
-    requests:
+      requests:
         cpu: 100m
         memory: 150Mi
       limits:
@@ -267,7 +267,7 @@ prometheusOperator:
       limits:
         cpu: 10m
         memory: 100Mi
-    requests:
+      requests:
         cpu: 10m
         memory: 100Mi
     patch:
@@ -275,7 +275,7 @@ prometheusOperator:
       limits:
         cpu: 10m
         memory: 100Mi
-    requests:
+      requests:
         cpu: 10m
         memory: 100Mi
 "#,
@@ -28,7 +28,7 @@ impl<T: Topology + HelmCommand + TenantManager> Score<T> for HelmPrometheusAlert
             .expect("couldn't lock config")
             .additional_service_monitors = self.service_monitors.clone();
         Box::new(AlertingInterpret {
-            sender: KubePrometheus::new(),
+            sender: KubePrometheus { config },
             receivers: self.receivers.clone(),
             rules: self.rules.clone(),
         })
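
For context on the sender change above: the interpret now receives the same `Arc<Mutex<…>>` handle the score just populated, rather than a fresh config from `KubePrometheus::new()`. A minimal sketch of that sharing pattern, with stand-in types assumed from the surrounding hunks:

    use std::sync::{Arc, Mutex};

    // Stand-ins assumed for illustration; the real types live in this repo.
    struct PrometheusConfig { alert_manager: bool }
    struct KubePrometheus { config: Arc<Mutex<PrometheusConfig>> }

    fn main() {
        let config = Arc::new(Mutex::new(PrometheusConfig { alert_manager: false }));
        // The score mutates the config through one handle…
        config.try_lock().expect("couldn't lock config").alert_manager = true;
        // …and the sender observes the same state through a clone of the Arc.
        let sender = KubePrometheus { config: Arc::clone(&config) };
        assert!(sender.config.try_lock().unwrap().alert_manager);
    }
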
@@ -3,7 +3,7 @@ use std::str::FromStr;

 use crate::modules::helm::chart::{HelmChartScore, HelmRepository};

-pub fn ntfy_helm_chart_score(namespace: String) -> HelmChartScore {
+pub fn ntfy_helm_chart_score(namespace: String, host: String) -> HelmChartScore {
     let values = format!(
         r#"
 replicaCount: 1
@@ -28,12 +28,12 @@ service:
   port: 80

 ingress:
-  enabled: false
+  enabled: true
   # annotations:
   #   kubernetes.io/ingress.class: nginx
   #   kubernetes.io/tls-acme: "true"
   hosts:
-    - host: ntfy.host.com
+    - host: {host}
       paths:
         - path: /
           pathType: ImplementationSpecific
@@ -17,6 +17,7 @@ use crate::{
 #[derive(Debug, Clone, Serialize)]
 pub struct NtfyScore {
     pub namespace: String,
+    pub host: String,
 }

 impl<T: Topology + HelmCommand + K8sclient> Score<T> for NtfyScore {
@@ -126,7 +127,7 @@ impl<T: Topology + HelmCommand + K8sclient> Interpret<T> for NtfyInterpret {
         inventory: &Inventory,
         topology: &T,
     ) -> Result<Outcome, InterpretError> {
-        ntfy_helm_chart_score(self.score.namespace.clone())
+        ntfy_helm_chart_score(self.score.namespace.clone(), self.score.host.clone())
             .create_interpret()
             .execute(inventory, topology)
             .await?;
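
End to end, the new `host` field now flows from `NtfyScore` through `ntfy_helm_chart_score` into the chart's `ingress.hosts` value. A hypothetical call site, with a placeholder hostname:

    // Placeholder hostname; any ingress host works here.
    let chart = ntfy_helm_chart_score("monitoring".to_string(), "ntfy.example.com".to_string());
    // The rendered values now include `ingress.enabled: true` and
    // `- host: ntfy.example.com`, per the values template above.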