Compare commits


3 Commits

Author SHA1 Message Date
5e1580e5c1 Merge branch 'master' into doc/clone 2025-10-23 19:32:26 +00:00
008b03f979 fix: changed documentation language to english 2025-10-23 14:56:07 -04:00
e5eb7fde9f doc to clone and transfer a coreos disk 2025-10-09 15:29:09 -04:00
19 changed files with 272 additions and 644 deletions

View File

@ -0,0 +1,127 @@
## Working procedure to clone and restore a CoreOS disk from an OKD cluster
### **Step 1 - Take a backup**
```
# /dev/old = disk to clone, /dev/backup = device (or file) receiving the full backup image
sudo dd if=/dev/old of=/dev/backup status=progress
```
### **Step 2 - Clone the beginning of the old disk to the new one**
```
# copy the first ~1000 MiB (GPT + boot partitions) from the old disk to the new disk
sudo dd if=/dev/old of=/dev/new bs=1M count=1000 status=progress
```
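The 1000 MiB figure above is only a rule of thumb. A hedged sketch, assuming the usual CoreOS layout where everything before the root partition (partition 4) sits near the start of the disk, to compute how much actually needs to be copied:
```
# find where partition 3 ends on the old disk and convert that to MiB
END_SECTOR=$(sudo sgdisk -i 3 /dev/old | awk '/Last sector/ {print $3}')
SECTOR_SIZE=$(sudo blockdev --getss /dev/old)   # usually 512
echo "copy at least $(( END_SECTOR * SECTOR_SIZE / 1024 / 1024 + 1 )) MiB"
```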
### **Step 3 - Verify and modify disk partitions**
List the disk partitions:
```
sgdisk -p /dev/new
```
If the new disk is smaller than the old one and the old disk's XFS partition has unused space, adjust the partitions on the new disk:
```
gdisk /dev/new
```
Inside gdisk, use the following commands:
```
v -> verify the table
p -> print the table
d -> delete the partition to be resized
n -> recreate it with the same partition number as the deleted one
```
For the end sector, either specify the new end or press Enter to use the maximum available.
When asked for the partition type, enter the same type code as before (gdisk displays the old one).
```
p -> print the table again to verify
w -> write the changes and exit
```
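The same resize can also be scripted non-interactively with sgdisk; a sketch, assuming partition 4 is the XFS root partition being recreated (the 8300 type code is only an example, use whatever code the old partition reports):
```
sudo sgdisk -d 4 /dev/new            # delete partition 4
sudo sgdisk -n 4:0:0 /dev/new        # recreate it: default start, grow to the end of the disk
sudo sgdisk -t 4:8300 /dev/new       # type code copied from the old partition (example: 8300)
sudo sgdisk -p /dev/new              # print the table to verify
```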
Create an XFS filesystem on the new partition (<new4>):
```
sudo mkfs.xfs -f /dev/new4
```
### **Step 4 - Copy the old PARTUUID**
**Be careful here.**
Get the old PARTUUID:
```
sgdisk -i <partition_number> /dev/old_disk # Note the "Partition unique GUID"
```
Get the partition names and labels:
```
sgdisk -p /dev/old_disk # Shows partition names in the table
blkid /dev/old_disk* # Shows PARTUUIDs and labels for all partitions
```
Set it on the new disk:
```
sgdisk -u <partition_number>:<old_partuuid> /dev/sdc
```
Copy the partition name:
```
sgdisk -c <partition_number>:"<old_name>" /dev/sdc
```
Verify everything:
```
lsblk -o NAME,SIZE,PARTUUID,PARTLABEL /dev/old_disk
lsblk -o NAME,SIZE,PARTUUID,PARTLABEL /dev/new_disk   # compare against the new disk
```
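A one-shot sketch of this step, assuming partition 4 is the one being transferred and using the same /dev/old and /dev/new placeholders as above:
```
OLD_PARTUUID=$(sudo sgdisk -i 4 /dev/old | awk -F': ' '/Partition unique GUID/ {print $2}')
OLD_NAME=$(sudo sgdisk -i 4 /dev/old | awk -F"'" '/Partition name/ {print $2}')
sudo sgdisk -u 4:"$OLD_PARTUUID" /dev/new   # set the unique GUID (PARTUUID)
sudo sgdisk -c 4:"$OLD_NAME" /dev/new       # set the partition name (PARTLABEL)
lsblk -o NAME,SIZE,PARTUUID,PARTLABEL /dev/new
```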
### **Step 5 - Mount disks and copy files from the old to the new disk**
Create mount points and mount both partitions before copying:
```
mkdir -p /mnt/new
mkdir -p /mnt/old
mount /dev/old4 /mnt/old
mount /dev/new4 /mnt/new
```
Copy the data:
```
rsync -aAXHv --numeric-ids /mnt/old/ /mnt/new/
```
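An optional sanity check (a sketch): re-run rsync as a checksum-based dry run; it should report nothing left to transfer once the copy is complete.
```
rsync -aAXHvn --checksum --numeric-ids /mnt/old/ /mnt/new/
```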
### **Step 6 - Set the correct UUID for the new partition 4**
Read the filesystem UUID from the old partition:
```
blkid /dev/old4
```
Set it on the new partition (the filesystem must be unmounted for xfs_admin):
```
xfs_admin -U <old_uuid> /dev/new_partition
```
To set the labels, first get the GPT partition name from the old disk:
```
sgdisk -i 4 /dev/sda | grep "Partition name"
```
Set it on the new disk:
```
sgdisk -c 4:"<label_name>" /dev/sdc            # GPT partition name (PARTLABEL)
xfs_admin -l /dev/old_partition                # check the existing XFS filesystem label
xfs_admin -L <label> /dev/new_partition        # set the XFS filesystem label
```
### **Step 7 - Verify**
Verify everything:
```
sgdisk -p /dev/sda # Old disk
sgdisk -p /dev/sdc # New disk
```
```
lsblk -o NAME,SIZE,PARTUUID,PARTLABEL /dev/sda
lsblk -o NAME,SIZE,PARTUUID,PARTLABEL /dev/sdc
```
```
blkid /dev/sda* | grep UUID=
blkid /dev/sdc* | grep UUID=
```
### **Step 8 - Unmount devices and finalize**
Unmount both mount points:
```
umount /mnt/new
umount /mnt/old
```
Shut down, swap in the new disk, and verify the system boots from it.
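After the swap, a quick check (sketch) that the root filesystem really lives on the new disk once the machine is back up:
```
findmnt -no SOURCE /                      # should point at the new disk's partition 4
lsblk -o NAME,SIZE,MOUNTPOINT,PARTUUID
```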

View File

@ -3,7 +3,7 @@ use harmony::{
modules::{ modules::{
application::{ application::{
ApplicationScore, RustWebFramework, RustWebapp, ApplicationScore, RustWebFramework, RustWebapp,
features::{Monitoring, PackagingDeployment}, features::{PackagingDeployment, rhob_monitoring::Monitoring},
}, },
monitoring::alert_channel::discord_alert_channel::DiscordWebhook, monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
}, },

View File

@ -3,10 +3,7 @@ use std::time::Duration;
use derive_new::new; use derive_new::new;
use k8s_openapi::{ use k8s_openapi::{
ClusterResourceScope, NamespaceResourceScope, ClusterResourceScope, NamespaceResourceScope,
api::{ api::{apps::v1::Deployment, core::v1::Pod},
apps::v1::Deployment,
core::v1::{Pod, ServiceAccount},
},
apimachinery::pkg::version::Info, apimachinery::pkg::version::Info,
}; };
use kube::{ use kube::{
@ -24,7 +21,7 @@ use kube::{
}; };
use log::{debug, error, info, trace}; use log::{debug, error, info, trace};
use serde::{Serialize, de::DeserializeOwned}; use serde::{Serialize, de::DeserializeOwned};
use serde_json::json; use serde_json::{Value, json};
use similar::TextDiff; use similar::TextDiff;
use tokio::{io::AsyncReadExt, time::sleep}; use tokio::{io::AsyncReadExt, time::sleep};
@ -60,11 +57,6 @@ impl K8sClient {
}) })
} }
pub async fn service_account_api(&self, namespace: &str) -> Api<ServiceAccount> {
let api: Api<ServiceAccount> = Api::namespaced(self.client.clone(), namespace);
api
}
pub async fn get_apiserver_version(&self) -> Result<Info, Error> { pub async fn get_apiserver_version(&self) -> Result<Info, Error> {
let client: Client = self.client.clone(); let client: Client = self.client.clone();
let version_info: Info = client.apiserver_version().await?; let version_info: Info = client.apiserver_version().await?;

View File

@ -1,12 +1,7 @@
use std::{collections::BTreeMap, process::Command, sync::Arc}; use std::{process::Command, sync::Arc};
use async_trait::async_trait; use async_trait::async_trait;
use base64::{Engine, engine::general_purpose}; use kube::api::GroupVersionKind;
use k8s_openapi::api::{
core::v1::Secret,
rbac::v1::{ClusterRoleBinding, RoleRef, Subject},
};
use kube::api::{DynamicObject, GroupVersionKind, ObjectMeta};
use log::{debug, info, warn}; use log::{debug, info, warn};
use serde::Serialize; use serde::Serialize;
use tokio::sync::OnceCell; use tokio::sync::OnceCell;
@ -17,26 +12,14 @@ use crate::{
inventory::Inventory, inventory::Inventory,
modules::{ modules::{
k3d::K3DInstallationScore, k3d::K3DInstallationScore,
k8s::ingress::{K8sIngressScore, PathType}, monitoring::kube_prometheus::crd::{
monitoring::{ crd_alertmanager_config::CRDPrometheus,
grafana::{grafana::Grafana, helm::helm_grafana::grafana_helm_chart_score}, prometheus_operator::prometheus_operator_helm_chart_score,
kube_prometheus::crd::{ rhob_alertmanager_config::RHOBObservability,
crd_alertmanager_config::CRDPrometheus,
crd_grafana::{
Grafana as GrafanaCRD, GrafanaCom, GrafanaDashboard,
GrafanaDashboardDatasource, GrafanaDashboardSpec, GrafanaDatasource,
GrafanaDatasourceConfig, GrafanaDatasourceJsonData,
GrafanaDatasourceSecureJsonData, GrafanaDatasourceSpec, GrafanaSpec,
},
crd_prometheuses::LabelSelector,
prometheus_operator::prometheus_operator_helm_chart_score,
rhob_alertmanager_config::RHOBObservability,
service_monitor::ServiceMonitor,
},
}, },
prometheus::{ prometheus::{
k8s_prometheus_alerting_score::K8sPrometheusCRDAlertingScore, k8s_prometheus_alerting_score::K8sPrometheusCRDAlertingScore,
prometheus::PrometheusMonitoring, rhob_alerting_score::RHOBAlertingScore, prometheus::PrometheusApplicationMonitoring, rhob_alerting_score::RHOBAlertingScore,
}, },
}, },
score::Score, score::Score,
@ -103,172 +86,41 @@ impl K8sclient for K8sAnywhereTopology {
} }
#[async_trait] #[async_trait]
impl Grafana for K8sAnywhereTopology { impl PrometheusApplicationMonitoring<CRDPrometheus> for K8sAnywhereTopology {
async fn ensure_grafana_operator(
&self,
inventory: &Inventory,
) -> Result<PreparationOutcome, PreparationError> {
debug!("ensure grafana operator");
let client = self.k8s_client().await.unwrap();
let grafana_gvk = GroupVersionKind {
group: "grafana.integreatly.org".to_string(),
version: "v1beta1".to_string(),
kind: "Grafana".to_string(),
};
let name = "grafanas.grafana.integreatly.org";
let ns = "grafana";
let grafana_crd = client
.get_resource_json_value(name, Some(ns), &grafana_gvk)
.await;
match grafana_crd {
Ok(_) => {
return Ok(PreparationOutcome::Success {
details: "Found grafana CRDs in cluster".to_string(),
});
}
Err(_) => {
return self
.install_grafana_operator(inventory, Some("grafana"))
.await;
}
};
}
async fn install_grafana(&self) -> Result<PreparationOutcome, PreparationError> {
let ns = "grafana";
let mut label = BTreeMap::new();
label.insert("dashboards".to_string(), "grafana".to_string());
let label_selector = LabelSelector {
match_labels: label.clone(),
match_expressions: vec![],
};
let client = self.k8s_client().await?;
let grafana = self.build_grafana(ns, &label);
client.apply(&grafana, Some(ns)).await?;
//TODO change this to a ensure ready or something better than just a timeout
client
.wait_until_deployment_ready(
"grafana-grafana-deployment".to_string(),
Some("grafana"),
Some(30),
)
.await?;
let sa_name = "grafana-grafana-sa";
let token_secret_name = "grafana-sa-token-secret";
let sa_token_secret = self.build_sa_token_secret(token_secret_name, sa_name, ns);
client.apply(&sa_token_secret, Some(ns)).await?;
let secret_gvk = GroupVersionKind {
group: "".to_string(),
version: "v1".to_string(),
kind: "Secret".to_string(),
};
let secret = client
.get_resource_json_value(token_secret_name, Some(ns), &secret_gvk)
.await?;
let token = format!(
"Bearer {}",
self.extract_and_normalize_token(&secret).unwrap()
);
debug!("creating grafana clusterrole binding");
let clusterrolebinding =
self.build_cluster_rolebinding(sa_name, "cluster-monitoring-view", ns);
client.apply(&clusterrolebinding, Some(ns)).await?;
debug!("creating grafana datasource crd");
let thanos_url = format!(
"https://{}",
self.get_domain("thanos-querier-openshift-monitoring")
.await
.unwrap()
);
let thanos_openshift_datasource = self.build_grafana_datasource(
"thanos-openshift-monitoring",
ns,
&label_selector,
&thanos_url,
&token,
);
client.apply(&thanos_openshift_datasource, Some(ns)).await?;
debug!("creating grafana dashboard crd");
let dashboard = self.build_grafana_dashboard(ns, &label_selector);
client.apply(&dashboard, Some(ns)).await?;
debug!("creating grafana ingress");
let grafana_ingress = self.build_grafana_ingress(ns).await;
grafana_ingress
.interpret(&Inventory::empty(), self)
.await
.map_err(|e| PreparationError::new(e.to_string()))?;
Ok(PreparationOutcome::Success {
details: "Installed grafana composants".to_string(),
})
}
}
#[async_trait]
impl PrometheusMonitoring<CRDPrometheus> for K8sAnywhereTopology {
async fn install_prometheus( async fn install_prometheus(
&self, &self,
sender: &CRDPrometheus, sender: &CRDPrometheus,
_inventory: &Inventory, inventory: &Inventory,
_receivers: Option<Vec<Box<dyn AlertReceiver<CRDPrometheus>>>>, receivers: Option<Vec<Box<dyn AlertReceiver<CRDPrometheus>>>>,
) -> Result<PreparationOutcome, PreparationError> {
let client = self.k8s_client().await?;
for monitor in sender.service_monitor.iter() {
client
.apply(monitor, Some(&sender.namespace))
.await
.map_err(|e| PreparationError::new(e.to_string()))?;
}
Ok(PreparationOutcome::Success {
details: "successfuly installed prometheus components".to_string(),
})
}
async fn ensure_prometheus_operator(
&self,
sender: &CRDPrometheus,
_inventory: &Inventory,
) -> Result<PreparationOutcome, PreparationError> { ) -> Result<PreparationOutcome, PreparationError> {
let po_result = self.ensure_prometheus_operator(sender).await?; let po_result = self.ensure_prometheus_operator(sender).await?;
match po_result { if po_result == PreparationOutcome::Noop {
PreparationOutcome::Success { details: _ } => { debug!("Skipping Prometheus CR installation due to missing operator.");
debug!("Detected prometheus crds operator present in cluster."); return Ok(po_result);
return Ok(po_result); }
}
PreparationOutcome::Noop => { let result = self
debug!("Skipping Prometheus CR installation due to missing operator."); .get_k8s_prometheus_application_score(sender.clone(), receivers)
return Ok(po_result); .await
} .interpret(inventory, self)
.await;
match result {
Ok(outcome) => match outcome.status {
InterpretStatus::SUCCESS => Ok(PreparationOutcome::Success {
details: outcome.message,
}),
InterpretStatus::NOOP => Ok(PreparationOutcome::Noop),
_ => Err(PreparationError::new(outcome.message)),
},
Err(err) => Err(PreparationError::new(err.to_string())),
} }
} }
} }
#[async_trait] #[async_trait]
impl PrometheusMonitoring<RHOBObservability> for K8sAnywhereTopology { impl PrometheusApplicationMonitoring<RHOBObservability> for K8sAnywhereTopology {
async fn install_prometheus( async fn install_prometheus(
&self, &self,
sender: &RHOBObservability, sender: &RHOBObservability,
@ -302,14 +154,6 @@ impl PrometheusMonitoring<RHOBObservability> for K8sAnywhereTopology {
Err(err) => Err(PreparationError::new(err.to_string())), Err(err) => Err(PreparationError::new(err.to_string())),
} }
} }
async fn ensure_prometheus_operator(
&self,
sender: &RHOBObservability,
inventory: &Inventory,
) -> Result<PreparationOutcome, PreparationError> {
todo!()
}
} }
impl Serialize for K8sAnywhereTopology { impl Serialize for K8sAnywhereTopology {
@ -371,180 +215,6 @@ impl K8sAnywhereTopology {
.await .await
} }
fn extract_and_normalize_token(&self, secret: &DynamicObject) -> Option<String> {
let token_b64 = secret
.data
.get("token")
.or_else(|| secret.data.get("data").and_then(|d| d.get("token")))
.and_then(|v| v.as_str())?;
let bytes = general_purpose::STANDARD.decode(token_b64).ok()?;
let s = String::from_utf8(bytes).ok()?;
let cleaned = s
.trim_matches(|c: char| c.is_whitespace() || c == '\0')
.to_string();
Some(cleaned)
}
pub fn build_cluster_rolebinding(
&self,
service_account_name: &str,
clusterrole_name: &str,
ns: &str,
) -> ClusterRoleBinding {
ClusterRoleBinding {
metadata: ObjectMeta {
name: Some(format!("{}-view-binding", service_account_name)),
..Default::default()
},
role_ref: RoleRef {
api_group: "rbac.authorization.k8s.io".into(),
kind: "ClusterRole".into(),
name: clusterrole_name.into(),
},
subjects: Some(vec![Subject {
kind: "ServiceAccount".into(),
name: service_account_name.into(),
namespace: Some(ns.into()),
..Default::default()
}]),
}
}
pub fn build_sa_token_secret(
&self,
secret_name: &str,
service_account_name: &str,
ns: &str,
) -> Secret {
let mut annotations = BTreeMap::new();
annotations.insert(
"kubernetes.io/service-account.name".to_string(),
service_account_name.to_string(),
);
Secret {
metadata: ObjectMeta {
name: Some(secret_name.into()),
namespace: Some(ns.into()),
annotations: Some(annotations),
..Default::default()
},
type_: Some("kubernetes.io/service-account-token".to_string()),
..Default::default()
}
}
fn build_grafana_datasource(
&self,
name: &str,
ns: &str,
label_selector: &LabelSelector,
url: &str,
token: &str,
) -> GrafanaDatasource {
let mut json_data = BTreeMap::new();
json_data.insert("timeInterval".to_string(), "5s".to_string());
GrafanaDatasource {
metadata: ObjectMeta {
name: Some(name.to_string()),
namespace: Some(ns.to_string()),
..Default::default()
},
spec: GrafanaDatasourceSpec {
instance_selector: label_selector.clone(),
allow_cross_namespace_import: Some(true),
values_from: None,
datasource: GrafanaDatasourceConfig {
access: "proxy".to_string(),
name: name.to_string(),
r#type: "prometheus".to_string(),
url: url.to_string(),
database: None,
json_data: Some(GrafanaDatasourceJsonData {
time_interval: Some("60s".to_string()),
http_header_name1: Some("Authorization".to_string()),
tls_skip_verify: Some(true),
oauth_pass_thru: Some(true),
}),
secure_json_data: Some(GrafanaDatasourceSecureJsonData {
http_header_value1: Some(format!("Bearer {token}")),
}),
is_default: Some(false),
editable: Some(true),
},
},
}
}
fn build_grafana_dashboard(
&self,
ns: &str,
label_selector: &LabelSelector,
) -> GrafanaDashboard {
let graf_dashboard = GrafanaDashboard {
metadata: ObjectMeta {
name: Some(format!("grafana-dashboard-{}", ns)),
namespace: Some(ns.to_string()),
..Default::default()
},
spec: GrafanaDashboardSpec {
resync_period: Some("30s".to_string()),
instance_selector: label_selector.clone(),
datasources: Some(vec![GrafanaDashboardDatasource {
input_name: "DS_PROMETHEUS".to_string(),
datasource_name: "thanos-openshift-monitoring".to_string(),
}]),
json: None,
grafana_com: Some(GrafanaCom {
id: 17406,
revision: None,
}),
},
};
graf_dashboard
}
fn build_grafana(&self, ns: &str, labels: &BTreeMap<String, String>) -> GrafanaCRD {
let grafana = GrafanaCRD {
metadata: ObjectMeta {
name: Some(format!("grafana-{}", ns)),
namespace: Some(ns.to_string()),
labels: Some(labels.clone()),
..Default::default()
},
spec: GrafanaSpec {
config: None,
admin_user: None,
admin_password: None,
ingress: None,
persistence: None,
resources: None,
},
};
grafana
}
async fn build_grafana_ingress(&self, ns: &str) -> K8sIngressScore {
let domain = self.get_domain(&format!("grafana-{}", ns)).await.unwrap();
let name = format!("{}-grafana", ns);
let backend_service = format!("grafana-{}-service", ns);
K8sIngressScore {
name: fqdn::fqdn!(&name),
host: fqdn::fqdn!(&domain),
backend_service: fqdn::fqdn!(&backend_service),
port: 3000,
path: Some("/".to_string()),
path_type: Some(PathType::Prefix),
namespace: Some(fqdn::fqdn!(&ns)),
ingress_class_name: Some("openshift-default".to_string()),
}
}
async fn get_cluster_observability_operator_prometheus_application_score( async fn get_cluster_observability_operator_prometheus_application_score(
&self, &self,
sender: RHOBObservability, sender: RHOBObservability,
@ -562,14 +232,13 @@ impl K8sAnywhereTopology {
&self, &self,
sender: CRDPrometheus, sender: CRDPrometheus,
receivers: Option<Vec<Box<dyn AlertReceiver<CRDPrometheus>>>>, receivers: Option<Vec<Box<dyn AlertReceiver<CRDPrometheus>>>>,
service_monitors: Option<Vec<ServiceMonitor>>,
) -> K8sPrometheusCRDAlertingScore { ) -> K8sPrometheusCRDAlertingScore {
return K8sPrometheusCRDAlertingScore { K8sPrometheusCRDAlertingScore {
sender, sender,
receivers: receivers.unwrap_or_default(), receivers: receivers.unwrap_or_default(),
service_monitors: service_monitors.unwrap_or_default(), service_monitors: vec![],
prometheus_rules: vec![], prometheus_rules: vec![],
}; }
} }
async fn openshift_ingress_operator_available(&self) -> Result<(), PreparationError> { async fn openshift_ingress_operator_available(&self) -> Result<(), PreparationError> {
@ -837,30 +506,6 @@ impl K8sAnywhereTopology {
details: "prometheus operator present in cluster".into(), details: "prometheus operator present in cluster".into(),
}) })
} }
async fn install_grafana_operator(
&self,
inventory: &Inventory,
ns: Option<&str>,
) -> Result<PreparationOutcome, PreparationError> {
let namespace = ns.unwrap_or("grafana");
info!("installing grafana operator in ns {namespace}");
let tenant = self.get_k8s_tenant_manager()?.get_tenant_config().await;
let mut namespace_scope = false;
if tenant.is_some() {
namespace_scope = true;
}
let _grafana_operator_score = grafana_helm_chart_score(namespace, namespace_scope)
.interpret(inventory, self)
.await
.map_err(|e| PreparationError::new(e.to_string()));
Ok(PreparationOutcome::Success {
details: format!(
"Successfully installed grafana operator in ns {}",
ns.unwrap()
),
})
}
} }
#[derive(Clone, Debug)] #[derive(Clone, Debug)]

View File

@ -31,7 +31,6 @@ impl<S: AlertSender + Installable<T>, T: Topology> Interpret<T> for AlertingInte
inventory: &Inventory, inventory: &Inventory,
topology: &T, topology: &T,
) -> Result<Outcome, InterpretError> { ) -> Result<Outcome, InterpretError> {
debug!("hit sender configure for AlertingInterpret");
self.sender.configure(inventory, topology).await?; self.sender.configure(inventory, topology).await?;
for receiver in self.receivers.iter() { for receiver in self.receivers.iter() {
receiver.install(&self.sender).await?; receiver.install(&self.sender).await?;
@ -87,5 +86,4 @@ pub trait AlertRule<S: AlertSender>: std::fmt::Debug + Send + Sync {
#[async_trait] #[async_trait]
pub trait ScrapeTarget<S: AlertSender>: std::fmt::Debug + Send + Sync { pub trait ScrapeTarget<S: AlertSender>: std::fmt::Debug + Send + Sync {
async fn install(&self, sender: &S) -> Result<Outcome, InterpretError>; async fn install(&self, sender: &S) -> Result<Outcome, InterpretError>;
fn clone_box(&self) -> Box<dyn ScrapeTarget<S>>;
} }

View File

@ -2,11 +2,7 @@ use crate::modules::application::{
Application, ApplicationFeature, InstallationError, InstallationOutcome, Application, ApplicationFeature, InstallationError, InstallationOutcome,
}; };
use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore; use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
use crate::modules::monitoring::grafana::grafana::Grafana;
use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus; use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus;
use crate::modules::monitoring::kube_prometheus::crd::service_monitor::{
ServiceMonitor, ServiceMonitorSpec,
};
use crate::topology::MultiTargetTopology; use crate::topology::MultiTargetTopology;
use crate::topology::ingress::Ingress; use crate::topology::ingress::Ingress;
use crate::{ use crate::{
@ -18,7 +14,7 @@ use crate::{
topology::{HelmCommand, K8sclient, Topology, tenant::TenantManager}, topology::{HelmCommand, K8sclient, Topology, tenant::TenantManager},
}; };
use crate::{ use crate::{
modules::prometheus::prometheus::PrometheusMonitoring, modules::prometheus::prometheus::PrometheusApplicationMonitoring,
topology::oberservability::monitoring::AlertReceiver, topology::oberservability::monitoring::AlertReceiver,
}; };
use async_trait::async_trait; use async_trait::async_trait;
@ -26,7 +22,6 @@ use base64::{Engine as _, engine::general_purpose};
use harmony_secret::SecretManager; use harmony_secret::SecretManager;
use harmony_secret_derive::Secret; use harmony_secret_derive::Secret;
use harmony_types::net::Url; use harmony_types::net::Url;
use kube::api::ObjectMeta;
use log::{debug, info}; use log::{debug, info};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::sync::Arc; use std::sync::Arc;
@ -45,8 +40,7 @@ impl<
+ TenantManager + TenantManager
+ K8sclient + K8sclient
+ MultiTargetTopology + MultiTargetTopology
+ PrometheusMonitoring<CRDPrometheus> + PrometheusApplicationMonitoring<CRDPrometheus>
+ Grafana
+ Ingress + Ingress
+ std::fmt::Debug, + std::fmt::Debug,
> ApplicationFeature<T> for Monitoring > ApplicationFeature<T> for Monitoring
@ -63,20 +57,10 @@ impl<
.unwrap_or_else(|| self.application.name()); .unwrap_or_else(|| self.application.name());
let domain = topology.get_domain("ntfy").await.unwrap(); let domain = topology.get_domain("ntfy").await.unwrap();
let app_service_monitor = ServiceMonitor {
metadata: ObjectMeta {
name: Some(self.application.name()),
namespace: Some(namespace.clone()),
..Default::default()
},
spec: ServiceMonitorSpec::default(),
};
let mut alerting_score = ApplicationMonitoringScore { let mut alerting_score = ApplicationMonitoringScore {
sender: CRDPrometheus { sender: CRDPrometheus {
namespace: namespace.clone(), namespace: namespace.clone(),
client: topology.k8s_client().await.unwrap(), client: topology.k8s_client().await.unwrap(),
service_monitor: vec![app_service_monitor],
}, },
application: self.application.clone(), application: self.application.clone(),
receivers: self.alert_receiver.clone(), receivers: self.alert_receiver.clone(),

View File

@ -18,7 +18,7 @@ use crate::{
topology::{HelmCommand, K8sclient, Topology, tenant::TenantManager}, topology::{HelmCommand, K8sclient, Topology, tenant::TenantManager},
}; };
use crate::{ use crate::{
modules::prometheus::prometheus::PrometheusMonitoring, modules::prometheus::prometheus::PrometheusApplicationMonitoring,
topology::oberservability::monitoring::AlertReceiver, topology::oberservability::monitoring::AlertReceiver,
}; };
use async_trait::async_trait; use async_trait::async_trait;
@ -42,7 +42,7 @@ impl<
+ MultiTargetTopology + MultiTargetTopology
+ Ingress + Ingress
+ std::fmt::Debug + std::fmt::Debug
+ PrometheusMonitoring<RHOBObservability>, + PrometheusApplicationMonitoring<RHOBObservability>,
> ApplicationFeature<T> for Monitoring > ApplicationFeature<T> for Monitoring
{ {
async fn ensure_installed( async fn ensure_installed(

View File

@ -1,23 +1,21 @@
use std::sync::Arc; use std::sync::Arc;
use log::debug; use async_trait::async_trait;
use serde::Serialize; use serde::Serialize;
use crate::{ use crate::{
interpret::Interpret, data::Version,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
modules::{ modules::{
application::Application, application::Application,
monitoring::{ monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus,
grafana::grafana::Grafana, kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus, prometheus::prometheus::PrometheusApplicationMonitoring,
},
prometheus::prometheus::PrometheusMonitoring,
}, },
score::Score, score::Score,
topology::{ topology::{PreparationOutcome, Topology, oberservability::monitoring::AlertReceiver},
K8sclient, Topology,
oberservability::monitoring::{AlertReceiver, AlertingInterpret, ScrapeTarget},
},
}; };
use harmony_types::id::Id;
#[derive(Debug, Clone, Serialize)] #[derive(Debug, Clone, Serialize)]
pub struct ApplicationMonitoringScore { pub struct ApplicationMonitoringScore {
@ -26,16 +24,12 @@ pub struct ApplicationMonitoringScore {
pub receivers: Vec<Box<dyn AlertReceiver<CRDPrometheus>>>, pub receivers: Vec<Box<dyn AlertReceiver<CRDPrometheus>>>,
} }
impl<T: Topology + PrometheusMonitoring<CRDPrometheus> + K8sclient + Grafana> Score<T> impl<T: Topology + PrometheusApplicationMonitoring<CRDPrometheus>> Score<T>
for ApplicationMonitoringScore for ApplicationMonitoringScore
{ {
fn create_interpret(&self) -> Box<dyn Interpret<T>> { fn create_interpret(&self) -> Box<dyn Interpret<T>> {
debug!("creating alerting interpret"); Box::new(ApplicationMonitoringInterpret {
Box::new(AlertingInterpret { score: self.clone(),
sender: self.sender.clone(),
receivers: self.receivers.clone(),
rules: vec![],
scrape_targets: None,
}) })
} }
@ -46,3 +40,55 @@ impl<T: Topology + PrometheusMonitoring<CRDPrometheus> + K8sclient + Grafana> Sc
) )
} }
} }
#[derive(Debug)]
pub struct ApplicationMonitoringInterpret {
score: ApplicationMonitoringScore,
}
#[async_trait]
impl<T: Topology + PrometheusApplicationMonitoring<CRDPrometheus>> Interpret<T>
for ApplicationMonitoringInterpret
{
async fn execute(
&self,
inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
let result = topology
.install_prometheus(
&self.score.sender,
inventory,
Some(self.score.receivers.clone()),
)
.await;
match result {
Ok(outcome) => match outcome {
PreparationOutcome::Success { details: _ } => {
Ok(Outcome::success("Prometheus installed".into()))
}
PreparationOutcome::Noop => {
Ok(Outcome::noop("Prometheus installation skipped".into()))
}
},
Err(err) => Err(InterpretError::from(err)),
}
}
fn get_name(&self) -> InterpretName {
InterpretName::ApplicationMonitoring
}
fn get_version(&self) -> Version {
todo!()
}
fn get_status(&self) -> InterpretStatus {
todo!()
}
fn get_children(&self) -> Vec<Id> {
todo!()
}
}

View File

@ -12,7 +12,7 @@ use crate::{
monitoring::kube_prometheus::crd::{ monitoring::kube_prometheus::crd::{
crd_alertmanager_config::CRDPrometheus, rhob_alertmanager_config::RHOBObservability, crd_alertmanager_config::CRDPrometheus, rhob_alertmanager_config::RHOBObservability,
}, },
prometheus::prometheus::PrometheusMonitoring, prometheus::prometheus::PrometheusApplicationMonitoring,
}, },
score::Score, score::Score,
topology::{PreparationOutcome, Topology, oberservability::monitoring::AlertReceiver}, topology::{PreparationOutcome, Topology, oberservability::monitoring::AlertReceiver},
@ -26,7 +26,7 @@ pub struct ApplicationRHOBMonitoringScore {
pub receivers: Vec<Box<dyn AlertReceiver<RHOBObservability>>>, pub receivers: Vec<Box<dyn AlertReceiver<RHOBObservability>>>,
} }
impl<T: Topology + PrometheusMonitoring<RHOBObservability>> Score<T> impl<T: Topology + PrometheusApplicationMonitoring<RHOBObservability>> Score<T>
for ApplicationRHOBMonitoringScore for ApplicationRHOBMonitoringScore
{ {
fn create_interpret(&self) -> Box<dyn Interpret<T>> { fn create_interpret(&self) -> Box<dyn Interpret<T>> {
@ -49,7 +49,7 @@ pub struct ApplicationRHOBMonitoringInterpret {
} }
#[async_trait] #[async_trait]
impl<T: Topology + PrometheusMonitoring<RHOBObservability>> Interpret<T> impl<T: Topology + PrometheusApplicationMonitoring<RHOBObservability>> Interpret<T>
for ApplicationRHOBMonitoringInterpret for ApplicationRHOBMonitoringInterpret
{ {
async fn execute( async fn execute(

View File

@ -1,17 +0,0 @@
use async_trait::async_trait;
use k8s_openapi::Resource;
use crate::{
inventory::Inventory,
topology::{PreparationError, PreparationOutcome},
};
#[async_trait]
pub trait Grafana {
async fn ensure_grafana_operator(
&self,
inventory: &Inventory,
) -> Result<PreparationOutcome, PreparationError>;
async fn install_grafana(&self) -> Result<PreparationOutcome, PreparationError>;
}

View File

@ -1,28 +1,27 @@
use harmony_macros::hurl;
use non_blank_string_rs::NonBlankString; use non_blank_string_rs::NonBlankString;
use std::{collections::HashMap, str::FromStr}; use std::str::FromStr;
use crate::modules::helm::chart::{HelmChartScore, HelmRepository}; use crate::modules::helm::chart::HelmChartScore;
pub fn grafana_helm_chart_score(ns: &str) -> HelmChartScore {
let values = r#"
rbac:
namespaced: true
sidecar:
dashboards:
enabled: true
"#
.to_string();
pub fn grafana_helm_chart_score(ns: &str, namespace_scope: bool) -> HelmChartScore {
let mut values_overrides = HashMap::new();
values_overrides.insert(
NonBlankString::from_str("namespaceScope").unwrap(),
namespace_scope.to_string(),
);
HelmChartScore { HelmChartScore {
namespace: Some(NonBlankString::from_str(ns).unwrap()), namespace: Some(NonBlankString::from_str(ns).unwrap()),
release_name: NonBlankString::from_str("grafana-operator").unwrap(), release_name: NonBlankString::from_str("grafana").unwrap(),
chart_name: NonBlankString::from_str("grafana/grafana-operator").unwrap(), chart_name: NonBlankString::from_str("oci://ghcr.io/grafana/helm-charts/grafana").unwrap(),
chart_version: None, chart_version: None,
values_overrides: Some(values_overrides), values_overrides: None,
values_yaml: None, values_yaml: Some(values.to_string()),
create_namespace: true, create_namespace: true,
install_only: true, install_only: true,
repository: Some(HelmRepository::new( repository: None,
"grafana".to_string(),
hurl!("https://grafana.github.io/helm-charts"),
true,
)),
} }
} }

View File

@ -1,2 +1 @@
pub mod grafana;
pub mod helm; pub mod helm;

View File

@ -1,25 +1,12 @@
use std::sync::Arc; use std::sync::Arc;
use async_trait::async_trait;
use kube::CustomResource; use kube::CustomResource;
use schemars::JsonSchema; use schemars::JsonSchema;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use crate::{ use crate::topology::{
interpret::{InterpretError, Outcome}, k8s::K8sClient,
inventory::Inventory, oberservability::monitoring::{AlertReceiver, AlertSender},
modules::{
monitoring::{
grafana::grafana::Grafana, kube_prometheus::crd::service_monitor::ServiceMonitor,
},
prometheus::prometheus::PrometheusMonitoring,
},
topology::{
K8sclient, Topology,
installable::Installable,
k8s::K8sClient,
oberservability::monitoring::{AlertReceiver, AlertSender, ScrapeTarget},
},
}; };
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)] #[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
@ -39,7 +26,6 @@ pub struct AlertmanagerConfigSpec {
pub struct CRDPrometheus { pub struct CRDPrometheus {
pub namespace: String, pub namespace: String,
pub client: Arc<K8sClient>, pub client: Arc<K8sClient>,
pub service_monitor: Vec<ServiceMonitor>,
} }
impl AlertSender for CRDPrometheus { impl AlertSender for CRDPrometheus {
@ -54,12 +40,6 @@ impl Clone for Box<dyn AlertReceiver<CRDPrometheus>> {
} }
} }
impl Clone for Box<dyn ScrapeTarget<CRDPrometheus>> {
fn clone(&self) -> Self {
self.clone_box()
}
}
impl Serialize for Box<dyn AlertReceiver<CRDPrometheus>> { impl Serialize for Box<dyn AlertReceiver<CRDPrometheus>> {
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error> fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
where where
@ -68,24 +48,3 @@ impl Serialize for Box<dyn AlertReceiver<CRDPrometheus>> {
todo!() todo!()
} }
} }
#[async_trait]
impl<T: Topology + K8sclient + PrometheusMonitoring<CRDPrometheus> + Grafana> Installable<T>
for CRDPrometheus
{
async fn configure(&self, inventory: &Inventory, topology: &T) -> Result<(), InterpretError> {
topology.ensure_grafana_operator(inventory).await?;
topology.ensure_prometheus_operator(self, inventory).await?;
Ok(())
}
async fn ensure_installed(
&self,
inventory: &Inventory,
topology: &T,
) -> Result<(), InterpretError> {
topology.install_grafana().await?;
topology.install_prometheus(&self, inventory, None).await?;
Ok(())
}
}

View File

@ -103,34 +103,9 @@ pub struct GrafanaDashboardSpec {
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(default, skip_serializing_if = "Option::is_none")]
pub resync_period: Option<String>, pub resync_period: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub datasources: Option<Vec<GrafanaDashboardDatasource>>,
pub instance_selector: LabelSelector, pub instance_selector: LabelSelector,
#[serde(default, skip_serializing_if = "Option::is_none")] pub json: String,
pub json: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub grafana_com: Option<GrafanaCom>,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaDashboardDatasource {
pub input_name: String,
pub datasource_name: String,
}
// ------------------------------------------------------------------------------------------------
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaCom {
pub id: u32,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub revision: Option<u32>,
} }
// ------------------------------------------------------------------------------------------------ // ------------------------------------------------------------------------------------------------
@ -151,79 +126,20 @@ pub struct GrafanaDatasourceSpec {
pub allow_cross_namespace_import: Option<bool>, pub allow_cross_namespace_import: Option<bool>,
pub datasource: GrafanaDatasourceConfig, pub datasource: GrafanaDatasourceConfig,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub values_from: Option<Vec<GrafanaValueFrom>>,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaValueFrom {
pub target_path: String,
pub value_from: GrafanaValueSource,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaValueSource {
pub secret_key_ref: GrafanaSecretKeyRef,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaSecretKeyRef {
pub name: String,
pub key: String,
} }
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)] #[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
pub struct GrafanaDatasourceConfig { pub struct GrafanaDatasourceConfig {
pub access: String, pub access: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub database: Option<String>, pub database: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub json_data: Option<BTreeMap<String, String>>,
pub name: String, pub name: String,
pub r#type: String, pub r#type: String,
pub url: String, pub url: String,
/// Represents jsonData in the GrafanaDatasource spec
#[serde(default, skip_serializing_if = "Option::is_none")]
pub json_data: Option<GrafanaDatasourceJsonData>,
/// Represents secureJsonData (secrets)
#[serde(default, skip_serializing_if = "Option::is_none")]
pub secure_json_data: Option<GrafanaDatasourceSecureJsonData>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub is_default: Option<bool>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub editable: Option<bool>,
} }
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaDatasourceJsonData {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub time_interval: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub http_header_name1: Option<String>,
/// Disable TLS skip verification (false = verify)
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tls_skip_verify: Option<bool>,
/// Auth type - set to "forward" for OpenShift OAuth identity
#[serde(default, skip_serializing_if = "Option::is_none")]
pub oauth_pass_thru: Option<bool>,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaDatasourceSecureJsonData {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub http_header_value1: Option<String>,
}
// ------------------------------------------------------------------------------------------------ // ------------------------------------------------------------------------------------------------
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, Default)] #[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, Default)]

View File

@ -114,7 +114,7 @@ impl Prometheus {
}; };
if let Some(ns) = namespace.as_deref() { if let Some(ns) = namespace.as_deref() {
grafana_helm_chart_score(ns, false) grafana_helm_chart_score(ns)
.interpret(inventory, topology) .interpret(inventory, topology)
.await .await
} else { } else {

View File

@ -73,8 +73,4 @@ impl ScrapeTarget<CRDPrometheus> for Server {
self.name.clone() self.name.clone()
))) )))
} }
fn clone_box(&self) -> Box<dyn ScrapeTarget<CRDPrometheus>> {
Box::new(self.clone())
}
} }

View File

@ -12,8 +12,7 @@ use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::C
use crate::modules::monitoring::kube_prometheus::crd::crd_default_rules::build_default_application_rules; use crate::modules::monitoring::kube_prometheus::crd::crd_default_rules::build_default_application_rules;
use crate::modules::monitoring::kube_prometheus::crd::crd_grafana::{ use crate::modules::monitoring::kube_prometheus::crd::crd_grafana::{
Grafana, GrafanaDashboard, GrafanaDashboardSpec, GrafanaDatasource, GrafanaDatasourceConfig, Grafana, GrafanaDashboard, GrafanaDashboardSpec, GrafanaDatasource, GrafanaDatasourceConfig,
GrafanaDatasourceJsonData, GrafanaDatasourceSpec, GrafanaSecretKeyRef, GrafanaSpec, GrafanaDatasourceSpec, GrafanaSpec,
GrafanaValueFrom, GrafanaValueSource,
}; };
use crate::modules::monitoring::kube_prometheus::crd::crd_prometheus_rules::{ use crate::modules::monitoring::kube_prometheus::crd::crd_prometheus_rules::{
PrometheusRule, PrometheusRuleSpec, RuleGroup, PrometheusRule, PrometheusRuleSpec, RuleGroup,
@ -40,7 +39,7 @@ use crate::{
}; };
use harmony_types::id::Id; use harmony_types::id::Id;
use super::prometheus::PrometheusMonitoring; use super::prometheus::PrometheusApplicationMonitoring;
#[derive(Clone, Debug, Serialize)] #[derive(Clone, Debug, Serialize)]
pub struct K8sPrometheusCRDAlertingScore { pub struct K8sPrometheusCRDAlertingScore {
@ -50,7 +49,7 @@ pub struct K8sPrometheusCRDAlertingScore {
pub prometheus_rules: Vec<RuleGroup>, pub prometheus_rules: Vec<RuleGroup>,
} }
impl<T: Topology + K8sclient + PrometheusMonitoring<CRDPrometheus>> Score<T> impl<T: Topology + K8sclient + PrometheusApplicationMonitoring<CRDPrometheus>> Score<T>
for K8sPrometheusCRDAlertingScore for K8sPrometheusCRDAlertingScore
{ {
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> { fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
@ -76,7 +75,7 @@ pub struct K8sPrometheusCRDAlertingInterpret {
} }
#[async_trait] #[async_trait]
impl<T: Topology + K8sclient + PrometheusMonitoring<CRDPrometheus>> Interpret<T> impl<T: Topology + K8sclient + PrometheusApplicationMonitoring<CRDPrometheus>> Interpret<T>
for K8sPrometheusCRDAlertingInterpret for K8sPrometheusCRDAlertingInterpret
{ {
async fn execute( async fn execute(
@ -467,13 +466,10 @@ impl K8sPrometheusCRDAlertingInterpret {
match_labels: label.clone(), match_labels: label.clone(),
match_expressions: vec![], match_expressions: vec![],
}; };
let mut json_data = BTreeMap::new();
json_data.insert("timeInterval".to_string(), "5s".to_string());
let namespace = self.sender.namespace.clone(); let namespace = self.sender.namespace.clone();
let json_data = GrafanaDatasourceJsonData {
time_interval: Some("5s".to_string()),
http_header_name1: None,
tls_skip_verify: Some(true),
oauth_pass_thru: Some(true),
};
let json = build_default_dashboard(&namespace); let json = build_default_dashboard(&namespace);
let graf_data_source = GrafanaDatasource { let graf_data_source = GrafanaDatasource {
@ -499,11 +495,7 @@ impl K8sPrometheusCRDAlertingInterpret {
"http://prometheus-operated.{}.svc.cluster.local:9090", "http://prometheus-operated.{}.svc.cluster.local:9090",
self.sender.namespace.clone() self.sender.namespace.clone()
), ),
secure_json_data: None,
is_default: None,
editable: None,
}, },
values_from: None,
}, },
}; };
@ -524,9 +516,7 @@ impl K8sPrometheusCRDAlertingInterpret {
spec: GrafanaDashboardSpec { spec: GrafanaDashboardSpec {
resync_period: Some("30s".to_string()), resync_period: Some("30s".to_string()),
instance_selector: labels.clone(), instance_selector: labels.clone(),
json: Some(json), json,
grafana_com: None,
datasources: None,
}, },
}; };

View File

@ -9,17 +9,11 @@ use crate::{
}; };
#[async_trait] #[async_trait]
pub trait PrometheusMonitoring<S: AlertSender> { pub trait PrometheusApplicationMonitoring<S: AlertSender> {
async fn install_prometheus( async fn install_prometheus(
&self, &self,
sender: &S, sender: &S,
inventory: &Inventory, inventory: &Inventory,
receivers: Option<Vec<Box<dyn AlertReceiver<S>>>>, receivers: Option<Vec<Box<dyn AlertReceiver<S>>>>,
) -> Result<PreparationOutcome, PreparationError>; ) -> Result<PreparationOutcome, PreparationError>;
async fn ensure_prometheus_operator(
&self,
sender: &S,
inventory: &Inventory,
) -> Result<PreparationOutcome, PreparationError>;
} }

View File

@ -38,7 +38,7 @@ use crate::{
}; };
use harmony_types::id::Id; use harmony_types::id::Id;
use super::prometheus::PrometheusMonitoring; use super::prometheus::PrometheusApplicationMonitoring;
#[derive(Clone, Debug, Serialize)] #[derive(Clone, Debug, Serialize)]
pub struct RHOBAlertingScore { pub struct RHOBAlertingScore {
@ -48,8 +48,8 @@ pub struct RHOBAlertingScore {
pub prometheus_rules: Vec<RuleGroup>, pub prometheus_rules: Vec<RuleGroup>,
} }
impl<T: Topology + K8sclient + Ingress + PrometheusMonitoring<RHOBObservability>> Score<T> impl<T: Topology + K8sclient + Ingress + PrometheusApplicationMonitoring<RHOBObservability>>
for RHOBAlertingScore Score<T> for RHOBAlertingScore
{ {
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> { fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
Box::new(RHOBAlertingInterpret { Box::new(RHOBAlertingInterpret {
@ -74,8 +74,8 @@ pub struct RHOBAlertingInterpret {
} }
#[async_trait] #[async_trait]
impl<T: Topology + K8sclient + Ingress + PrometheusMonitoring<RHOBObservability>> Interpret<T> impl<T: Topology + K8sclient + Ingress + PrometheusApplicationMonitoring<RHOBObservability>>
for RHOBAlertingInterpret Interpret<T> for RHOBAlertingInterpret
{ {
async fn execute( async fn execute(
&self, &self,