feat: add service monitors support to prom #66

Merged
johnride merged 9 commits from monitoring_servicemonitor into master 2025-07-02 15:29:23 +00:00
6 changed files with 57 additions and 21 deletions
Showing only changes of commit 5c22fc8823

Cargo.lock (generated)

@@ -1160,6 +1160,7 @@ version = "0.1.0"
 dependencies = [
  "harmony",
  "harmony_cli",
+ "harmony_macros",
  "tokio",
  "url",
 ]


@@ -8,5 +8,6 @@ license.workspace = true
 [dependencies]
 harmony = { version = "0.1.0", path = "../../harmony" }
 harmony_cli = { version = "0.1.0", path = "../../harmony_cli" }
+harmony_macros = { version = "0.1.0", path = "../../harmony_macros" }
 tokio.workspace = true
 url.workspace = true


@@ -18,6 +18,8 @@ use harmony::{
     topology::{K8sAnywhereTopology, Url},
 };
+use harmony_macros::http_scheme;
 #[tokio::main]
 async fn main() {
     let discord_receiver = DiscordWebhook {
@@ -41,9 +43,34 @@ async fn main() {
         ],
     );
+    let service_monitor_endpoint = ServiceMonitorEndpoint {
+        port: Some(80),
+        target_port: None,
+        bearer_token_file: None,
+        interval: None,
+        path: "/metrics".to_string(),
+        scheme: http_scheme!("http"),
+        tls_config: None,
+        metric_relabelings: vec![],
+        relabelings: vec![],
+    };
+    let service_monitor = ServiceMonitor {
+        name: "test service monitor".to_string(),
+        additional_labels: None,
+        job_label: None,
+        target_labels: vec![],
+        pod_target_labels: vec![],
+        selector: None,
+        match_labels: None,
+        namespace_selector: None,
+        endpoints: vec![service_monitor_endpoint],
+        fallback_scrape_protocol: None,
+    };
     let alerting_score = HelmPrometheusAlertingScore {
         receivers: vec![Box::new(discord_receiver)],
         rules: vec![Box::new(additional_rules), Box::new(additional_rules2)],
+        service_monitors: vec![service_monitor],
     };
     let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
         Inventory::autoload(),


@@ -101,12 +101,14 @@ nodeExporter:
   enabled: {node_exporter}
 prometheusOperator:
   enabled: {prometheus_operator}
 "#,
     );
     let prometheus_config =
         crate::modules::monitoring::kube_prometheus::types::PrometheusConfigValues {
             prometheus: PrometheusConfig {
-                prometheus,
+                prometheus: bool::from_str(prometheus.as_str()).expect("couldn't parse bool"),
                 additional_service_monitors: config.additional_service_monitors.clone(),
             },
         };


@@ -4,6 +4,7 @@ use serde::Serialize;
 use super::{helm::config::KubePrometheusConfig, prometheus::Prometheus};
 use crate::{
+    modules::monitoring::kube_prometheus::types::ServiceMonitor,
     score::Score,
     topology::{
         HelmCommand, Topology,
@@ -15,14 +16,18 @@ use crate::{
 pub struct HelmPrometheusAlertingScore {
     pub receivers: Vec<Box<dyn AlertReceiver<Prometheus>>>,
     pub rules: Vec<Box<dyn AlertRule<Prometheus>>>,
+    pub service_monitors: Vec<ServiceMonitor>,
 }
 impl<T: Topology + HelmCommand> Score<T> for HelmPrometheusAlertingScore {
     fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
+        let config = Arc::new(Mutex::new(KubePrometheusConfig::new()));
+        config
+            .try_lock()
+            .expect("couldn't lock config")
+            .additional_service_monitors = self.service_monitors.clone();
         Box::new(AlertingInterpret {
-            sender: Prometheus {
-                config: Arc::new(Mutex::new(KubePrometheusConfig::new())),
-            },
+            sender: Prometheus { config },
             receivers: self.receivers.clone(),
             rules: self.rules.clone(),
         })


@@ -64,7 +64,7 @@ pub struct PrometheusConfigValues {
 #[derive(Debug, Clone, Serialize)]
 #[serde(rename_all = "camelCase")]
 pub struct PrometheusConfig {
-    pub prometheus: String,
+    pub prometheus: bool,
     pub additional_service_monitors: Vec<ServiceMonitor>,
 }
@@ -73,23 +73,23 @@ pub struct PrometheusConfig {
 pub struct ServiceMonitorTLSConfig {
     // ## Path to the CA file
     // ##
-    pub ca_file: String,
+    pub ca_file: Option<String>,
     // ## Path to client certificate file
     // ##
-    pub cert_file: String,
+    pub cert_file: Option<String>,
     // ## Skip certificate verification
     // ##
-    pub insecure_skip_verify: bool,
+    pub insecure_skip_verify: Option<bool>,
     // ## Path to client key file
     // ##
-    pub key_file: String,
+    pub key_file: Option<String>,
     // ## Server name used to verify host name
     // ##
-    pub server_name: String,
+    pub server_name: Option<String>,
 }
 #[derive(Debug, Clone, Serialize)]
@@ -97,19 +97,19 @@ pub struct ServiceMonitorTLSConfig {
 pub struct ServiceMonitorEndpoint {
     // ## Name of the endpoint's service port
     // ## Mutually exclusive with targetPort
-    pub port: String,
+    pub port: Option<u16>,
     // ## Name or number of the endpoint's target port

Review comment: This should be a specific type that validates the path.

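A minimal sketch of what such a validating path type could look like; the `MetricsPath` name, error type, and validation rules here are illustrative, not part of this PR:

```rust
// Illustrative only: a newtype that validates the scrape path at construction
// time instead of passing a raw String around.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct MetricsPath(String);

#[derive(Debug)]
pub struct InvalidPath(String);

impl MetricsPath {
    pub fn new(path: &str) -> Result<Self, InvalidPath> {
        // An HTTP path must be absolute and must not contain whitespace.
        if path.starts_with('/') && !path.contains(char::is_whitespace) {
            Ok(Self(path.to_string()))
        } else {
            Err(InvalidPath(path.to_string()))
        }
    }

    pub fn as_str(&self) -> &str {
        &self.0
    }
}
```

With such a type, the `path: "/metrics".to_string()` field in the example above could become something like `path: MetricsPath::new("/metrics").expect("valid path")`.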
     // ## Mutually exclusive with port
-    pub target_port: String,
+    pub target_port: Option<u16>,
     // ## File containing bearer token to be used when scraping targets
Review comment (taha marked this conversation as resolved, outdated): This should be an enum:

```rust
pub enum URLScheme {
    HTTP,
    HTTPS,
    // Maybe others such as:
    FILE,
    FTP,
    OTHER(String), // With this we are both usable with more frequent schemes and extensible
}

impl Display for URLScheme {
    // TODO
}
```
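One possible way to fill in the `Display` impl that the comment above leaves as a TODO; a sketch only, not code from this PR:

```rust
use std::fmt;

pub enum URLScheme {
    HTTP,
    HTTPS,
    FILE,
    FTP,
    OTHER(String),
}

impl fmt::Display for URLScheme {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Render each scheme the way it appears in a URL, e.g. "http" or "https".
        match self {
            URLScheme::HTTP => write!(f, "http"),
            URLScheme::HTTPS => write!(f, "https"),
            URLScheme::FILE => write!(f, "file"),
            URLScheme::FTP => write!(f, "ftp"),
            URLScheme::OTHER(scheme) => write!(f, "{scheme}"),
        }
    }
}
```

Assuming the `http_scheme!` macro used in the example expands to such a scheme value, the `scheme` field of `ServiceMonitorEndpoint` could carry this enum instead of a plain string.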
     // ##
-    pub bearer_token_file: String,
+    pub bearer_token_file: Option<String>,
     // ## Interval at which metrics should be scraped
     // ##
-    pub interval: String,
+    pub interval: Option<String>,
     // ## HTTP path to scrape for metrics
     // ##
@@ -121,7 +121,7 @@ pub struct ServiceMonitorEndpoint {
     // ## TLS configuration to use when scraping the endpoint
     // ##
-    pub tls_config: ServiceMonitorTLSConfig,
+    pub tls_config: Option<ServiceMonitorTLSConfig>,
     // ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
     // ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
@@ -149,11 +149,11 @@ pub struct ServiceMonitor {
     pub name: String,
     // # Additional labels to set used for the ServiceMonitorSelector. Together with standard labels from the chart
-    pub additional_labels: Mapping,
+    pub additional_labels: Option<Mapping>,
     // # Service label for use in assembling a job name of the form <label value>-<port>
     // # If no label is specified, the service name is used.
-    pub job_label: String,
+    pub job_label: Option<String>,
     // # labels to transfer from the kubernetes service to the target

Review comment: I think we already have a Label type somewhere. It would be more appropriate here than String, and that is true for all the label-related fields in this file.

This Label type might not be fully compatible in its current form/place, but it is definitely a semantic that we will see very often in various use cases and implementations. I think it is worth it for us to maintain a Label type for which we can eventually provide very interesting functionality, such as search, tracking, matching, versioning, etc.
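For illustration only, a rough sketch of what a shared `Label` type might look like if these `Vec<String>` and `Mapping` fields were migrated to it; the name and API are assumptions, not part of this PR or the existing Harmony type the comment refers to:

```rust
// Hypothetical shared Label type, as suggested in the review comment above.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct Label {
    pub key: String,
    pub value: String,
}

impl Label {
    pub fn new(key: impl Into<String>, value: impl Into<String>) -> Self {
        Self {
            key: key.into(),
            value: value.into(),
        }
    }

    // Basic building block for selector matching: same key and same value.
    pub fn matches(&self, other: &Label) -> bool {
        self.key == other.key && self.value == other.value
    }
}
```

Whether fields like `target_labels` would hold full key/value pairs or only label names is a per-field decision.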
     pub target_labels: Vec<String>,
@@ -170,22 +170,22 @@ pub struct ServiceMonitor {
     // values:
     // - example-service-1
     // - example-service-2
-    pub selector: Mapping,
+    pub selector: Option<Mapping>,
     // # label selector for services
-    pub match_labels: Mapping,
+    pub match_labels: Option<Mapping>,
     // # Namespaces from which services are selected
     // # Match any namespace
     // any: bool,
     // # Explicit list of namespace names to select
     // matchNames: Vec,
-    pub namespace_selector: Mapping,
+    pub namespace_selector: Option<Mapping>,
     // # Endpoints of the selected service to be monitored
     pub endpoints: Vec<ServiceMonitorEndpoint>,
     // # Fallback scrape protocol used by Prometheus for scraping metrics
     // # ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#monitoring.coreos.com/v1.ScrapeProtocol
-    pub fallback_scrape_protocol: String,
+    pub fallback_scrape_protocol: Option<String>,
 }