refactor: monitoring takes namespace from tenant

Willem 2025-07-02 11:13:03 -04:00
parent 460c8b59e1
commit 7de9860dcf
8 changed files with 408 additions and 145 deletions
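A minimal illustration of the idea in the commit title: the monitoring stack's namespace is derived from the tenant instead of being hardcoded. The types below are simplified stand-ins for the repository's `TenantConfig` and `KubePrometheusConfig`, not the actual API; field names beyond `name` and `namespace` are assumptions.

```rust
// Sketch only: hypothetical, simplified types illustrating
// "monitoring takes namespace from tenant".
struct TenantConfig {
    name: String,
}

struct KubePrometheusConfig {
    namespace: Option<String>,
}

// The monitoring configuration no longer carries its own hardcoded namespace;
// it is filled in from the tenant it belongs to.
fn monitoring_for_tenant(tenant: &TenantConfig) -> KubePrometheusConfig {
    KubePrometheusConfig {
        namespace: Some(tenant.name.clone()),
    }
}

fn main() {
    let tenant = TenantConfig {
        name: "test-tenant".to_string(),
    };
    let monitoring = monitoring_for_tenant(&tenant);
    assert_eq!(monitoring.namespace.as_deref(), Some("test-tenant"));
}
```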

Cargo.lock (generated)

@@ -1164,6 +1164,17 @@ dependencies = [
"url",
]
[[package]]
name = "example-monitoring-with-tenant"
version = "0.1.0"
dependencies = [
"cidr",
"harmony",
"harmony_cli",
"tokio",
"url",
]
[[package]]
name = "example-nanodc"
version = "0.1.0"


@@ -1,4 +1,3 @@
use cidr::Ipv4Cidr;
use harmony::{
data::Id,
inventory::Inventory,
@@ -14,11 +13,9 @@ use harmony::{
},
topology::{
K8sAnywhereTopology, Url,
tenant::{InternetEgressPolicy, ResourceLimits, TenantConfig, TenantNetworkPolicy},
tenant::{ResourceLimits, TenantConfig, TenantNetworkPolicy},
},
};
use std::net::Ipv4Addr;
use std::str::FromStr;
#[tokio::main]
async fn main() {
@@ -27,7 +24,7 @@ async fn main() {
id: Id::from_string("1234".to_string()),
name: "test-tenant".to_string(),
resource_limits: ResourceLimits {
cpu_request_cores: 4.0,
cpu_request_cores: 6.0,
cpu_limit_cores: 4.0,
memory_request_gb: 4.0,
memory_limit_gb: 4.0,


@@ -112,8 +112,8 @@ impl K8sTenantManager {
"requests.storage": format!("{:.3}Gi", config.resource_limits.storage_total_gb),
"pods": "20",
"services": "10",
"configmaps": "30",
"secrets": "30",
"configmaps": "60",
"secrets": "60",
"persistentvolumeclaims": "15",
"services.loadbalancers": "2",
"services.nodeports": "5",
@@ -137,30 +137,63 @@ impl K8sTenantManager {
"apiVersion": "networking.k8s.io/v1",
"kind": "NetworkPolicy",
"metadata": {
"name": format!("{}-network-policy", config.name),
"name": format!("{}-network-policy", config.name)
},
"spec": {
"podSelector": {},
"egress": [
{ "to": [ {"podSelector": {}}]},
{ "to":
[
{
"to": [
{ "podSelector": {} }
]
},
{
"to": [
{
"podSelector": {},
"namespaceSelector": {
"matchLabels": {
"kubernetes.io/metadata.name":"openshift-dns"
"kubernetes.io/metadata.name": "kube-system"
}
}
}
},
]
},
{ "to": [
{
"to": [
{
"podSelector": {},
"namespaceSelector": {
"matchLabels": {
"kubernetes.io/metadata.name": "openshift-dns"
}
}
}
]
},
{
"to": [
{
"ipBlock": {
"cidr": "10.43.0.1/32",
}
}
]
},
{
"to": [
{
"ipBlock": {
"cidr": "172.23.0.0/16",
}
}
]
},
{
"to": [
{
"ipBlock": {
"cidr": "0.0.0.0/0",
// See https://en.wikipedia.org/wiki/Reserved_IP_addresses
"except": [
"10.0.0.0/8",
"172.16.0.0/12",
@@ -173,29 +206,30 @@ impl K8sTenantManager {
"169.254.0.0/16",
"203.0.113.0/24",
"127.0.0.0/8",
// Not sure we should block this one, since it is used for
// multicast. Better to block too much than too little.
"224.0.0.0/4",
"240.0.0.0/4",
"100.64.0.0/10",
"233.252.0.0/24",
"0.0.0.0/8",
],
"0.0.0.0/8"
]
}
}
]
},
}
],
"ingress": [
{ "from": [ {"podSelector": {}}]}
{
"from": [
{ "podSelector": {} }
]
}
],
"policyTypes": [
"Ingress", "Egress",
"Ingress",
"Egress"
]
}
});
let mut network_policy: NetworkPolicy =
serde_json::from_value(network_policy).map_err(|e| {
ExecutorError::ConfigurationError(format!(


@@ -31,21 +31,21 @@ impl KubePrometheusConfig {
Self {
namespace: None,
default_rules: true,
windows_monitoring: false,
windows_monitoring: true,
alert_manager: true,
grafana: true,
node_exporter: false,
node_exporter: true,
prometheus: true,
kubernetes_service_monitors: true,
kubernetes_api_server: false,
kubelet: false,
kube_controller_manager: false,
kube_etcd: false,
kube_proxy: false,
kubernetes_api_server: true,
kubelet: true,
kube_controller_manager: true,
kube_etcd: true,
kube_proxy: true,
kube_state_metrics: true,
prometheus_operator: true,
core_dns: false,
kube_scheduler: false,
core_dns: true,
kube_scheduler: true,
alert_receiver_configs: vec![],
alert_rules: vec![],
}


@@ -12,8 +12,8 @@ use crate::modules::{
helm::chart::HelmChartScore,
monitoring::kube_prometheus::types::{
AlertGroup, AlertManager, AlertManagerAdditionalPromRules, AlertManagerConfig,
AlertManagerRoute, AlertManagerSpec, AlertManagerValues, Cpu, CpuUnit, Limits, Memory,
MemoryUnit, Requests, Resources,
AlertManagerRoute, AlertManagerSpec, AlertManagerValues, ConfigReloader, Limits, Requests,
Resources,
},
};
@@ -39,24 +39,12 @@ pub fn kube_prometheus_helm_chart_score(
let prometheus = config.prometheus.to_string();
let resource_limit = Resources {
limits: Limits {
memory: Memory {
value: 100,
unit: MemoryUnit::Mi,
},
cpu: Cpu {
value: 100,
unit: CpuUnit::Milli,
},
memory: "100Mi".to_string(),
cpu: "100m".to_string(),
},
requests: Requests {
memory: Memory {
value: 100,
unit: MemoryUnit::Mi,
},
cpu: Cpu {
value: 100,
unit: CpuUnit::Milli,
},
memory: "100Mi".to_string(),
cpu: "100m".to_string(),
},
};
@@ -83,7 +71,13 @@ pub fn kube_prometheus_helm_chart_score(
prometheus:
enabled: {prometheus}
prometheusSpec:
{resource_section}
resources:
requests:
cpu: 100m
memory: 500Mi
limits:
cpu: 200m
memory: 1000Mi
defaultRules:
create: {default_rules}
rules:
@@ -123,42 +117,147 @@ defaultRules:
windows: true
windowsMonitoring:
enabled: {windows_monitoring}
{resource_section}
resources:
requests:
cpu: 100m
memory: 150Mi
limits:
cpu: 200m
memory: 250Mi
grafana:
enabled: {grafana}
{resource_section}
resources:
requests:
cpu: 100m
memory: 150Mi
limits:
cpu: 200m
memory: 250Mi
initChownData:
resources:
requests:
cpu: 10m
memory: 50Mi
limits:
cpu: 50m
memory: 100Mi
sidecar:
resources:
requests:
cpu: 10m
memory: 50Mi
limits:
cpu: 50m
memory: 100Mi
kubernetesServiceMonitors:
enabled: {kubernetes_service_monitors}
{resource_section}
kubeApiServer:
enabled: {kubernetes_api_server}
{resource_section}
resources:
requests:
cpu: 100m
memory: 150Mi
limits:
cpu: 200m
memory: 250Mi
kubelet:
enabled: {kubelet}
{resource_section}
resources:
requests:
cpu: 100m
memory: 150Mi
limits:
cpu: 200m
memory: 250Mi
kubeControllerManager:
enabled: {kube_controller_manager}
{resource_section}
resources:
requests:
cpu: 100m
memory: 150Mi
limits:
cpu: 200m
memory: 250Mi
coreDns:
enabled: {core_dns}
{resource_section}
resources:
requests:
cpu: 100m
memory: 150Mi
limits:
cpu: 200m
memory: 250Mi
kubeEtcd:
enabled: {kube_etcd}
{resource_section}
resources:
requests:
cpu: 100m
memory: 150Mi
limits:
cpu: 200m
memory: 250Mi
kubeScheduler:
enabled: {kube_scheduler}
{resource_section}
resources:
requests:
cpu: 100m
memory: 150Mi
limits:
cpu: 200m
memory: 250Mi
kubeProxy:
enabled: {kube_proxy}
{resource_section}
resources:
requests:
cpu: 100m
memory: 150Mi
limits:
cpu: 200m
memory: 250Mi
kubeStateMetrics:
enabled: {kube_state_metrics}
{resource_section}
kube-state-metrics:
resources:
requests:
cpu: 100m
memory: 150Mi
limits:
cpu: 200m
memory: 250Mi
nodeExporter:
enabled: {node_exporter}
{resource_section}
resources:
requests:
cpu: 100m
memory: 150Mi
limits:
cpu: 200m
memory: 250Mi
prometheus-node-exporter:
resources:
requests:
cpu: 100m
memory: 150Mi
limits:
cpu: 200m
memory: 250Mi
prometheusOperator:
enabled: {prometheus_operator}
resources:
requests:
cpu: 100m
memory: 150Mi
limits:
cpu: 100m
memory: 200Mi
prometheusConfigReloader:
resources:
requests:
cpu: 100m
memory: 150Mi
limits:
cpu: 100m
memory: 200Mi
admissionWebhooks:
deployment:
resources:
@@ -218,28 +317,28 @@ prometheusOperator:
alertmanager: AlertManager {
enabled: config.alert_manager,
config: alert_manager_channel_config,
alertManagerSpec: AlertManagerSpec {
alertmanager_spec: AlertManagerSpec {
resources: Resources {
limits: Limits {
memory: Memory {
value: 100,
unit: MemoryUnit::Mi,
},
cpu: Cpu {
value: 100,
unit: CpuUnit::Milli,
},
memory: "100Mi".to_string(),
cpu: "100m".to_string(),
},
requests: Requests {
memory: Memory {
value: 100,
unit: MemoryUnit::Mi,
memory: "100Mi".to_string(),
cpu: "100m".to_string(),
},
cpu: Cpu {
value: 100,
unit: CpuUnit::Milli,
},
},
init_config_reloader: ConfigReloader {
resources: Resources {
limits: Limits {
memory: "100Mi".to_string(),
cpu: "100m".to_string(),
},
requests: Requests {
memory: "100Mi".to_string(),
cpu: "100m".to_string(),
},
},
},
},


@@ -1,2 +1,3 @@
pub mod config;
pub mod kube_prometheus_helm_chart;
pub mod types;


@@ -0,0 +1,142 @@
// // in your build_score function...
//
// // --- Step 1: Define the structs that match the ENTIRE values.yaml structure ---
//
// #[derive(Serialize, Debug)]
// #[serde(rename_all = "camelCase")]
// struct FullValues {
// // Top-level keys for each component
// prometheus: Prometheus,
// grafana: Grafana,
// alertmanager: Alertmanager,
// kube_state_metrics: KubeStateMetrics,
// prometheus_operator: PrometheusOperator,
// // Add other components like nodeExporter if you enable them
//
// // Key for your custom rules
// additional_prometheus_rules_map: AlertManagerAdditionalPromRules,
// }
//
// #[derive(Serialize, Debug)]
// #[serde(rename_all = "camelCase")]
// struct Prometheus {
// enabled: bool,
// prometheus_spec: PrometheusSpec,
// }
//
// #[derive(Serialize, Debug)]
// #[serde(rename_all = "camelCase")]
// struct PrometheusSpec {
// resources: K8sResources,
// }
//
// #[derive(Serialize, Debug)]
// #[serde(rename_all = "camelCase")]
// struct Grafana {
// enabled: bool,
// resources: K8sResources,
// sidecar: GrafanaSidecar,
// }
//
// #[derive(Serialize, Debug)]
// #[serde(rename_all = "camelCase")]
// struct GrafanaSidecar {
// resources: K8sResources,
// }
//
// #[derive(Serialize, Debug)]
// #[serde(rename_all = "camelCase")]
// struct Alertmanager {
// enabled: bool,
// config: AlertManagerConfig, // Your existing struct for this
// alert_manager_spec: AlertManagerSpec,
// }
//
// #[derive(Serialize, Debug)]
// #[serde(rename_all = "camelCase")]
// struct AlertManagerSpec {
// resources: K8sResources,
// // You will need to add a `config_reloader` field here for its resources
// }
//
// // Define KubeStateMetrics, PrometheusOperator, etc. in the same way
// // ...
//
// // Your K8sResources struct (flat, with strings)
// #[derive(Serialize, Debug)]
// struct K8sResources {
// requests: ResourceValues,
// limits: ResourceValues,
// }
//
// #[derive(Serialize, Debug)]
// struct ResourceValues {
// cpu: String,
// memory: String,
// }
//
//
// // --- Step 2: Populate the single, unified struct ---
//
// // Prepare your alertmanager config
// let mut alert_manager_channel_config = build_your_alert_manager_config(); // Your existing logic
//
// // Prepare your custom rules
// let merged_rules = build_your_prometheus_rules(); // Your existing logic
//
// // Define the resource profiles
// let heavy_res = K8sResources { /* ... */ };
// let medium_res = K8sResources { /* ... */ };
// let light_res = K8sResources { /* ... */ };
//
// // Create the single source of truth for your values
// let full_values = FullValues {
// prometheus: Prometheus {
// enabled: config.prometheus,
// prometheus_spec: PrometheusSpec {
// resources: heavy_res,
// },
// },
// grafana: Grafana {
// enabled: config.grafana,
// resources: medium_res,
// sidecar: GrafanaSidecar {
// resources: light_res,
// },
// },
// alertmanager: Alertmanager {
// enabled: config.alert_manager,
// config: alert_manager_channel_config,
// alert_manager_spec: AlertManagerSpec {
// resources: light_res,
// // You'd add the config_reloader resources here
// },
// },
// kube_state_metrics: KubeStateMetrics {
// enabled: config.kube_state_metrics,
// resources: medium_res,
// },
// prometheus_operator: PrometheusOperator {
// enabled: config.prometheus_operator,
// resources: light_res,
// // ... and so on for its sidecars
// },
// additional_prometheus_rules_map: merged_rules,
// };
//
//
// // --- Step 3: Serialize the single struct ONCE ---
//
// let final_values_yaml = serde_yaml::to_string(&full_values)
// .expect("Failed to serialize final values YAML");
//
// debug!("full values.yaml: \n {:#}", final_values_yaml);
//
//
// // --- Step 4: Use the final string in your Helm score ---
//
// HelmChartScore {
// // ...
// values_yaml: Some(final_values_yaml),
// // ...
// }
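The commented-out plan above boils down to building one unified values struct and serializing it in a single pass instead of string-formatting YAML by hand. A minimal, self-contained sketch of that pattern follows; the types are illustrative stand-ins rather than the repository's real ones, and it assumes `serde` (with the derive feature) and `serde_yaml` as dependencies.

```rust
// Sketch of the "single unified values struct, serialized once" approach.
use serde::Serialize;

#[derive(Serialize)]
struct ResourceValues {
    cpu: String,
    memory: String,
}

#[derive(Serialize)]
struct K8sResources {
    requests: ResourceValues,
    limits: ResourceValues,
}

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct PrometheusSpec {
    resources: K8sResources,
}

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct Prometheus {
    enabled: bool,
    prometheus_spec: PrometheusSpec,
}

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct FullValues {
    prometheus: Prometheus,
}

fn main() {
    let values = FullValues {
        prometheus: Prometheus {
            enabled: true,
            prometheus_spec: PrometheusSpec {
                resources: K8sResources {
                    requests: ResourceValues { cpu: "100m".into(), memory: "500Mi".into() },
                    limits: ResourceValues { cpu: "200m".into(), memory: "1000Mi".into() },
                },
            },
        },
    };
    // One serialization call produces the whole values.yaml tree,
    // with camelCase keys handled by serde instead of hand-written templates.
    let yaml = serde_yaml::to_string(&values).expect("values should serialize");
    println!("{yaml}");
}
```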


@@ -16,10 +16,17 @@ pub struct AlertManagerValues {
pub alertmanager: AlertManager,
}
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct AlertManager {
pub enabled: bool,
pub config: AlertManagerConfig,
pub alertManagerSpec: AlertManagerSpec,
pub alertmanager_spec: AlertManagerSpec,
pub init_config_reloader: ConfigReloader,
}
#[derive(Debug, Clone, Serialize)]
pub struct ConfigReloader {
pub resources: Resources,
}
#[derive(Debug, Clone, Serialize)]
@@ -45,6 +52,7 @@ pub struct AlertManagerChannelConfig {
}
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct AlertManagerSpec {
pub(crate) resources: Resources,
}
@@ -57,43 +65,14 @@ pub struct Resources {
#[derive(Debug, Clone, Serialize)]
pub struct Limits {
pub memory: Memory,
pub cpu: Cpu,
pub memory: String,
pub cpu: String,
}
#[derive(Debug, Clone, Serialize)]
pub struct Requests {
pub memory: Memory,
pub cpu: Cpu,
}
#[derive(Debug, Clone, Serialize)]
pub struct Memory {
pub value: u64,
pub unit: MemoryUnit,
}
#[derive(Debug, Clone, Serialize)]
pub struct Cpu {
pub value: u64,
pub unit: CpuUnit,
}
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "lowercase")]
pub enum MemoryUnit {
Ki,
Mi,
Gi,
Ti,
}
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "lowercase")]
pub enum CpuUnit {
// 1 = 1 core, m = millicore
Core,
Milli,
pub memory: String,
pub cpu: String,
}
#[derive(Debug, Clone, Serialize)]