refactor/ns #74
@@ -7,7 +7,13 @@ use harmony::{
     monitoring::{
         alert_channel::discord_alert_channel::DiscordWebhook,
         alert_rule::prometheus_alert_rule::AlertManagerRuleGroup,
-        kube_prometheus::helm_prometheus_alert_score::HelmPrometheusAlertingScore,
+        kube_prometheus::{
+            helm_prometheus_alert_score::HelmPrometheusAlertingScore,
+            types::{
+                HTTPScheme, MatchExpression, Operator, Selector, ServiceMonitor,
+                ServiceMonitorEndpoint,
+            },
+        },
     },
     prometheus::alerts::{
         infra::dell_server::{

@@ -1,3 +1,5 @@
+use std::collections::HashMap;
+
 use harmony::{
     data::Id,
     inventory::Inventory,

@@ -6,7 +8,13 @@ use harmony::{
     monitoring::{
         alert_channel::discord_alert_channel::DiscordWebhook,
         alert_rule::prometheus_alert_rule::AlertManagerRuleGroup,
-        kube_prometheus::helm_prometheus_alert_score::HelmPrometheusAlertingScore,
+        kube_prometheus::{
+            helm_prometheus_alert_score::HelmPrometheusAlertingScore,
+            types::{
+                HTTPScheme, MatchExpression, Operator, Selector, ServiceMonitor,
+                ServiceMonitorEndpoint,
+            },
+        },
     },
     prometheus::alerts::k8s::pvc::high_pvc_fill_rate_over_two_days,
     tenant::TenantScore,

@@ -44,9 +52,31 @@ async fn main() {
     let additional_rules =
         AlertManagerRuleGroup::new("pvc-alerts", vec![high_pvc_fill_rate_over_two_days_alert]);

+    let service_monitor_endpoint = ServiceMonitorEndpoint {
+        port: Some("80".to_string()),
+        path: "/metrics".to_string(),
+        scheme: HTTPScheme::HTTP,
+        ..Default::default()
+    };
+
+    let service_monitor = ServiceMonitor {
+        name: "test-service-monitor".to_string(),
+        selector: Selector {
+            match_labels: HashMap::new(),
+            match_expressions: vec![MatchExpression {
+                key: "test".to_string(),
+                operator: Operator::In,
+                values: vec!["test-service".to_string()],
+            }],
+        },
+        endpoints: vec![service_monitor_endpoint],
+        ..Default::default()
+    };
+
     let alerting_score = HelmPrometheusAlertingScore {
         receivers: vec![Box::new(discord_receiver)],
         rules: vec![Box::new(additional_rules)],
+        service_monitors: vec![service_monitor],
     };
     let mut maestro = Maestro::<K8sAnywhereTopology>::initialize(
         Inventory::autoload(),

@@ -4,7 +4,7 @@ use crate::{interpret::InterpretError, inventory::Inventory};

 #[async_trait]
 pub trait Installable<T>: Send + Sync {
-    fn configure(&self, inventory: &Inventory, topology: &T) -> Result<(), InterpretError>;
+    async fn configure(&self, inventory: &Inventory, topology: &T) -> Result<(), InterpretError>;

     async fn ensure_installed(
         &self,

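Note: `Installable::configure` is now async, so every implementor must update its signature. A minimal sketch of an implementor after this change (the `NoopSender` type is hypothetical, and `ensure_installed` is omitted for brevity):

    use async_trait::async_trait;

    struct NoopSender;

    #[async_trait]
    impl<T: Send + Sync> Installable<T> for NoopSender {
        // The added `async` keyword is the only signature change implementors
        // must absorb; the body can stay synchronous if it has nothing to await.
        async fn configure(&self, _inventory: &Inventory, _topology: &T) -> Result<(), InterpretError> {
            Ok(())
        }
    }
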
@@ -39,7 +39,6 @@ pub struct K8sAnywhereTopology {
     k8s_state: Arc<OnceCell<Option<K8sState>>>,
     tenant_manager: Arc<OnceCell<K8sTenantManager>>,
     config: Arc<K8sAnywhereConfig>,
-    tenant_manager_config: OnceCell<TenantConfig>,
 }

 #[async_trait]

@@ -74,7 +73,6 @@ impl K8sAnywhereTopology {
             k8s_state: Arc::new(OnceCell::new()),
             tenant_manager: Arc::new(OnceCell::new()),
             config: Arc::new(K8sAnywhereConfig::from_env()),
-            tenant_manager_config: OnceCell::new(),
         }
     }

@@ -83,7 +81,6 @@ impl K8sAnywhereTopology {
             k8s_state: Arc::new(OnceCell::new()),
             tenant_manager: Arc::new(OnceCell::new()),
             config: Arc::new(config),
-            tenant_manager_config: OnceCell::new(),
         }
     }

@@ -199,16 +196,10 @@ impl K8sAnywhereTopology {
                 let k8s_client = self.k8s_client().await?;
                 Ok(K8sTenantManager::new(k8s_client))
             })
-            .await
-            .unwrap();
+            .await?;

         Ok(())
     }
-    async fn store_tenant_config(&self, config: TenantConfig) {
-        self.tenant_manager_config
-            .get_or_init(|| async { config })
-            .await;
-    }

     fn get_k8s_tenant_manager(&self) -> Result<&K8sTenantManager, ExecutorError> {
         match self.tenant_manager.get() {

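Note: replacing `.await.unwrap()` with `.await?` propagates tenant-manager initialization failures to the caller instead of panicking. A sketch of the surrounding pattern, assuming `tokio::sync::OnceCell::get_or_try_init` as the visible closure suggests:

    // get_or_try_init runs the async initializer at most once and caches the
    // result; on Err the cell stays uninitialized, so `?` surfaces the
    // client-construction error rather than panicking mid-deployment.
    self.tenant_manager
        .get_or_try_init(|| async {
            let k8s_client = self.k8s_client().await?;
            Ok(K8sTenantManager::new(k8s_client))
        })
        .await?;
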
@@ -289,13 +280,15 @@ impl HelmCommand for K8sAnywhereTopology {}
 #[async_trait]
 impl TenantManager for K8sAnywhereTopology {
     async fn provision_tenant(&self, config: &TenantConfig) -> Result<(), ExecutorError> {
-        self.store_tenant_config(config.clone()).await;
         self.get_k8s_tenant_manager()?
             .provision_tenant(config)
             .await
     }

-    fn get_tenant_config(&self) -> Option<TenantConfig> {
-        self.tenant_manager_config.get().cloned()
+    async fn get_tenant_config(&self) -> Option<TenantConfig> {
+        self.get_k8s_tenant_manager()
+            .ok()?
+            .get_tenant_config()
+            .await
     }
 }

@@ -27,7 +27,7 @@ impl<S: AlertSender + Installable<T>, T: Topology> Interpret<T> for AlertingInte
         inventory: &Inventory,
         topology: &T,
     ) -> Result<Outcome, InterpretError> {
-        self.sender.configure(inventory, topology)?;
+        self.sender.configure(inventory, topology).await?;
         for receiver in self.receivers.iter() {
             receiver.install(&self.sender).await?;
         }

@@ -5,7 +5,6 @@ use crate::{
     topology::k8s::{ApplyStrategy, K8sClient},
 };
 use async_trait::async_trait;
-use derive_new::new;
 use k8s_openapi::{
     api::{
         core::v1::{LimitRange, Namespace, ResourceQuota},

@@ -19,12 +18,23 @@ use kube::Resource;
 use log::{debug, info, warn};
 use serde::de::DeserializeOwned;
 use serde_json::json;
+use tokio::sync::OnceCell;

 use super::{TenantConfig, TenantManager};

-#[derive(new, Clone, Debug)]
+#[derive(Clone, Debug)]
 pub struct K8sTenantManager {
     k8s_client: Arc<K8sClient>,
+    k8s_tenant_config: Arc<OnceCell<TenantConfig>>,
+}
+
+impl K8sTenantManager {
+    pub fn new(client: Arc<K8sClient>) -> Self {
+        Self {
+            k8s_client: client,
+            k8s_tenant_config: Arc::new(OnceCell::new()),
+        }
+    }
 }

 impl K8sTenantManager {

@@ -147,7 +157,7 @@ impl K8sTenantManager {
             "spec": {
                 "limits": [
                     {
-                    "type": "Container",
+                        "type": "Container",
                         "default": {
                             "cpu": "500m",
                             "memory": "500Mi"

@@ -391,6 +401,9 @@ impl K8sTenantManager {

         Ok(network_policy)
     }
+    fn store_config(&self, config: &TenantConfig) {
+        let _ = self.k8s_tenant_config.set(config.clone());
+    }
 }

 #[async_trait]

@@ -419,9 +432,10 @@ impl TenantManager for K8sTenantManager {
             "Success provisionning K8s tenant id {} name {}",
             config.id, config.name
         );
+        self.store_config(config);
         Ok(())
     }
-    fn get_tenant_config(&self) -> Option<TenantConfig> {
-        todo!()
+    async fn get_tenant_config(&self) -> Option<TenantConfig> {
+        self.k8s_tenant_config.get().cloned()
     }
 }

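Note: `tokio::sync::OnceCell::set` returns an error once the cell already holds a value, so the `let _ =` in `store_config` silently keeps the first config ever stored. If a second `provision_tenant` call with a different `TenantConfig` should be visible, one alternative sketch (using the `warn!` macro already imported in this file):

    fn store_config(&self, config: &TenantConfig) {
        // set() fails when a value is already present; log it instead of
        // silently discarding the newer config.
        if self.k8s_tenant_config.set(config.clone()).is_err() {
            warn!("tenant config already stored; keeping the first value");
        }
    }
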
@@ -16,5 +16,5 @@ pub trait TenantManager {
     /// * `config`: The desired configuration for the new tenant.
     async fn provision_tenant(&self, config: &TenantConfig) -> Result<(), ExecutorError>;

-    fn get_tenant_config(&self) -> Option<TenantConfig>;
+    async fn get_tenant_config(&self) -> Option<TenantConfig>;
 }

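Note: making `get_tenant_config` async ripples to every call site, which now has to await the lookup. A minimal caller sketch, mirroring the Prometheus change below:

    // Resolve the tenant namespace, falling back to a default.
    let ns = topology
        .get_tenant_config()
        .await
        .map(|cfg| cfg.name.clone())
        .unwrap_or_else(|| "monitoring".to_string());
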
@@ -11,7 +11,9 @@ use std::{
 use crate::modules::{
     helm::chart::HelmChartScore,
     monitoring::kube_prometheus::types::{
-        AlertGroup, AlertManager, AlertManagerAdditionalPromRules, AlertManagerConfig, AlertManagerRoute, AlertManagerSpec, AlertManagerValues, ConfigReloader, Limits, PrometheusConfig, Requests, Resources
+        AlertGroup, AlertManager, AlertManagerAdditionalPromRules, AlertManagerConfig,
+        AlertManagerRoute, AlertManagerSpec, AlertManagerValues, ConfigReloader, Limits,
+        PrometheusConfig, Requests, Resources,
     },
 };

@@ -35,8 +35,8 @@ impl AlertSender for Prometheus {

 #[async_trait]
 impl<T: Topology + HelmCommand + TenantManager> Installable<T> for Prometheus {
-    fn configure(&self, _inventory: &Inventory, topology: &T) -> Result<(), InterpretError> {
-        self.configure_with_topology(topology);
+    async fn configure(&self, _inventory: &Inventory, topology: &T) -> Result<(), InterpretError> {
+        self.configure_with_topology(topology).await;
         Ok(())
     }

@@ -62,9 +62,10 @@ impl Prometheus {
         }
     }

-    pub fn configure_with_topology<T: TenantManager>(&self, topology: &T) {
+    pub async fn configure_with_topology<T: TenantManager>(&self, topology: &T) {
         let ns = topology
             .get_tenant_config()
+            .await
             .map(|cfg| cfg.name.clone())
             .unwrap_or_else(|| "monitoring".to_string());
         error!("This must be refactored, see comments in pr #74");

Review comment on the error! line above:
Higher-level components such as Monitoring, Alerting, etc. should not be aware of the Tenant concept. The topology itself should manage the tenant-related logic internally.
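
One possible direction for that refactor, sketched under assumptions (the `NamespaceProvider` trait and `namespace` method are hypothetical, not part of this PR): the topology resolves tenant details internally and exposes only what higher-level components need, so Monitoring and Alerting never import tenant types.

    #[async_trait]
    pub trait NamespaceProvider {
        async fn namespace(&self) -> String;
    }

    #[async_trait]
    impl NamespaceProvider for K8sAnywhereTopology {
        async fn namespace(&self) -> String {
            // Tenant awareness stays inside the topology.
            self.get_tenant_config()
                .await
                .map(|cfg| cfg.name.clone())
                .unwrap_or_else(|| "monitoring".to_string())
        }
    }

    // Prometheus::configure_with_topology could then take `T: NamespaceProvider`
    // and drop its TenantManager bound entirely.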