wip: added ingress scores for install grafana and install prometheus; added ingress capability to k8s anywhere topology

need to get the domain name dynamically from the topology when building the app, so it can be inserted into the helm chart
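A minimal sketch of the intended wiring (the `app_name` variable is hypothetical; `get_domain` is the trait method added in this commit):

let client = topology.k8s_client().await?;
let domain = topology.get_domain(client).await?; // e.g. "apps.okd.example.com" on OpenShift/OKD
let host = format!("{app_name}.{domain}"); // templated into the chart's values.yaml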
This commit is contained in:
Willem 2025-09-08 16:16:01 -04:00
parent fedb346548
commit 288129b0c1
9 changed files with 148 additions and 20 deletions

View File

@@ -0,0 +1,12 @@
use std::sync::Arc;
use async_trait::async_trait;
use crate::{
interpret::InterpretError,
topology::{PreparationError, k8s::K8sClient},
};
#[async_trait]
pub trait Ingress {
async fn get_domain(&self, client: Arc<K8sClient>) -> Result<String, PreparationError>;
}
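A topology that already knows its apps domain can skip the cluster lookup entirely; a minimal sketch of an implementor (the `StaticDomainTopology` type is hypothetical):

pub struct StaticDomainTopology {
domain: String,
}

#[async_trait]
impl Ingress for StaticDomainTopology {
async fn get_domain(&self, _client: Arc<K8sClient>) -> Result<String, PreparationError> {
// No API call needed; the domain is configured up front.
Ok(self.domain.clone())
}
}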

View File

@@ -1,6 +1,7 @@
use std::{process::Command, sync::Arc};
use async_trait::async_trait;
use kube::api::GroupVersionKind;
use log::{debug, info, warn};
use serde::Serialize;
use tokio::sync::OnceCell;
@@ -22,6 +23,7 @@ use crate::{
},
},
score::Score,
topology::ingress::Ingress,
};
use super::{
@@ -198,6 +200,26 @@ impl K8sAnywhereTopology {
}
}
async fn openshift_ingress_operator_available(&self) -> Result<(), PreparationError> {
let client = self.k8s_client().await?;
let gvk = GroupVersionKind {
group: "operator.openshift.io".into(),
version: "v1".into(),
kind: "IngressController".into(),
};
let ic = client
.get_resource_json_value("default", Some("openshift-ingress-operator"), &gvk)
.await?;
let available_replicas = ic.data["status"]["availableReplicas"].as_i64().unwrap_or(0);
if available_replicas >= 1 {
Ok(())
} else {
Err(PreparationError::new(
"openshift-ingress-operator not available".to_string(),
))
}
}
fn is_helm_available(&self) -> Result<(), String> {
let version_result = Command::new("helm")
.arg("version")
@@ -550,3 +572,26 @@ impl TenantManager for K8sAnywhereTopology {
.await
}
}
#[async_trait]
impl Ingress for K8sAnywhereTopology {
// TODO: this is specific to OpenShift/OKD, which violates the K8sAnywhere idea
async fn get_domain(&self, client: Arc<K8sClient>) -> Result<String, PreparationError> {
self.openshift_ingress_operator_available().await?;
let gvk = GroupVersionKind {
group: "operator.openshift.io".into(),
version: "v1".into(),
kind: "IngressController".into(),
};
let ic = client
.get_resource_json_value("default", Some("openshift-ingress-operator"), &gvk)
.await
.map_err(|_| PreparationError::new("Failed to fetch IngressController".to_string()))?;
match ic.data["status"]["domain"].as_str() {
Some(domain) => Ok(domain.to_string()),
None => Err(PreparationError::new("Could not find domain".to_string())),
}
}
}
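Call sites only need the trait bound, which keeps scores topology-agnostic; a sketch of a hypothetical helper (assumes the `k8s_client` error converts into `PreparationError`, as it does in `openshift_ingress_operator_available` above):

async fn app_host<T: K8sclient + Ingress>(topology: &T, app: &str) -> Result<String, PreparationError> {
let client = topology.k8s_client().await?;
let domain = topology.get_domain(client).await?;
Ok(format!("{app}.{domain}"))
}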

View File

@@ -1,4 +1,5 @@
mod ha_cluster;
pub mod ingress;
use harmony_types::net::IpAddress;
mod host_binding;
mod http;

View File

@@ -10,11 +10,10 @@ use crate::{
data::Version,
inventory::Inventory,
modules::application::{
ApplicationFeature, HelmPackage, OCICompliant,
features::{ArgoApplication, ArgoHelmScore},
features::{ArgoApplication, ArgoHelmScore}, ApplicationFeature, HelmPackage, OCICompliant
},
score::Score,
topology::{DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology},
topology::{ingress::Ingress, DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology},
};
/// ContinuousDelivery in Harmony provides this functionality :
@@ -136,7 +135,7 @@ impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
#[async_trait]
impl<
A: OCICompliant + HelmPackage + Clone + 'static,
T: Topology + HelmCommand + MultiTargetTopology + K8sclient + 'static,
T: Topology + HelmCommand + MultiTargetTopology + K8sclient + Ingress + 'static,
> ApplicationFeature<T> for ContinuousDelivery<A>
{
async fn ensure_installed(&self, topology: &T) -> Result<(), String> {

View File

@@ -13,7 +13,7 @@ use crate::{
modules::helm::chart::{HelmChartScore, HelmRepository},
score::Score,
topology::{
HelmCommand, K8sclient, PreparationError, PreparationOutcome, Topology, k8s::K8sClient,
ingress::Ingress, k8s::K8sClient, HelmCommand, K8sclient, PreparationError, PreparationOutcome, Topology
},
};
use harmony_types::id::Id;
@@ -27,7 +27,7 @@ pub struct ArgoHelmScore {
pub argo_apps: Vec<ArgoApplication>,
}
impl<T: Topology + HelmCommand + K8sclient> Score<T> for ArgoHelmScore {
impl<T: Topology + HelmCommand + K8sclient + Ingress> Score<T> for ArgoHelmScore {
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
Box::new(ArgoInterpret {
score: self.clone(),
@@ -47,16 +47,14 @@ pub struct ArgoInterpret {
}
#[async_trait]
impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for ArgoInterpret {
impl<T: Topology + K8sclient + HelmCommand + Ingress> Interpret<T> for ArgoInterpret {
async fn execute(
&self,
inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
let k8s_client = topology.k8s_client().await?;
let domain = self
.get_host_domain(k8s_client.clone(), self.score.openshift)
.await?;
let domain = topology.get_domain(k8s_client.clone()).await?;
let domain = format!("argo.{domain}");
let helm_score =
argo_helm_chart_score(&self.score.namespace, self.score.openshift, &domain);
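For example, against a cluster whose default IngressController reports `apps.okd.example.com` (hypothetical), the composed host works out to:

let domain = "apps.okd.example.com".to_string(); // returned by topology.get_domain()
let domain = format!("argo.{domain}"); // "argo.apps.okd.example.com"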

View File

@@ -411,6 +411,7 @@ impl RustWebapp {
async fn create_helm_chart_files<T: Topology + K8sclient + Ingress>(
&self,
image_url: &str,
topology: &T,
) -> Result<PathBuf, Box<dyn std::error::Error>> {
let chart_name = format!("{}-chart", self.name);
let chart_dir = self
@@ -422,6 +423,9 @@
fs::create_dir_all(&templates_dir)?;
let (image_repo, image_tag) = image_url.rsplit_once(':').unwrap_or((image_url, "latest"));
// Resolve the ingress domain from the topology so the chart's hosts are no longer hardcoded
let client = topology.k8s_client().await?;
let domain = topology.get_domain(client.clone()).await?;
// Create Chart.yaml
let chart_yaml = format!(
@@ -464,17 +468,17 @@
# Add other annotations like nginx ingress class if needed
# kubernetes.io/ingress.class: nginx
hosts:
- host: chart-example.local
- host: {}
paths:
- path: /
pathType: ImplementationSpecific
tls:
- secretName: {}-tls
hosts:
- chart-example.local
- {}
"#,
chart_name, image_repo, image_tag, self.service_port, self.name
chart_name, image_repo, image_tag, self.service_port, domain, self.name, domain
);
fs::write(chart_dir.join("values.yaml"), values_yaml)?;
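For reference, the repo/tag split above behaves like this (sample image URL hypothetical):

let image_url = "registry.example.com/my-app:1.2.3";
let (image_repo, image_tag) = image_url.rsplit_once(':').unwrap_or((image_url, "latest"));
assert_eq!((image_repo, image_tag), ("registry.example.com/my-app", "1.2.3"));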

View File

@@ -40,6 +40,7 @@ pub struct K8sIngressScore {
pub path: Option<IngressPath>,
pub path_type: Option<PathType>,
pub namespace: Option<fqdn::FQDN>,
pub ingress_class_name: Option<String>,
}
impl<T: Topology + K8sclient> Score<T> for K8sIngressScore {
@@ -54,12 +55,20 @@ impl<T: Topology + K8sclient> Score<T> for K8sIngressScore {
None => PathType::Prefix,
};
let ingress_class = self
.ingress_class_name
.clone()
.unwrap_or_else(|| "default".to_string());
let ingress = json!(
{
"metadata": {
"name": self.name.to_string(),
},
"spec": {
"ingressClassName": ingress_class.as_str(),
"rules": [
{ "host": self.host.to_string(),
"http": {

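A minimal usage sketch with the new field (all values hypothetical):

let ingress = K8sIngressScore {
name: fqdn!("grafana-ingress"),
host: fqdn!("grafana.apps.okd.example.com"),
backend_service: fqdn!("grafana-service"),
port: 3000,
path: None,
path_type: None, // defaults to PathType::Prefix
namespace: Some(fqdn!("monitoring")),
ingress_class_name: None, // falls back to "default"
};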
View File

@@ -147,6 +147,7 @@ impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for LAMPInterpret {
port: 8080,
path: Some(ingress_path),
path_type: None,
ingress_class_name: None,
namespace: self
.get_namespace()
.map(|nbs| fqdn!(nbs.to_string().as_str())),

View File

@@ -1,3 +1,4 @@
use fqdn::fqdn;
use std::fs;
use std::{collections::BTreeMap, sync::Arc};
use tempfile::tempdir;
@@ -8,6 +9,7 @@ use log::{debug, info};
use serde::Serialize;
use std::process::Command;
use crate::modules::k8s::ingress::{K8sIngressScore, PathType};
use crate::modules::monitoring::kube_prometheus::crd::grafana_default_dashboard::build_default_dashboard;
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanagers::{
@@ -29,6 +31,7 @@ use crate::modules::monitoring::kube_prometheus::crd::rhob_service_monitor::{
ServiceMonitor, ServiceMonitorSpec,
};
use crate::score::Score;
use crate::topology::ingress::Ingress;
use crate::topology::oberservability::monitoring::AlertReceiver;
use crate::topology::{K8sclient, Topology, k8s::K8sClient};
use crate::{
@@ -48,7 +51,7 @@ pub struct RHOBAlertingScore {
pub prometheus_rules: Vec<RuleGroup>,
}
impl<T: Topology + K8sclient + PrometheusApplicationMonitoring<RHOBObservability>> Score<T>
impl<T: Topology + K8sclient + Ingress + PrometheusApplicationMonitoring<RHOBObservability>> Score<T>
for RHOBAlertingScore
{
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
@@ -74,19 +77,20 @@ pub struct RHOBAlertingInterpret {
}
#[async_trait]
impl<T: Topology + K8sclient + PrometheusApplicationMonitoring<RHOBObservability>> Interpret<T>
impl<T: Topology + K8sclient + Ingress + PrometheusApplicationMonitoring<RHOBObservability>> Interpret<T>
for RHOBAlertingInterpret
{
async fn execute(
&self,
_inventory: &Inventory,
inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
let client = topology.k8s_client().await?;
self.ensure_grafana_operator().await?;
self.install_prometheus(&client).await?;
self.install_prometheus(inventory, topology, &client)
.await?;
self.install_client_kube_metrics().await?;
self.install_grafana(&client).await?;
self.install_grafana(inventory, topology, &client).await?;
self.install_receivers(&self.sender, &self.receivers)
.await?;
self.install_rules(&self.prometheus_rules, &client).await?;
@@ -238,7 +242,12 @@ impl RHOBAlertingInterpret {
)))
}
async fn install_prometheus(&self, client: &Arc<K8sClient>) -> Result<Outcome, InterpretError> {
async fn install_prometheus<T: Topology + K8sclient + Ingress>(
&self,
inventory: &Inventory,
topology: &T,
client: &Arc<K8sClient>,
) -> Result<Outcome, InterpretError> {
debug!(
"installing crd-prometheuses in namespace {}",
self.sender.namespace.clone()
@@ -265,6 +274,36 @@ impl RHOBAlertingInterpret {
.apply(&stack, Some(&self.sender.namespace.clone()))
.await
.map_err(|e| InterpretError::new(e.to_string()))?;
let domain = topology.get_domain(client.clone()).await?;
let namespace = self.sender.namespace.clone();
// Give each component its own subdomain under the cluster domain,
// mirroring the `argo.{domain}` pattern used by the Argo interpret.
let name = format!("{namespace}-alert-manager");
let backend_service = format!("{namespace}-alert-manager");
let host = format!("alertmanager.{domain}");
let alert_manager_ingress = K8sIngressScore {
name: fqdn!(&name),
host: fqdn!(&host),
backend_service: fqdn!(&backend_service),
port: 9093,
path: Some("/".to_string()),
path_type: Some(PathType::Prefix),
namespace: Some(fqdn!(&namespace)),
ingress_class_name: Some("openshift-default".to_string()),
};
let name = format!("{namespace}-prometheus");
let backend_service = format!("{namespace}-prometheus");
let host = format!("prometheus.{domain}");
let prometheus_ingress = K8sIngressScore {
name: fqdn!(&name),
host: fqdn!(&host),
backend_service: fqdn!(&backend_service),
port: 9090,
path: Some("/".to_string()),
path_type: Some(PathType::Prefix),
namespace: Some(fqdn!(&namespace)),
ingress_class_name: Some("openshift-default".to_string()),
};
alert_manager_ingress.interpret(inventory, topology).await?;
prometheus_ingress.interpret(inventory, topology).await?;
info!("installed rhob monitoring stack");
Ok(Outcome::success(format!(
"successfully deployed rhob-prometheus {:#?}",
@@ -379,7 +418,12 @@ impl RHOBAlertingInterpret {
)))
}
async fn install_grafana(&self, client: &Arc<K8sClient>) -> Result<Outcome, InterpretError> {
async fn install_grafana<T: Topology + K8sclient + Ingress>(
&self,
inventory: &Inventory,
topology: &T,
client: &Arc<K8sClient>,
) -> Result<Outcome, InterpretError> {
let mut label = BTreeMap::new();
label.insert("dashboards".to_string(), "grafana".to_string());
let labels = LabelSelector {
@@ -465,6 +509,21 @@ impl RHOBAlertingInterpret {
.apply(&grafana, Some(&self.sender.namespace.clone()))
.await
.map_err(|e| InterpretError::new(e.to_string()))?;
let domain = topology.get_domain(client.clone()).await?;
let namespace = self.sender.namespace.clone();
let name = format!("{namespace}-grafana");
let backend_service = format!("{namespace}-grafana");
let host = format!("grafana.{domain}");
let grafana_ingress = K8sIngressScore {
name: fqdn!(&name),
host: fqdn!(&host),
backend_service: fqdn!(&backend_service),
// Grafana serves HTTP on 3000 by default; 9090 is Prometheus.
port: 3000,
path: Some("/".to_string()),
path_type: Some(PathType::Prefix),
namespace: Some(fqdn!(&namespace)),
ingress_class_name: Some("openshift-default".to_string()),
};
grafana_ingress.interpret(inventory, topology).await?;
Ok(Outcome::success(format!(
"successfully deployed grafana instance {:#?}",
grafana.metadata.name