ingress: check whether running as local k3d or kubeconfig

Ian Letourneau 2025-09-08 20:43:12 -04:00
parent 288129b0c1
commit 54803c40a2
4 changed files with 41 additions and 26 deletions

View File

@@ -1,11 +1,7 @@
+use crate::topology::{PreparationError, k8s::K8sClient};
+use async_trait::async_trait;
 use std::sync::Arc;
-use async_trait::async_trait;
-use crate::{
-    interpret::InterpretError,
-    topology::{PreparationError, k8s::K8sClient},
-};
 
 #[async_trait]
 pub trait Ingress {
     async fn get_domain(&self, client: Arc<K8sClient>) -> Result<String, PreparationError>;
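
For callers, the new trait boils down to "give me the externally reachable domain for this topology". A minimal usage sketch (the helper below is hypothetical; only the Ingress trait itself comes from this commit):

async fn print_domain<T: Ingress>(
    topology: &T,
    client: Arc<K8sClient>,
) -> Result<(), PreparationError> {
    // Resolves to "localhost" on a local k3d cluster, or to the
    // cluster's ingress domain when running against a kubeconfig.
    let domain = topology.get_domain(client).await?;
    println!("ingress domain: {domain}");
    Ok(())
}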

View File

@@ -577,6 +577,10 @@ impl TenantManager for K8sAnywhereTopology {
 impl Ingress for K8sAnywhereTopology {
     //TODO this is specifically for openshift/okd which violates the k8sanywhere idea
     async fn get_domain(&self, client: Arc<K8sClient>) -> Result<String, PreparationError> {
+        if let Some(Some(k8s_state)) = self.k8s_state.get() {
+            match k8s_state.source {
+                K8sSource::LocalK3d => Ok("localhost".to_string()),
+                K8sSource::Kubeconfig => {
                     self.openshift_ingress_operator_available().await?;
                     let gvk = GroupVersionKind {
@@ -585,13 +589,26 @@ impl Ingress for K8sAnywhereTopology {
                         kind: "IngressController".into(),
                     };
                     let ic = client
-                        .get_resource_json_value("default", Some("openshift-ingress-operator"), &gvk)
+                        .get_resource_json_value(
+                            "default",
+                            Some("openshift-ingress-operator"),
+                            &gvk,
+                        )
                         .await
-                        .map_err(|_| PreparationError::new("Failed to fetch IngressController".to_string()))?;
+                        .map_err(|_| {
+                            PreparationError::new("Failed to fetch IngressController".to_string())
+                        })?;
                     match ic.data["status"]["domain"].as_str() {
                         Some(domain) => Ok(domain.to_string()),
                         None => Err(PreparationError::new("Could not find domain".to_string())),
                     }
+                }
+            }
+        } else {
+            Err(PreparationError::new(
+                "Cannot get domain: unable to detect K8s state".to_string(),
+            ))
+        }
     }
 }
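
Two details are easy to miss in the hunks above. K8sSource is presumably a two-variant enum along these lines (a sketch; only the variant names appear in the diff):

enum K8sSource {
    LocalK3d,   // cluster provisioned locally via k3d
    Kubeconfig, // external cluster reached through a kubeconfig
}

And the group/version fields of the GroupVersionKind fall between the two hunks, so they are not shown. For OpenShift's IngressController they would presumably be (assuming the kube crate's GroupVersionKind):

use kube::api::GroupVersionKind;

// The default IngressController lives in the openshift-ingress-operator
// namespace and publishes the cluster's wildcard domain in .status.domain,
// which is exactly what get_domain reads out above.
let gvk = GroupVersionKind {
    group: "operator.openshift.io".into(),
    version: "v1".into(),
    kind: "IngressController".into(),
};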

View File

@@ -13,7 +13,8 @@ use crate::{
     modules::helm::chart::{HelmChartScore, HelmRepository},
     score::Score,
     topology::{
-        ingress::Ingress, k8s::K8sClient, HelmCommand, K8sclient, PreparationError, PreparationOutcome, Topology
+        HelmCommand, K8sclient, PreparationError, PreparationOutcome, Topology, ingress::Ingress,
+        k8s::K8sClient,
     },
 };
 use harmony_types::id::Id;

View File

@@ -51,8 +51,8 @@ pub struct RHOBAlertingScore {
     pub prometheus_rules: Vec<RuleGroup>,
 }
 
-impl<T: Topology + K8sclient + Ingress + PrometheusApplicationMonitoring<RHOBObservability>> Score<T>
-    for RHOBAlertingScore
+impl<T: Topology + K8sclient + Ingress + PrometheusApplicationMonitoring<RHOBObservability>>
+    Score<T> for RHOBAlertingScore
 {
     fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
         Box::new(RHOBAlertingInterpret {
@@ -77,8 +77,8 @@ pub struct RHOBAlertingInterpret {
 }
 
 #[async_trait]
-impl<T: Topology + K8sclient + Ingress + PrometheusApplicationMonitoring<RHOBObservability>> Interpret<T>
-    for RHOBAlertingInterpret
+impl<T: Topology + K8sclient + Ingress + PrometheusApplicationMonitoring<RHOBObservability>>
+    Interpret<T> for RHOBAlertingInterpret
 {
     async fn execute(
         &self,
@@ -90,7 +90,7 @@ impl<T: Topology + K8sclient + Ingress + PrometheusApplicationMonitoring<RHOBObs
         self.install_prometheus(inventory, topology, &client)
             .await?;
         self.install_client_kube_metrics().await?;
-        self.install_grafana(inventory, topology,&client).await?;
+        self.install_grafana(inventory, topology, &client).await?;
         self.install_receivers(&self.sender, &self.receivers)
             .await?;
         self.install_rules(&self.prometheus_rules, &client).await?;
@@ -242,7 +242,7 @@ impl RHOBAlertingInterpret {
         )))
     }
 
-    async fn install_prometheus<T: Topology + K8sclient + Ingress >(
+    async fn install_prometheus<T: Topology + K8sclient + Ingress>(
         &self,
         inventory: &Inventory,
         topology: &T,
@@ -274,6 +274,7 @@ impl RHOBAlertingInterpret {
             .apply(&stack, Some(&self.sender.namespace.clone()))
             .await
             .map_err(|e| InterpretError::new(e.to_string()))?;
+        let domain = topology.get_domain(client.clone()).await?;
         let name = format!("{}-alert-manager", self.sender.namespace.clone());
         let backend_service = format!("{}-alert-manager", self.sender.namespace.clone());
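
The domain fetched on the new line is presumably consumed just past the end of this hunk to build an externally reachable host for the alert manager, along these lines (hypothetical continuation, not part of the diff):

// e.g. "<ns>-alert-manager.apps.cluster.example.com" via OpenShift ingress,
// or "<ns>-alert-manager.localhost" on local k3d.
let host = format!("{}.{}", name, domain);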