ingress: check whether running as local k3d or kubeconfig

This commit is contained in:
Ian Letourneau 2025-09-08 20:43:12 -04:00
parent 288129b0c1
commit 54803c40a2
4 changed files with 41 additions and 26 deletions

View File

@@ -1,11 +1,7 @@
+use crate::topology::{PreparationError, k8s::K8sClient};
+use async_trait::async_trait;
 use std::sync::Arc;
-use async_trait::async_trait;
-use crate::{
-    interpret::InterpretError,
-    topology::{PreparationError, k8s::K8sClient},
-};
 
 #[async_trait]
 pub trait Ingress {
     async fn get_domain(&self, client: Arc<K8sClient>) -> Result<String, PreparationError>;

View File

@@ -577,21 +577,38 @@ impl TenantManager for K8sAnywhereTopology {
 impl Ingress for K8sAnywhereTopology {
     //TODO this is specifically for openshift/okd which violates the k8sanywhere idea
     async fn get_domain(&self, client: Arc<K8sClient>) -> Result<String, PreparationError> {
-        self.openshift_ingress_operator_available().await?;
-        let gvk = GroupVersionKind {
-            group: "operator.openshift.io".into(),
-            version: "v1".into(),
-            kind: "IngressController".into(),
-        };
-        let ic = client
-            .get_resource_json_value("default", Some("openshift-ingress-operator"), &gvk)
-            .await
-            .map_err(|_| PreparationError::new("Failed to fetch IngressController".to_string()))?;
-        match ic.data["status"]["domain"].as_str() {
-            Some(domain) => Ok(domain.to_string()),
-            None => Err(PreparationError::new("Could not find domain".to_string())),
+        if let Some(Some(k8s_state)) = self.k8s_state.get() {
+            match k8s_state.source {
+                K8sSource::LocalK3d => Ok("localhost".to_string()),
+                K8sSource::Kubeconfig => {
+                    self.openshift_ingress_operator_available().await?;
+                    let gvk = GroupVersionKind {
+                        group: "operator.openshift.io".into(),
+                        version: "v1".into(),
+                        kind: "IngressController".into(),
+                    };
+                    let ic = client
+                        .get_resource_json_value(
+                            "default",
+                            Some("openshift-ingress-operator"),
+                            &gvk,
+                        )
+                        .await
+                        .map_err(|_| {
+                            PreparationError::new("Failed to fetch IngressController".to_string())
+                        })?;
+                    match ic.data["status"]["domain"].as_str() {
+                        Some(domain) => Ok(domain.to_string()),
+                        None => Err(PreparationError::new("Could not find domain".to_string())),
+                    }
+                }
+            }
+        } else {
+            Err(PreparationError::new(
+                "Cannot get domain: unable to detect K8s state".to_string(),
+            ))
         }
     }
 }

View File

@@ -13,7 +13,8 @@ use crate::{
     modules::helm::chart::{HelmChartScore, HelmRepository},
     score::Score,
     topology::{
-        ingress::Ingress, k8s::K8sClient, HelmCommand, K8sclient, PreparationError, PreparationOutcome, Topology
+        HelmCommand, K8sclient, PreparationError, PreparationOutcome, Topology, ingress::Ingress,
+        k8s::K8sClient,
     },
 };
 use harmony_types::id::Id;
use harmony_types::id::Id; use harmony_types::id::Id;

View File

@@ -51,8 +51,8 @@ pub struct RHOBAlertingScore {
     pub prometheus_rules: Vec<RuleGroup>,
 }
 
-impl<T: Topology + K8sclient + Ingress + PrometheusApplicationMonitoring<RHOBObservability>> Score<T>
-    for RHOBAlertingScore
+impl<T: Topology + K8sclient + Ingress + PrometheusApplicationMonitoring<RHOBObservability>>
+    Score<T> for RHOBAlertingScore
 {
     fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
         Box::new(RHOBAlertingInterpret {
@@ -77,8 +77,8 @@ pub struct RHOBAlertingInterpret {
 }
 
 #[async_trait]
-impl<T: Topology + K8sclient + Ingress + PrometheusApplicationMonitoring<RHOBObservability>> Interpret<T>
-    for RHOBAlertingInterpret
+impl<T: Topology + K8sclient + Ingress + PrometheusApplicationMonitoring<RHOBObservability>>
+    Interpret<T> for RHOBAlertingInterpret
 {
     async fn execute(
         &self,
@@ -90,7 +90,7 @@ impl<T: Topology + K8sclient + Ingress + PrometheusApplicationMonitoring<RHOBObs
         self.install_prometheus(inventory, topology, &client)
             .await?;
         self.install_client_kube_metrics().await?;
-        self.install_grafana(inventory, topology,&client).await?;
+        self.install_grafana(inventory, topology, &client).await?;
         self.install_receivers(&self.sender, &self.receivers)
             .await?;
         self.install_rules(&self.prometheus_rules, &client).await?;
@@ -242,7 +242,7 @@ impl RHOBAlertingInterpret {
         )))
     }
 
-    async fn install_prometheus<T: Topology + K8sclient + Ingress >(
+    async fn install_prometheus<T: Topology + K8sclient + Ingress>(
         &self,
         inventory: &Inventory,
         topology: &T,
@@ -274,6 +274,7 @@ impl RHOBAlertingInterpret {
             .apply(&stack, Some(&self.sender.namespace.clone()))
             .await
             .map_err(|e| InterpretError::new(e.to_string()))?;
+
         let domain = topology.get_domain(client.clone()).await?;
         let name = format!("{}-alert-manager", self.sender.namespace.clone());
         let backend_service = format!("{}-alert-manager", self.sender.namespace.clone());