Compare commits
1 Commit
9359d43fe1...eeaaa26d0e
@@ -622,7 +622,7 @@ impl TenantManager for K8sAnywhereTopology {
 #[async_trait]
 impl Ingress for K8sAnywhereTopology {
     async fn get_domain(&self, service: &str) -> Result<String, PreparationError> {
-        use log::{debug, trace, warn};
+        use log::{trace, debug, warn};
 
         let client = self.k8s_client().await?;
 
@@ -644,17 +644,9 @@ impl Ingress for K8sAnywhereTopology {
                 kind: "IngressController".into(),
             };
             let ic = client
-                .get_resource_json_value(
-                    "default",
-                    Some("openshift-ingress-operator"),
-                    &gvk,
-                )
+                .get_resource_json_value("default", Some("openshift-ingress-operator"), &gvk)
                 .await
-                .map_err(|_| {
-                    PreparationError::new(
-                        "Failed to fetch IngressController".to_string(),
-                    )
-                })?;
+                .map_err(|_| PreparationError::new("Failed to fetch IngressController".to_string()))?;
 
             if let Some(domain) = ic.data["status"]["domain"].as_str() {
                 return Ok(format!("{service}.{domain}"));
@@ -662,9 +654,7 @@ impl Ingress for K8sAnywhereTopology {
                 warn!("OpenShift IngressController present but no status.domain set");
             }
         } else {
-            trace!(
-                "OpenShift ingress operator not detected; trying generic Kubernetes"
-            );
+            trace!("OpenShift ingress operator not detected; trying generic Kubernetes");
         }
 
         // 2) Try NGINX Ingress Controller common setups
@@ -678,9 +668,7 @@ impl Ingress for K8sAnywhereTopology {
 
         // 3) Fallback: internal cluster DNS suffix (service.namespace.svc.cluster.local)
        // We don't have tenant namespace here, so we fallback to 'default' with a warning.
-        warn!(
-            "Could not determine external ingress domain; falling back to internal-only DNS"
-        );
+        warn!("Could not determine external ingress domain; falling back to internal-only DNS");
         let internal = format!("{service}.default.svc.cluster.local");
         Ok(internal)
     }
@@ -694,7 +682,7 @@ impl Ingress for K8sAnywhereTopology {
 }
 
 async fn try_nginx_lb_domain(client: &K8sClient) -> Result<Option<String>, PreparationError> {
-    use log::{debug, trace};
+    use log::{trace, debug};
 
     // Try common service path: svc/ingress-nginx-controller in ns/ingress-nginx
     let svc_gvk = GroupVersionKind {
@@ -712,14 +700,8 @@ async fn try_nginx_lb_domain(client: &K8sClient) -> Result<Option<String>, Prepa
 
     for (ns, name) in candidates {
         trace!("Checking NGINX Service {ns}/{name} for LoadBalancer hostname");
-        if let Ok(svc) = client
-            .get_resource_json_value(ns, Some(name), &svc_gvk)
-            .await
-        {
-            let lb_hosts = svc.data["status"]["loadBalancer"]["ingress"]
-                .as_array()
-                .cloned()
-                .unwrap_or_default();
+        if let Ok(svc) = client.get_resource_json_value(ns, Some(name), &svc_gvk).await {
+            let lb_hosts = svc.data["status"]["loadBalancer"]["ingress"].as_array().cloned().unwrap_or_default();
             for entry in lb_hosts {
                 if let Some(host) = entry.get("hostname").and_then(|v| v.as_str()) {
                     debug!("Found NGINX LB hostname: {host}");

@@ -21,7 +21,7 @@ pub struct Helm {
     pub skip_schema_validation: Option<bool>,
     pub version: Option<String>,
     pub kube_version: Option<String>,
-    pub api_versions: Vec<String>,
+    // pub api_versions: Vec<String>,
     pub namespace: Option<String>,
 }
 
@@ -105,7 +105,7 @@ impl Default for ArgoApplication {
                 skip_schema_validation: None,
                 version: None,
                 kube_version: None,
-                api_versions: vec![],
+                // api_versions: vec![],
                 namespace: None,
             },
             path: "".to_string(),
@@ -155,7 +155,7 @@ impl From<CDApplicationConfig> for ArgoApplication {
                 skip_schema_validation: None,
                 version: None,
                 kube_version: None,
-                api_versions: vec![],
+                // api_versions: vec![],
                 namespace: None,
             },
         },
@@ -283,7 +283,7 @@ mod tests {
                 skip_schema_validation: None,
                 version: None,
                 kube_version: None,
-                api_versions: vec![],
+                // api_versions: vec![],
                 namespace: None,
             },
             path: "".to_string(),

@@ -86,7 +86,10 @@ impl<T: Topology + K8sclient + HelmCommand + Ingress> Interpret<T> for ArgoInter
                 (ns, false)
             }
             ArgoDeploymentType::InstalledClusterWide(ns) => {
-                info!("Argo CD installed cluster-wide in namespace '{}'.", ns);
+                info!(
+                    "Argo CD installed cluster-wide in namespace '{}'.",
+                    ns
+                );
                 (ns, false)
             }
             ArgoDeploymentType::InstalledNamespaceScoped(ns) => {

@@ -10,13 +10,11 @@ use crate::{
     data::Version,
     inventory::Inventory,
     modules::application::{
-        ApplicationFeature, HelmPackage, InstallationError, InstallationOutcome, OCICompliant,
-        features::{ArgoApplication, ArgoHelmScore},
-        webapp::Webapp,
+        features::{ArgoApplication, ArgoHelmScore}, webapp::Webapp, ApplicationFeature, HelmPackage, InstallationError, InstallationOutcome, OCICompliant
     },
     score::Score,
     topology::{
-        DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology, ingress::Ingress,
+        ingress::Ingress, DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology
     },
 };

@@ -1,5 +1,4 @@
 pub mod application;
-pub mod argocd;
 pub mod cert_manager;
 pub mod dhcp;
 pub mod dns;
@@ -18,3 +17,4 @@ pub mod prometheus;
 pub mod storage;
 pub mod tenant;
 pub mod tftp;
+pub mod argocd;