chore: Fix PR comments, documentation, slight refactor for better APIs
All checks were successful
Run Check Script / check (pull_request) Successful in 49s

2026-01-06 15:03:55 -05:00
parent 9f7b90d182
commit 9359d43fe1
7 changed files with 67 additions and 45 deletions

View File

@@ -22,7 +22,7 @@ use kube::{
 };
 use log::{debug, error, info, trace};
 use serde::{Serialize, de::DeserializeOwned};
-use serde_json::{json, Value};
+use serde_json::{Value, json};
 use similar::TextDiff;
 use tokio::{io::AsyncReadExt, time::sleep};
@@ -58,8 +58,8 @@ impl K8sClient {
         })
     }

-    // Returns true if any deployment in the given namespace matching the label selector
-    // has status.availableReplicas > 0 (or condition Available=True).
+    /// Returns true if any deployment in the given namespace matching the label selector
+    /// has status.availableReplicas > 0 (or condition Available=True).
     pub async fn has_healthy_deployment_with_label(
         &self,
         namespace: &str,
@@ -80,10 +80,10 @@ impl K8sClient {
             }
             // Fallback: scan conditions
             if let Some(conds) = d.status.as_ref().and_then(|s| s.conditions.as_ref()) {
-                if conds.iter().any(|c| {
-                    c.type_ == "Available"
-                        && c.status == "True"
-                }) {
+                if conds
+                    .iter()
+                    .any(|c| c.type_ == "Available" && c.status == "True")
+                {
                     return Ok(true);
                 }
             }
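For reference, the health predicate this method implements (here and in the cluster-wide variant below) boils down to the following free function over a k8s-openapi Deployment; this is an illustrative sketch, not code from this commit:

use k8s_openapi::api::apps::v1::Deployment;

// Healthy when status.availableReplicas > 0, or when a status condition
// reports Available=True (the fallback path scanned above).
fn is_healthy(d: &Deployment) -> bool {
    let Some(status) = d.status.as_ref() else {
        return false;
    };
    if status.available_replicas.unwrap_or(0) > 0 {
        return true;
    }
    status
        .conditions
        .as_ref()
        .map(|conds| {
            conds
                .iter()
                .any(|c| c.type_ == "Available" && c.status == "True")
        })
        .unwrap_or(false)
}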
@@ -91,8 +91,8 @@ impl K8sClient {
         Ok(false)
     }

-    // Cluster-wide: returns namespaces that have at least one healthy deployment
-    // matching the label selector (equivalent to kubectl -A -l ...).
+    /// Cluster-wide: returns namespaces that have at least one healthy deployment
+    /// matching the label selector (equivalent to kubectl -A -l ...).
     pub async fn list_namespaces_with_healthy_deployments(
         &self,
         label_selector: &str,
@@ -119,10 +119,9 @@ impl K8sClient {
                 .as_ref()
                 .and_then(|s| s.conditions.as_ref())
                 .map(|conds| {
-                    conds.iter().any(|c| {
-                        c.type_ == "Available"
-                            && c.status == "True"
-                    })
+                    conds
+                        .iter()
+                        .any(|c| c.type_ == "Available" && c.status == "True")
                 })
                 .unwrap_or(false)
         };
@@ -134,8 +133,11 @@ impl K8sClient {
         Ok(healthy_ns.into_keys().collect())
     }

-    // Get the application-controller ServiceAccount name (fallback to default)
-    pub async fn get_argocd_controller_sa_name(&self, ns: &str) -> Result<String, Error> {
+    /// Get the application-controller ServiceAccount name (fallback to default)
+    pub async fn get_controller_service_account_name(
+        &self,
+        ns: &str,
+    ) -> Result<Option<String>, Error> {
         let api: Api<Deployment> = Api::namespaced(self.client.clone(), ns);
         let lp = ListParams::default().labels("app.kubernetes.io/component=controller");
         let list = api.list(&lp).await?;
@@ -146,10 +148,10 @@ impl K8sClient {
                 .and_then(|ds| ds.template.spec.as_ref())
                 .and_then(|ps| ps.service_account_name.clone())
             {
-                return Ok(sa);
+                return Ok(Some(sa));
             }
         }
-        Ok("argocd-application-controller".to_string())
+        Ok(None)
     }

     // List ClusterRoleBindings dynamically and return as JSON values
@@ -170,10 +172,9 @@ impl K8sClient {
         Ok(out)
     }

-    // Determine if Argo controller in ns has cluster-wide permissions via CRBs
+    /// Determine if Argo controller in ns has cluster-wide permissions via CRBs
     // TODO This does not belong in the generic k8s client, should be refactored at some point
-    pub async fn is_argocd_cluster_wide(&self, ns: &str) -> Result<bool, Error> {
-        let sa = self.get_argocd_controller_sa_name(ns).await?;
+    pub async fn is_service_account_cluster_wide(&self, sa: &str, ns: &str) -> Result<bool, Error> {
         let crbs = self.list_clusterrolebindings_json().await?;
         let sa_user = format!("system:serviceaccount:{}:{}", ns, sa);
         for crb in crbs {
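Net effect of this file's API changes: service-account lookup and the cluster-wide permission check are now two separate calls, with the default name applied by the caller. A minimal sketch of the new call sequence, assuming a K8sClient value named k8s and a namespace ns (the fallback string matches the discovery code further down):

let sa = k8s
    .get_controller_service_account_name(&ns)
    .await?
    .unwrap_or_else(|| "argocd-application-controller".to_string());
let cluster_wide = k8s.is_service_account_cluster_wide(&sa, &ns).await?;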

View File

@@ -622,7 +622,7 @@ impl TenantManager for K8sAnywhereTopology {
 #[async_trait]
 impl Ingress for K8sAnywhereTopology {
     async fn get_domain(&self, service: &str) -> Result<String, PreparationError> {
-        use log::{trace, debug, warn};
+        use log::{debug, trace, warn};

         let client = self.k8s_client().await?;
@@ -644,9 +644,17 @@ impl Ingress for K8sAnywhereTopology {
kind: "IngressController".into(), kind: "IngressController".into(),
}; };
let ic = client let ic = client
.get_resource_json_value("default", Some("openshift-ingress-operator"), &gvk) .get_resource_json_value(
"default",
Some("openshift-ingress-operator"),
&gvk,
)
.await .await
.map_err(|_| PreparationError::new("Failed to fetch IngressController".to_string()))?; .map_err(|_| {
PreparationError::new(
"Failed to fetch IngressController".to_string(),
)
})?;
if let Some(domain) = ic.data["status"]["domain"].as_str() { if let Some(domain) = ic.data["status"]["domain"].as_str() {
return Ok(format!("{service}.{domain}")); return Ok(format!("{service}.{domain}"));
@@ -654,7 +662,9 @@ impl Ingress for K8sAnywhereTopology {
warn!("OpenShift IngressController present but no status.domain set"); warn!("OpenShift IngressController present but no status.domain set");
} }
} else { } else {
trace!("OpenShift ingress operator not detected; trying generic Kubernetes"); trace!(
"OpenShift ingress operator not detected; trying generic Kubernetes"
);
} }
// 2) Try NGINX Ingress Controller common setups // 2) Try NGINX Ingress Controller common setups
@@ -668,7 +678,9 @@ impl Ingress for K8sAnywhereTopology {
         // 3) Fallback: internal cluster DNS suffix (service.namespace.svc.cluster.local)
         // We don't have tenant namespace here, so we fallback to 'default' with a warning.
-        warn!("Could not determine external ingress domain; falling back to internal-only DNS");
+        warn!(
+            "Could not determine external ingress domain; falling back to internal-only DNS"
+        );

         let internal = format!("{service}.default.svc.cluster.local");
         Ok(internal)
     }
@@ -682,7 +694,7 @@ impl Ingress for K8sAnywhereTopology {
 }

 async fn try_nginx_lb_domain(client: &K8sClient) -> Result<Option<String>, PreparationError> {
-    use log::{trace, debug};
+    use log::{debug, trace};

     // Try common service path: svc/ingress-nginx-controller in ns/ingress-nginx
     let svc_gvk = GroupVersionKind {
@@ -700,8 +712,14 @@ async fn try_nginx_lb_domain(client: &K8sClient) -> Result<Option<String>, Prepa
     for (ns, name) in candidates {
         trace!("Checking NGINX Service {ns}/{name} for LoadBalancer hostname");
-        if let Ok(svc) = client.get_resource_json_value(ns, Some(name), &svc_gvk).await {
-            let lb_hosts = svc.data["status"]["loadBalancer"]["ingress"].as_array().cloned().unwrap_or_default();
+        if let Ok(svc) = client
+            .get_resource_json_value(ns, Some(name), &svc_gvk)
+            .await
+        {
+            let lb_hosts = svc.data["status"]["loadBalancer"]["ingress"]
+                .as_array()
+                .cloned()
+                .unwrap_or_default();
             for entry in lb_hosts {
                 if let Some(host) = entry.get("hostname").and_then(|v| v.as_str()) {
                     debug!("Found NGINX LB hostname: {host}");

View File

@@ -21,7 +21,7 @@ pub struct Helm {
     pub skip_schema_validation: Option<bool>,
     pub version: Option<String>,
     pub kube_version: Option<String>,
-    // pub api_versions: Vec<String>,
+    pub api_versions: Vec<String>,
     pub namespace: Option<String>,
 }
@@ -105,7 +105,7 @@ impl Default for ArgoApplication {
             skip_schema_validation: None,
             version: None,
             kube_version: None,
-            // api_versions: vec![],
+            api_versions: vec![],
             namespace: None,
         },
         path: "".to_string(),
@@ -155,7 +155,7 @@ impl From<CDApplicationConfig> for ArgoApplication {
                 skip_schema_validation: None,
                 version: None,
                 kube_version: None,
-                // api_versions: vec![],
+                api_versions: vec![],
                 namespace: None,
             },
         },
@@ -283,7 +283,7 @@ mod tests {
             skip_schema_validation: None,
             version: None,
             kube_version: None,
-            // api_versions: vec![],
+            api_versions: vec![],
             namespace: None,
         },
         path: "".to_string(),

View File

@@ -86,10 +86,7 @@ impl<T: Topology + K8sclient + HelmCommand + Ingress> Interpret<T> for ArgoInter
                 (ns, false)
             }
             ArgoDeploymentType::InstalledClusterWide(ns) => {
-                info!(
-                    "Argo CD installed cluster-wide in namespace '{}'.",
-                    ns
-                );
+                info!("Argo CD installed cluster-wide in namespace '{}'.", ns);
                 (ns, false)
             }
             ArgoDeploymentType::InstalledNamespaceScoped(ns) => {

View File

@@ -10,11 +10,13 @@ use crate::{
     data::Version,
     inventory::Inventory,
     modules::application::{
-        features::{ArgoApplication, ArgoHelmScore}, webapp::Webapp, ApplicationFeature, HelmPackage, InstallationError, InstallationOutcome, OCICompliant
+        ApplicationFeature, HelmPackage, InstallationError, InstallationOutcome, OCICompliant,
+        features::{ArgoApplication, ArgoHelmScore},
+        webapp::Webapp,
     },
     score::Score,
     topology::{
-        ingress::Ingress, DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology
+        DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology, ingress::Ingress,
     },
 };
@@ -197,7 +199,6 @@ impl<
                     namespace: format!("{}", self.application.name()),
                     openshift: true,
                     argo_apps: vec![ArgoApplication::from(CDApplicationConfig {
-                        // helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0
                         version: Version::from("0.2.1").unwrap(),
                         helm_chart_repo_url: "hub.nationtech.io/harmony".to_string(),
                         helm_chart_name: format!("{}-chart", self.application.name()),

View File

@@ -116,7 +116,12 @@ pub async fn discover_argo_all(
         }

         trace!("Determining Argo CD scope for namespace '{ns}' (cluster-wide vs namespace-scoped)");
-        let scope = match k8s.is_argocd_cluster_wide(&ns).await {
+        let sa = k8s
+            .get_controller_service_account_name(&ns)
+            .await?
+            .unwrap_or("argocd-application-controller".to_string());
+        let scope = match k8s.is_service_account_cluster_wide(&sa, &ns).await {
             Ok(true) => {
                 debug!("Namespace '{ns}' identified as cluster-wide Argo CD control plane");
                 ArgoScope::ClusterWide(ns.to_string())

View File

@@ -1,4 +1,5 @@
 pub mod application;
+pub mod argocd;
 pub mod cert_manager;
 pub mod dhcp;
 pub mod dns;
@@ -17,4 +18,3 @@ pub mod prometheus;
 pub mod storage;
 pub mod tenant;
 pub mod tftp;
-pub mod argocd;