chore: Fix PR comments, documentation, slight refactor for better APIs
All checks were successful
Run Check Script / check (pull_request) Successful in 49s
@@ -22,7 +22,7 @@ use kube::{
 };
 use log::{debug, error, info, trace};
 use serde::{Serialize, de::DeserializeOwned};
-use serde_json::{json, Value};
+use serde_json::{Value, json};
 use similar::TextDiff;
 use tokio::{io::AsyncReadExt, time::sleep};
 
@@ -58,8 +58,8 @@ impl K8sClient {
         })
     }
 
-    // Returns true if any deployment in the given namespace matching the label selector
-    // has status.availableReplicas > 0 (or condition Available=True).
+    /// Returns true if any deployment in the given namespace matching the label selector
+    /// has status.availableReplicas > 0 (or condition Available=True).
    pub async fn has_healthy_deployment_with_label(
         &self,
         namespace: &str,
@@ -80,10 +80,10 @@ impl K8sClient {
         }
         // Fallback: scan conditions
         if let Some(conds) = d.status.as_ref().and_then(|s| s.conditions.as_ref()) {
-            if conds.iter().any(|c| {
-                c.type_ == "Available"
-                    && c.status == "True"
-            }) {
+            if conds
+                .iter()
+                .any(|c| c.type_ == "Available" && c.status == "True")
+            {
                 return Ok(true);
             }
         }
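
Note: a hypothetical call site for the health check documented above. The namespace and label selector are illustrative only, and the second parameter being the label selector is inferred from the doc comment rather than visible in this hunk.

    // Illustrative only; `client` is an already-constructed K8sClient.
    let healthy = client
        .has_healthy_deployment_with_label("argocd", "app.kubernetes.io/part-of=argocd")
        .await?;
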
@@ -91,8 +91,8 @@ impl K8sClient {
         Ok(false)
     }
 
-    // Cluster-wide: returns namespaces that have at least one healthy deployment
-    // matching the label selector (equivalent to kubectl -A -l ...).
+    /// Cluster-wide: returns namespaces that have at least one healthy deployment
+    /// matching the label selector (equivalent to kubectl -A -l ...).
    pub async fn list_namespaces_with_healthy_deployments(
         &self,
         label_selector: &str,
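
Note: the cluster-wide variant pairs with the per-namespace check; per its doc comment it mirrors `kubectl get deployments -A -l <selector>`. A hypothetical call, with an illustrative selector and an assumed Vec<String> return type:

    // Illustrative only.
    let namespaces: Vec<String> = client
        .list_namespaces_with_healthy_deployments("app.kubernetes.io/part-of=argocd")
        .await?;
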
@@ -119,10 +119,9 @@ impl K8sClient {
                 .as_ref()
                 .and_then(|s| s.conditions.as_ref())
                 .map(|conds| {
-                    conds.iter().any(|c| {
-                        c.type_ == "Available"
-                            && c.status == "True"
-                    })
+                    conds
+                        .iter()
+                        .any(|c| c.type_ == "Available" && c.status == "True")
                 })
                 .unwrap_or(false)
         };
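
Note: after this commit the Available=True predicate appears verbatim in both methods. A possible follow-up (not part of this commit) is to hoist it into a shared helper; this sketch is lifted directly from the code above:

    use k8s_openapi::api::apps::v1::Deployment;

    /// Sketch only: the Available=True check duplicated in both methods.
    fn deployment_is_available(d: &Deployment) -> bool {
        d.status
            .as_ref()
            .and_then(|s| s.conditions.as_ref())
            .map(|conds| conds.iter().any(|c| c.type_ == "Available" && c.status == "True"))
            .unwrap_or(false)
    }
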
@@ -134,8 +133,11 @@ impl K8sClient {
         Ok(healthy_ns.into_keys().collect())
     }
 
-    // Get the application-controller ServiceAccount name (fallback to default)
-    pub async fn get_argocd_controller_sa_name(&self, ns: &str) -> Result<String, Error> {
+    /// Get the application-controller ServiceAccount name (fallback to default)
+    pub async fn get_controller_service_account_name(
+        &self,
+        ns: &str,
+    ) -> Result<Option<String>, Error> {
         let api: Api<Deployment> = Api::namespaced(self.client.clone(), ns);
         let lp = ListParams::default().labels("app.kubernetes.io/component=controller");
         let list = api.list(&lp).await?;
@@ -146,10 +148,10 @@ impl K8sClient {
                 .and_then(|ds| ds.template.spec.as_ref())
                 .and_then(|ps| ps.service_account_name.clone())
             {
-                return Ok(sa);
+                return Ok(Some(sa));
             }
         }
-        Ok("argocd-application-controller".to_string())
+        Ok(None)
     }
 
     // List ClusterRoleBindings dynamically and return as JSON values
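
Note: with the Result<Option<String>> return, the Argo-specific default name moves out of the generic client and into callers, as the discover_argo_all hunk further down does:

    // Caller-side fallback, mirroring discover_argo_all below.
    let sa = k8s
        .get_controller_service_account_name(&ns)
        .await?
        .unwrap_or("argocd-application-controller".to_string());
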
@@ -170,10 +172,9 @@ impl K8sClient {
         Ok(out)
     }
 
-    // Determine if Argo controller in ns has cluster-wide permissions via CRBs
+    /// Determine if Argo controller in ns has cluster-wide permissions via CRBs
     // TODO This does not belong in the generic k8s client, should be refactored at some point
-    pub async fn is_argocd_cluster_wide(&self, ns: &str) -> Result<bool, Error> {
-        let sa = self.get_argocd_controller_sa_name(ns).await?;
+    pub async fn is_service_account_cluster_wide(&self, sa: &str, ns: &str) -> Result<bool, Error> {
         let crbs = self.list_clusterrolebindings_json().await?;
         let sa_user = format!("system:serviceaccount:{}:{}", ns, sa);
         for crb in crbs {
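
Note: taking the ServiceAccount as a parameter decouples the CRB scan from Argo CD (see the TODO above), so the same check can answer the question for any controller. A hypothetical non-Argo call; the names are illustrative:

    let cluster_wide = k8s
        .is_service_account_cluster_wide("my-controller", "my-operator-system")
        .await?;
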
@@ -622,7 +622,7 @@ impl TenantManager for K8sAnywhereTopology {
 #[async_trait]
 impl Ingress for K8sAnywhereTopology {
     async fn get_domain(&self, service: &str) -> Result<String, PreparationError> {
-        use log::{trace, debug, warn};
+        use log::{debug, trace, warn};
 
         let client = self.k8s_client().await?;
 
@@ -644,9 +644,17 @@ impl Ingress for K8sAnywhereTopology {
                 kind: "IngressController".into(),
             };
             let ic = client
-                .get_resource_json_value("default", Some("openshift-ingress-operator"), &gvk)
+                .get_resource_json_value(
+                    "default",
+                    Some("openshift-ingress-operator"),
+                    &gvk,
+                )
                 .await
-                .map_err(|_| PreparationError::new("Failed to fetch IngressController".to_string()))?;
+                .map_err(|_| {
+                    PreparationError::new(
+                        "Failed to fetch IngressController".to_string(),
+                    )
+                })?;
 
             if let Some(domain) = ic.data["status"]["domain"].as_str() {
                 return Ok(format!("{service}.{domain}"));
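
Note: the gvk above is built just before this hunk; only the `kind` line is visible in the diff. Assuming the standard OpenShift API group and version (an assumption, not confirmed by this diff), the construction would look like:

    // Group and version are assumptions; only `kind` appears in the hunk.
    let gvk = GroupVersionKind {
        group: "operator.openshift.io".into(),
        version: "v1".into(),
        kind: "IngressController".into(),
    };
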
@@ -654,7 +662,9 @@ impl Ingress for K8sAnywhereTopology {
                 warn!("OpenShift IngressController present but no status.domain set");
             }
         } else {
-            trace!("OpenShift ingress operator not detected; trying generic Kubernetes");
+            trace!(
+                "OpenShift ingress operator not detected; trying generic Kubernetes"
+            );
         }
 
         // 2) Try NGINX Ingress Controller common setups
@@ -668,7 +678,9 @@ impl Ingress for K8sAnywhereTopology {
 
         // 3) Fallback: internal cluster DNS suffix (service.namespace.svc.cluster.local)
         // We don't have tenant namespace here, so we fallback to 'default' with a warning.
-        warn!("Could not determine external ingress domain; falling back to internal-only DNS");
+        warn!(
+            "Could not determine external ingress domain; falling back to internal-only DNS"
+        );
         let internal = format!("{service}.default.svc.cluster.local");
         Ok(internal)
     }
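
Note: get_domain therefore resolves in three steps: OpenShift IngressController status.domain, then an NGINX LoadBalancer hostname, then the internal-only suffix. A condensed sketch of that chain, with the two probes stubbed as hypothetical Option parameters:

    // Sketch only; `openshift` and `nginx` stand in for the two probes above.
    fn resolve(service: &str, openshift: Option<&str>, nginx: Option<&str>) -> String {
        openshift
            .or(nginx)
            .map(|d| format!("{service}.{d}"))
            .unwrap_or_else(|| format!("{service}.default.svc.cluster.local"))
    }
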
@@ -682,7 +694,7 @@ impl Ingress for K8sAnywhereTopology {
 }
 
 async fn try_nginx_lb_domain(client: &K8sClient) -> Result<Option<String>, PreparationError> {
-    use log::{trace, debug};
+    use log::{debug, trace};
 
     // Try common service path: svc/ingress-nginx-controller in ns/ingress-nginx
     let svc_gvk = GroupVersionKind {
@@ -700,8 +712,14 @@ async fn try_nginx_lb_domain(client: &K8sClient) -> Result<Option<String>, Prepa
 
     for (ns, name) in candidates {
         trace!("Checking NGINX Service {ns}/{name} for LoadBalancer hostname");
-        if let Ok(svc) = client.get_resource_json_value(ns, Some(name), &svc_gvk).await {
-            let lb_hosts = svc.data["status"]["loadBalancer"]["ingress"].as_array().cloned().unwrap_or_default();
+        if let Ok(svc) = client
+            .get_resource_json_value(ns, Some(name), &svc_gvk)
+            .await
+        {
+            let lb_hosts = svc.data["status"]["loadBalancer"]["ingress"]
+                .as_array()
+                .cloned()
+                .unwrap_or_default();
             for entry in lb_hosts {
                 if let Some(host) = entry.get("hostname").and_then(|v| v.as_str()) {
                     debug!("Found NGINX LB hostname: {host}");
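
Note: the hostname extraction reads status.loadBalancer.ingress from untyped JSON. Pulled out as a helper it looks roughly like this; a sketch over the assumed JSON shape, not code from this commit:

    /// Sketch: collect LoadBalancer hostnames from a Service rendered as serde_json::Value.
    fn lb_hostnames(svc: &serde_json::Value) -> Vec<String> {
        svc["status"]["loadBalancer"]["ingress"]
            .as_array()
            .map(|entries| {
                entries
                    .iter()
                    .filter_map(|e| e.get("hostname").and_then(|v| v.as_str()))
                    .map(str::to_owned)
                    .collect()
            })
            .unwrap_or_default()
    }
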
@@ -21,7 +21,7 @@ pub struct Helm {
     pub skip_schema_validation: Option<bool>,
     pub version: Option<String>,
     pub kube_version: Option<String>,
-    // pub api_versions: Vec<String>,
+    pub api_versions: Vec<String>,
     pub namespace: Option<String>,
 }
 
@@ -105,7 +105,7 @@ impl Default for ArgoApplication {
                 skip_schema_validation: None,
                 version: None,
                 kube_version: None,
-                // api_versions: vec![],
+                api_versions: vec![],
                 namespace: None,
             },
             path: "".to_string(),
@@ -155,7 +155,7 @@ impl From<CDApplicationConfig> for ArgoApplication {
                 skip_schema_validation: None,
                 version: None,
                 kube_version: None,
-                // api_versions: vec![],
+                api_versions: vec![],
                 namespace: None,
             },
         },
@@ -283,7 +283,7 @@ mod tests {
                 skip_schema_validation: None,
                 version: None,
                 kube_version: None,
-                // api_versions: vec![],
+                api_versions: vec![],
                 namespace: None,
             },
             path: "".to_string(),
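
Note: api_versions is now a real field rather than a commented-out placeholder. Presumably it feeds Helm's --api-versions capability hint during templating, though this diff does not show that wiring. A hypothetical non-empty value; the struct update syntax assumes Helm implements Default, which is not shown here:

    let helm = Helm {
        api_versions: vec!["monitoring.coreos.com/v1".to_string()],
        namespace: Some("argocd".to_string()),
        ..Default::default()
    };
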
@@ -86,10 +86,7 @@ impl<T: Topology + K8sclient + HelmCommand + Ingress> Interpret<T> for ArgoInter
                 (ns, false)
             }
             ArgoDeploymentType::InstalledClusterWide(ns) => {
-                info!(
-                    "Argo CD installed cluster-wide in namespace '{}'.",
-                    ns
-                );
+                info!("Argo CD installed cluster-wide in namespace '{}'.", ns);
                 (ns, false)
             }
             ArgoDeploymentType::InstalledNamespaceScoped(ns) => {
@@ -10,11 +10,13 @@ use crate::{
     data::Version,
     inventory::Inventory,
     modules::application::{
-        features::{ArgoApplication, ArgoHelmScore}, webapp::Webapp, ApplicationFeature, HelmPackage, InstallationError, InstallationOutcome, OCICompliant
+        ApplicationFeature, HelmPackage, InstallationError, InstallationOutcome, OCICompliant,
+        features::{ArgoApplication, ArgoHelmScore},
+        webapp::Webapp,
     },
     score::Score,
     topology::{
-        ingress::Ingress, DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology
+        DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology, ingress::Ingress,
     },
 };
 
@@ -149,10 +151,10 @@ impl<
         let domain = if topology.current_target() == DeploymentTarget::Production {
             self.application.dns()
         } else {
-            topology
-            .get_domain(&self.application.name())
-            .await
-            .map_err(|e| e.to_string())?
+            topology
+                .get_domain(&self.application.name())
+                .await
+                .map_err(|e| e.to_string())?
         };
 
         // TODO Write CI/CD workflow files
@@ -197,7 +199,6 @@ impl<
                 namespace: format!("{}", self.application.name()),
                 openshift: true,
                 argo_apps: vec![ArgoApplication::from(CDApplicationConfig {
-                    // helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0
                     version: Version::from("0.2.1").unwrap(),
                     helm_chart_repo_url: "hub.nationtech.io/harmony".to_string(),
                     helm_chart_name: format!("{}-chart", self.application.name()),
@@ -116,7 +116,12 @@ pub async fn discover_argo_all(
     }
 
     trace!("Determining Argo CD scope for namespace '{ns}' (cluster-wide vs namespace-scoped)");
-    let scope = match k8s.is_argocd_cluster_wide(&ns).await {
+
+    let sa = k8s
+        .get_controller_service_account_name(&ns)
+        .await?
+        .unwrap_or("argocd-application-controller".to_string());
+    let scope = match k8s.is_service_account_cluster_wide(&sa, &ns).await {
         Ok(true) => {
             debug!("Namespace '{ns}' identified as cluster-wide Argo CD control plane");
             ArgoScope::ClusterWide(ns.to_string())
@@ -1,4 +1,5 @@
 pub mod application;
+pub mod argocd;
 pub mod cert_manager;
 pub mod dhcp;
 pub mod dns;
@@ -17,4 +18,3 @@ pub mod prometheus;
 pub mod storage;
 pub mod tenant;
 pub mod tftp;
-pub mod argocd;