feat(application): Webapp feature with production dns #167

Merged
johnride merged 7 commits from feat/webappdns into master 2026-01-06 20:15:29 +00:00
144 changed files with 1727 additions and 5977 deletions
Showing only changes of commit 9359d43fe1 - Show all commits

View File

@@ -22,7 +22,7 @@ use kube::{
};
use log::{debug, error, info, trace};
use serde::{Serialize, de::DeserializeOwned};
use serde_json::{json, Value};
use serde_json::{Value, json};
use similar::TextDiff;
use tokio::{io::AsyncReadExt, time::sleep};
@@ -58,8 +58,8 @@ impl K8sClient {
})
}
// Returns true if any deployment in the given namespace matching the label selector
// has status.availableReplicas > 0 (or condition Available=True).
/// Returns true if any deployment in the given namespace matching the label selector
/// has status.availableReplicas > 0 (or condition Available=True).
johnride marked this conversation as resolved Outdated

Should these be doc comments? Same question for the other functions below.

Should these be doc comments? Same question for the other functions below.
pub async fn has_healthy_deployment_with_label(
&self,
namespace: &str,
@@ -80,10 +80,10 @@ impl K8sClient {
}
// Fallback: scan conditions
if let Some(conds) = d.status.as_ref().and_then(|s| s.conditions.as_ref()) {
if conds.iter().any(|c| {
c.type_ == "Available"
&& c.status == "True"
}) {
if conds
.iter()
.any(|c| c.type_ == "Available" && c.status == "True")
{
return Ok(true);
}
}
@@ -91,8 +91,8 @@ impl K8sClient {
Ok(false)
}
// Cluster-wide: returns namespaces that have at least one healthy deployment
// matching the label selector (equivalent to kubectl -A -l ...).
/// Cluster-wide: returns namespaces that have at least one healthy deployment
/// matching the label selector (equivalent to kubectl -A -l ...).
pub async fn list_namespaces_with_healthy_deployments(
&self,
label_selector: &str,
@@ -119,10 +119,9 @@ impl K8sClient {
.as_ref()
.and_then(|s| s.conditions.as_ref())
.map(|conds| {
conds.iter().any(|c| {
c.type_ == "Available"
&& c.status == "True"
})
conds
.iter()
.any(|c| c.type_ == "Available" && c.status == "True")
})
.unwrap_or(false)
};
@@ -134,8 +133,11 @@ impl K8sClient {
Ok(healthy_ns.into_keys().collect())
}
// Get the application-controller ServiceAccount name (fallback to default)
pub async fn get_argocd_controller_sa_name(&self, ns: &str) -> Result<String, Error> {
/// Get the application-controller ServiceAccount name (fallback to default)
pub async fn get_controller_service_account_name(
&self,
johnride marked this conversation as resolved Outdated

Does it have to be that specific? Or would it be ok if the signature was Result<Option<String>, Error>, so that users of this function can decide what to do when no value is found?

For example:

client.get_controller_service_account_name("namespace").await?.unwrap_or("argocd-application-controller".to_string())
Does it have to be that specific? Or would it be ok if the signature was `Result<Option<String>, Error>`, so that users of this function can decide what to do when no value is found? For example: ```rs client.get_controller_service_account_name("namespace").await?.unwrap_or("argocd-application-controller".to_string()) ```
ns: &str,
) -> Result<Option<String>, Error> {
let api: Api<Deployment> = Api::namespaced(self.client.clone(), ns);
let lp = ListParams::default().labels("app.kubernetes.io/component=controller");
let list = api.list(&lp).await?;
@@ -146,10 +148,10 @@ impl K8sClient {
.and_then(|ds| ds.template.spec.as_ref())
.and_then(|ps| ps.service_account_name.clone())
{
return Ok(sa);
return Ok(Some(sa));
}
}
Ok("argocd-application-controller".to_string())
Ok(None)
}
// List ClusterRoleBindings dynamically and return as JSON values
@@ -170,10 +172,9 @@ impl K8sClient {
Ok(out)
}
// Determine if Argo controller in ns has cluster-wide permissions via CRBs
/// Determine if Argo controller in ns has cluster-wide permissions via CRBs
johnride marked this conversation as resolved Outdated

In addition to the comment above, I think it wouldn't be too tricky to refactor (if needed in another PR) with something like:

let service_account = k8s.get_controller_service_account_name("namespace").await?.unwrap_or("argocd-application-controller".to_string());
let cluster_wide = k8s.is_service_account_cluster_wide(&service_account, "namespace").await?;
In addition to the comment above, I think it wouldn't be too tricky to refactor (if needed in another PR) with something like: ```rs let service_account = k8s.get_controller_service_account_name("namespace").await?.unwrap_or("argocd-application-controller".to_string()); let cluster_wide = k8s.is_service_account_cluster_wide(&service_account, "namespace").await?; ```
// TODO This does not belong in the generic k8s client, should be refactored at some point
pub async fn is_argocd_cluster_wide(&self, ns: &str) -> Result<bool, Error> {
let sa = self.get_argocd_controller_sa_name(ns).await?;
pub async fn is_service_account_cluster_wide(&self, sa: &str, ns: &str) -> Result<bool, Error> {
let crbs = self.list_clusterrolebindings_json().await?;
let sa_user = format!("system:serviceaccount:{}:{}", ns, sa);
for crb in crbs {

View File

@@ -622,7 +622,7 @@ impl TenantManager for K8sAnywhereTopology {
#[async_trait]
impl Ingress for K8sAnywhereTopology {
async fn get_domain(&self, service: &str) -> Result<String, PreparationError> {
use log::{trace, debug, warn};
use log::{debug, trace, warn};
let client = self.k8s_client().await?;
@@ -644,9 +644,17 @@ impl Ingress for K8sAnywhereTopology {
kind: "IngressController".into(),
};
let ic = client
.get_resource_json_value("default", Some("openshift-ingress-operator"), &gvk)
.get_resource_json_value(
"default",
Some("openshift-ingress-operator"),
&gvk,
)
.await
.map_err(|_| PreparationError::new("Failed to fetch IngressController".to_string()))?;
.map_err(|_| {
PreparationError::new(
"Failed to fetch IngressController".to_string(),
)
})?;
if let Some(domain) = ic.data["status"]["domain"].as_str() {
return Ok(format!("{service}.{domain}"));
@@ -654,7 +662,9 @@ impl Ingress for K8sAnywhereTopology {
warn!("OpenShift IngressController present but no status.domain set");
}
} else {
trace!("OpenShift ingress operator not detected; trying generic Kubernetes");
trace!(
"OpenShift ingress operator not detected; trying generic Kubernetes"
);
}
// 2) Try NGINX Ingress Controller common setups
@@ -668,7 +678,9 @@ impl Ingress for K8sAnywhereTopology {
// 3) Fallback: internal cluster DNS suffix (service.namespace.svc.cluster.local)
// We don't have tenant namespace here, so we fallback to 'default' with a warning.
warn!("Could not determine external ingress domain; falling back to internal-only DNS");
warn!(
"Could not determine external ingress domain; falling back to internal-only DNS"
);
let internal = format!("{service}.default.svc.cluster.local");
Ok(internal)
}
@@ -682,7 +694,7 @@ impl Ingress for K8sAnywhereTopology {
}
async fn try_nginx_lb_domain(client: &K8sClient) -> Result<Option<String>, PreparationError> {
use log::{trace, debug};
use log::{debug, trace};
// Try common service path: svc/ingress-nginx-controller in ns/ingress-nginx
let svc_gvk = GroupVersionKind {
@@ -700,8 +712,14 @@ async fn try_nginx_lb_domain(client: &K8sClient) -> Result<Option<String>, Prepa
for (ns, name) in candidates {
trace!("Checking NGINX Service {ns}/{name} for LoadBalancer hostname");
if let Ok(svc) = client.get_resource_json_value(ns, Some(name), &svc_gvk).await {
let lb_hosts = svc.data["status"]["loadBalancer"]["ingress"].as_array().cloned().unwrap_or_default();
if let Ok(svc) = client
.get_resource_json_value(ns, Some(name), &svc_gvk)
.await
{
let lb_hosts = svc.data["status"]["loadBalancer"]["ingress"]
.as_array()
.cloned()
.unwrap_or_default();
for entry in lb_hosts {
if let Some(host) = entry.get("hostname").and_then(|v| v.as_str()) {
debug!("Found NGINX LB hostname: {host}");

View File

@@ -21,7 +21,7 @@ pub struct Helm {
pub skip_schema_validation: Option<bool>,
pub version: Option<String>,
pub kube_version: Option<String>,
// pub api_versions: Vec<String>,
pub api_versions: Vec<String>,
johnride marked this conversation as resolved Outdated

still needed?

still needed?
pub namespace: Option<String>,
}
@@ -105,7 +105,7 @@ impl Default for ArgoApplication {
skip_schema_validation: None,
version: None,
kube_version: None,
// api_versions: vec![],
api_versions: vec![],
namespace: None,
},
path: "".to_string(),
@@ -155,7 +155,7 @@ impl From<CDApplicationConfig> for ArgoApplication {
skip_schema_validation: None,
version: None,
kube_version: None,
// api_versions: vec![],
api_versions: vec![],
namespace: None,
},
},
@@ -283,7 +283,7 @@ mod tests {
skip_schema_validation: None,
version: None,
kube_version: None,
// api_versions: vec![],
api_versions: vec![],
namespace: None,
},
path: "".to_string(),

View File

@@ -86,10 +86,7 @@ impl<T: Topology + K8sclient + HelmCommand + Ingress> Interpret<T> for ArgoInter
(ns, false)
}
ArgoDeploymentType::InstalledClusterWide(ns) => {
info!(
"Argo CD installed cluster-wide in namespace '{}'.",
ns
);
info!("Argo CD installed cluster-wide in namespace '{}'.", ns);
(ns, false)
}
ArgoDeploymentType::InstalledNamespaceScoped(ns) => {

View File

@@ -10,11 +10,13 @@ use crate::{
data::Version,
inventory::Inventory,
modules::application::{
features::{ArgoApplication, ArgoHelmScore}, webapp::Webapp, ApplicationFeature, HelmPackage, InstallationError, InstallationOutcome, OCICompliant
ApplicationFeature, HelmPackage, InstallationError, InstallationOutcome, OCICompliant,
features::{ArgoApplication, ArgoHelmScore},
webapp::Webapp,
},
score::Score,
topology::{
ingress::Ingress, DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology
DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology, ingress::Ingress,
},
};
@@ -197,7 +199,6 @@ impl<
namespace: format!("{}", self.application.name()),
openshift: true,
johnride marked this conversation as resolved Outdated

outdated comment -> to be removed

outdated comment -> to be removed
argo_apps: vec![ArgoApplication::from(CDApplicationConfig {
// helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0
version: Version::from("0.2.1").unwrap(),
helm_chart_repo_url: "hub.nationtech.io/harmony".to_string(),
helm_chart_name: format!("{}-chart", self.application.name()),

View File

@@ -116,7 +116,12 @@ pub async fn discover_argo_all(
}
trace!("Determining Argo CD scope for namespace '{ns}' (cluster-wide vs namespace-scoped)");
let scope = match k8s.is_argocd_cluster_wide(&ns).await {
let sa = k8s
.get_controller_service_account_name(&ns)
.await?
.unwrap_or("argocd-application-controller".to_string());
let scope = match k8s.is_service_account_cluster_wide(&sa, &ns).await {
Ok(true) => {
debug!("Namespace '{ns}' identified as cluster-wide Argo CD control plane");
ArgoScope::ClusterWide(ns.to_string())

View File

@@ -1,4 +1,5 @@
pub mod application;
pub mod argocd;
pub mod cert_manager;
pub mod dhcp;
pub mod dns;
@@ -17,4 +18,3 @@ pub mod prometheus;
pub mod storage;
pub mod tenant;
pub mod tftp;
pub mod argocd;