wip: install argocd app depending on how argocd is already installed in the cluster

parent 528ee8a696
commit 0700e30299
@@ -27,6 +27,7 @@ async fn main() {
     };
     let application = Arc::new(RustWebapp {
         name: "example-monitoring".to_string(),
+        dns: "example-monitoring.harmony.mcd".to_string(),
         project_root: PathBuf::from("./examples/rust/webapp"),
         framework: Some(RustWebFramework::Leptos),
         service_port: 3000,
@@ -16,6 +16,7 @@ use harmony_types::net::Url;
 async fn main() {
     let application = Arc::new(RustWebapp {
         name: "test-rhob-monitoring".to_string(),
+        dns: "test-rhob-monitoring.harmony.mcd".to_string(),
         project_root: PathBuf::from("./webapp"), // Relative from 'harmony-path' param
         framework: Some(RustWebFramework::Leptos),
         service_port: 3000,
@@ -19,6 +19,7 @@ use harmony_macros::hurl;
 async fn main() {
     let application = Arc::new(RustWebapp {
         name: "harmony-example-rust-webapp".to_string(),
+        dns: "harmony-example-rust-webapp.harmony.mcd".to_string(),
         project_root: PathBuf::from("./webapp"),
         framework: Some(RustWebFramework::Leptos),
         service_port: 3000,
@@ -2,12 +2,11 @@ use harmony::{
     inventory::Inventory,
     modules::{
         application::{
-            ApplicationScore, RustWebFramework, RustWebapp,
-            features::{PackagingDeployment, rhob_monitoring::Monitoring},
+            features::{rhob_monitoring::Monitoring, PackagingDeployment}, ApplicationScore, RustWebFramework, RustWebapp
         },
         monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
     },
-    topology::K8sAnywhereTopology,
+    topology::{K8sAnywhereTopology, LocalhostTopology},
 };
 use harmony_macros::hurl;
 use std::{path::PathBuf, sync::Arc};
@@ -22,8 +21,8 @@ async fn main() {
     });

     let discord_webhook = DiscordWebhook {
-        name: "harmony_demo".to_string(),
-        url: hurl!("http://not_a_url.com"),
+        name: "harmony-demo".to_string(),
+        url: hurl!("https://discord.com/api/webhooks/1415391405681021050/V6KzV41vQ7yvbn7BchejRu9C8OANxy0i2ESZOz2nvCxG8xAY3-2i3s5MS38k568JKTzH"),
     };

     let app = ApplicationScore {
@@ -16,6 +16,7 @@ use std::{path::PathBuf, sync::Arc};
 async fn main() {
     let application = Arc::new(RustWebapp {
         name: "harmony-example-tryrust".to_string(),
+        dns: "tryrust.example.harmony.mcd".to_string(),
         project_root: PathBuf::from("./tryrust.org"), // <== Project root, in this case it is a
         // submodule
         framework: Some(RustWebFramework::Leptos),
@@ -1,9 +1,10 @@
-use std::time::Duration;
+use std::{collections::HashMap, time::Duration};

 use derive_new::new;
 use k8s_openapi::{
     ClusterResourceScope, NamespaceResourceScope,
     api::{apps::v1::Deployment, core::v1::Pod},
+    apiextensions_apiserver::pkg::apis::apiextensions::v1::CustomResourceDefinition,
     apimachinery::pkg::version::Info,
 };
 use kube::{
@@ -21,7 +22,7 @@ use kube::{
 };
 use log::{debug, error, trace};
 use serde::{Serialize, de::DeserializeOwned};
-use serde_json::{Value, json};
+use serde_json::{json, Value};
 use similar::TextDiff;
 use tokio::{io::AsyncReadExt, time::sleep};
@@ -57,6 +58,148 @@ impl K8sClient {
         })
     }

+    // Returns true if any deployment in the given namespace matching the label selector
+    // has status.availableReplicas > 0 (or condition Available=True).
+    pub async fn has_healthy_deployment_with_label(
+        &self,
+        namespace: &str,
+        label_selector: &str,
+    ) -> Result<bool, Error> {
+        let api: Api<Deployment> = Api::namespaced(self.client.clone(), namespace);
+        let lp = ListParams::default().labels(label_selector);
+        let list = api.list(&lp).await?;
+        for d in list.items {
+            // Check AvailableReplicas > 0 or Available condition
+            let available = d
+                .status
+                .as_ref()
+                .and_then(|s| s.available_replicas)
+                .unwrap_or(0);
+            if available > 0 {
+                return Ok(true);
+            }
+            // Fallback: scan conditions
+            if let Some(conds) = d.status.as_ref().and_then(|s| s.conditions.as_ref()) {
+                if conds.iter().any(|c| {
+                    c.type_ == "Available"
+                        && c.status == "True"
+                }) {
+                    return Ok(true);
+                }
+            }
+        }
+        Ok(false)
+    }
+
+    // Cluster-wide: returns namespaces that have at least one healthy deployment
+    // matching the label selector (equivalent to kubectl -A -l ...).
+    pub async fn list_namespaces_with_healthy_deployments(
+        &self,
+        label_selector: &str,
+    ) -> Result<Vec<String>, Error> {
+        let api: Api<Deployment> = Api::all(self.client.clone());
+        let lp = ListParams::default().labels(label_selector);
+        let list = api.list(&lp).await?;
+
+        let mut healthy_ns: HashMap<String, bool> = HashMap::new();
+        for d in list.items {
+            let ns = match d.metadata.namespace.clone() {
+                Some(n) => n,
+                None => continue,
+            };
+            let available = d
+                .status
+                .as_ref()
+                .and_then(|s| s.available_replicas)
+                .unwrap_or(0);
+            let is_healthy = if available > 0 {
+                true
+            } else {
+                d.status
+                    .as_ref()
+                    .and_then(|s| s.conditions.as_ref())
+                    .map(|conds| {
+                        conds.iter().any(|c| {
+                            c.type_ == "Available"
+                                && c.status == "True"
+                        })
+                    })
+                    .unwrap_or(false)
+            };
+            if is_healthy {
+                healthy_ns.insert(ns, true);
+            }
+        }
+
+        Ok(healthy_ns.into_keys().collect())
+    }
+
+    // Get the application-controller ServiceAccount name (fallback to default)
+    pub async fn get_argocd_controller_sa_name(&self, ns: &str) -> Result<String, Error> {
+        let api: Api<Deployment> = Api::namespaced(self.client.clone(), ns);
+        let lp = ListParams::default().labels("app.kubernetes.io/name=argocd-application-controller");
+        let list = api.list(&lp).await?;
+        if let Some(dep) = list.items.get(0) {
+            if let Some(sa) = dep
+                .spec
+                .as_ref()
+                .and_then(|ds| ds.template.spec.as_ref())
+                .and_then(|ps| ps.service_account_name.clone())
+            {
+                return Ok(sa);
+            }
+        }
+        Ok("argocd-application-controller".to_string())
+    }
+
+    // List ClusterRoleBindings dynamically and return as JSON values
+    pub async fn list_clusterrolebindings_json(&self) -> Result<Vec<Value>, Error> {
+        let gvk = kube::api::GroupVersionKind::gvk(
+            "rbac.authorization.k8s.io",
+            "v1",
+            "ClusterRoleBinding",
+        );
+        let ar = kube::api::ApiResource::from_gvk(&gvk);
+        let api: Api<kube::api::DynamicObject> = Api::all_with(self.client.clone(), &ar);
+        let crbs = api.list(&ListParams::default()).await?;
+        let mut out = Vec::new();
+        for o in crbs {
+            let v = serde_json::to_value(&o).unwrap_or(Value::Null);
+            out.push(v);
+        }
+        Ok(out)
+    }
+
+    // Determine if Argo controller in ns has cluster-wide permissions via CRBs
+    // TODO This does not belong in the generic k8s client, should be refactored at some point
+    pub async fn is_argocd_cluster_wide(&self, ns: &str) -> Result<bool, Error> {
+        let sa = self.get_argocd_controller_sa_name(ns).await?;
+        let crbs = self.list_clusterrolebindings_json().await?;
+        let sa_user = format!("system:serviceaccount:{}:{}", ns, sa);
+        for crb in crbs {
+            if let Some(subjects) = crb.get("subjects").and_then(|s| s.as_array()) {
+                for subj in subjects {
+                    let kind = subj.get("kind").and_then(|v| v.as_str()).unwrap_or("");
+                    let name = subj.get("name").and_then(|v| v.as_str()).unwrap_or("");
+                    let subj_ns = subj.get("namespace").and_then(|v| v.as_str()).unwrap_or("");
+                    if (kind == "ServiceAccount" && name == sa && subj_ns == ns)
+                        || (kind == "User" && name == sa_user)
+                    {
+                        return Ok(true);
+                    }
+                }
+            }
+        }
+        Ok(false)
+    }
+
+    pub async fn has_crd(&self, name: &str) -> Result<bool, Error> {
+        let api: Api<CustomResourceDefinition> = Api::all(self.client.clone());
+        let lp = ListParams::default().fields(&format!("metadata.name={}", name));
+        let crds = api.list(&lp).await?;
+        Ok(!crds.items.is_empty())
+    }
+
     pub async fn get_apiserver_version(&self) -> Result<Info, Error> {
         let client: Client = self.client.clone();
         let version_info: Info = client.apiserver_version().await?;
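Taken together, these new K8sClient helpers let a caller probe an existing Argo CD install before choosing an install strategy. A minimal sketch of the intended call pattern — the `probe_argo` wrapper and the "argocd" namespace literal are illustrative, not part of this commit:

    // Sketch only: K8sClient and Error are the crate types extended in this diff.
    async fn probe_argo(client: &K8sClient) -> Result<bool, Error> {
        let ns = "argocd"; // example namespace
        let controller_ok = client
            .has_healthy_deployment_with_label(ns, "app.kubernetes.io/name=argocd-application-controller")
            .await?;
        // Cluster-wide scope only matters once a healthy controller exists in the namespace.
        Ok(controller_ok && client.is_argocd_cluster_wide(ns).await?)
    }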
@@ -10,7 +10,7 @@ use super::OPNSenseFirewall;

 #[async_trait]
 impl DnsServer for OPNSenseFirewall {
-    async fn register_hosts(&self, hosts: Vec<DnsRecord>) -> Result<(), ExecutorError> {
+    async fn register_hosts(&self, _hosts: Vec<DnsRecord>) -> Result<(), ExecutorError> {
         todo!("Refactor this to use dnsmasq")
         // let mut writable_opnsense = self.opnsense_config.write().await;
         // let mut dns = writable_opnsense.dns();
@@ -68,7 +68,7 @@ impl DnsServer for OPNSenseFirewall {
         self.host.clone()
     }

-    async fn register_dhcp_leases(&self, register: bool) -> Result<(), ExecutorError> {
+    async fn register_dhcp_leases(&self, _register: bool) -> Result<(), ExecutorError> {
         todo!("Refactor this to use dnsmasq")
         // let mut writable_opnsense = self.opnsense_config.write().await;
         // let mut dns = writable_opnsense.dns();
@@ -1,21 +1,16 @@
 use async_trait::async_trait;
-use kube::{Api, api::GroupVersionKind};
-use log::{debug, warn};
+use kube::api::GroupVersionKind;
 use non_blank_string_rs::NonBlankString;
 use serde::Serialize;
-use serde::de::DeserializeOwned;
-use std::{process::Command, str::FromStr, sync::Arc};
+use std::{str::FromStr, sync::Arc};

 use crate::{
     data::Version,
     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
     inventory::Inventory,
-    modules::helm::chart::{HelmChartScore, HelmRepository},
+    modules::{argocd::{detect_argo_deployment_type, ArgoDeploymentType}, helm::chart::{HelmChartScore, HelmRepository}},
     score::Score,
-    topology::{
-        HelmCommand, K8sclient, PreparationError, PreparationOutcome, Topology, ingress::Ingress,
-        k8s::K8sClient,
-    },
+    topology::{ingress::Ingress, k8s::K8sClient, HelmCommand, K8sclient, Topology},
 };
 use harmony_types::id::Id;
@@ -24,6 +19,7 @@ use super::ArgoApplication;
 #[derive(Debug, Serialize, Clone)]
 pub struct ArgoHelmScore {
     pub namespace: String,
+    // TODO remove this field and rely on topology, it can now know what flavor it is running
     pub openshift: bool,
     pub argo_apps: Vec<ArgoApplication>,
 }
@@ -57,10 +53,17 @@ impl<T: Topology + K8sclient + HelmCommand + Ingress> Interpret<T> for ArgoInterpret {
         let k8s_client = topology.k8s_client().await?;
         let svc = format!("argo-{}", self.score.namespace.clone());
         let domain = topology.get_domain(&svc).await?;

-        // FIXME we now have a way to know if we're running on openshift family
+        let current_argo_deployment = detect_argo_deployment_type(&k8s_client, &self.score.namespace).await?;
+
+        match current_argo_deployment {
+            ArgoDeploymentType::NotInstalled => todo!(),
+            ArgoDeploymentType::AvailableInDesiredNamespace(_) => todo!(),
+            ArgoDeploymentType::InstalledClusterWide(_) => todo!(),
+            ArgoDeploymentType::InstalledNamespaceScoped(_) => todo!(),
+        };
         let helm_score =
             argo_helm_chart_score(&self.score.namespace, self.score.openshift, &domain);

         helm_score.interpret(inventory, topology).await?;

|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ArgoInterpret {
|
|
||||||
pub async fn get_host_domain(
|
|
||||||
&self,
|
|
||||||
client: Arc<K8sClient>,
|
|
||||||
openshift: bool,
|
|
||||||
) -> Result<String, InterpretError> {
|
|
||||||
//This should be the job of the topology to determine if we are in
|
|
||||||
//openshift, potentially we need on openshift topology the same way we create a
|
|
||||||
//localhosttopology
|
|
||||||
match openshift {
|
|
||||||
true => {
|
|
||||||
let gvk = GroupVersionKind {
|
|
||||||
group: "operator.openshift.io".into(),
|
|
||||||
version: "v1".into(),
|
|
||||||
kind: "IngressController".into(),
|
|
||||||
};
|
|
||||||
let ic = client
|
|
||||||
.get_resource_json_value("default", Some("openshift-ingress-operator"), &gvk)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
match ic.data["status"]["domain"].as_str() {
|
|
||||||
Some(domain) => return Ok(domain.to_string()),
|
|
||||||
None => return Err(InterpretError::new("Could not find domain".to_string())),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
false => {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
};
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn argo_helm_chart_score(namespace: &str, openshift: bool, domain: &str) -> HelmChartScore {
|
pub fn argo_helm_chart_score(namespace: &str, openshift: bool, domain: &str) -> HelmChartScore {
|
||||||
let values = format!(
|
let values = format!(
|
||||||
r#"
|
r#"
|
||||||
|
@@ -3,7 +3,6 @@ use std::sync::Arc;
 use crate::modules::application::{
     Application, ApplicationFeature, InstallationError, InstallationOutcome,
 };
-use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
 use crate::modules::monitoring::application_monitoring::rhobs_application_monitoring_score::ApplicationRHOBMonitoringScore;

 use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;
@@ -205,10 +205,10 @@ impl RustWebapp {
             Some(body_full(tar_data.into())),
         );

-        while let Some(mut msg) = image_build_stream.next().await {
+        while let Some(msg) = image_build_stream.next().await {
             trace!("Got bollard msg {msg:?}");
             match msg {
-                Ok(mut msg) => {
+                Ok(msg) => {
                     if let Some(progress) = msg.progress_detail {
                         info!(
                             "Build progress {}/{}",
@@ -1,20 +0,0 @@
-/// Discover the current ArgoCD setup
-///
-/// 1. No argo installed
-/// 2. Argo installed in current namespace
-/// 3. Argo installed in different namespace (assuming cluster wide access)
-///
-/// For now we will go ahead with this very basic logic, there are many intricacies that can be
-/// dealt with later, such as multitenant management in a single argo instance, credentials setup t
-
-#[async_trait]
-pub trait ArgoCD {
-    async fn ensure_installed() {
-    }
-}
-
-struct CurrentNamespaceArgo;
-
-
-impl ArgoCD for CurrentNamespaceArgo {
-}
@@ -1,2 +1,118 @@
-mod discover;
-pub use discover::*;
+use std::sync::Arc;
+
+use log::{debug, info};
+
+use crate::{interpret::InterpretError, topology::k8s::K8sClient};
+
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub enum ArgoScope {
+    ClusterWide(String),
+    NamespaceScoped(String),
+}
+
+#[derive(Clone, Debug)]
+pub struct DiscoveredArgo {
+    pub control_namespace: String,
+    pub scope: ArgoScope,
+    pub has_crds: bool,
+    pub has_applicationset: bool,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub enum ArgoDeploymentType {
+    NotInstalled,
+    AvailableInDesiredNamespace(String),
+    InstalledClusterWide(String),
+    InstalledNamespaceScoped(String),
+}
+
+pub async fn discover_argo_all(
+    k8s: &Arc<K8sClient>,
+) -> Result<Vec<DiscoveredArgo>, InterpretError> {
+    // CRDs
+    let mut has_crds = true;
+    for crd in vec!["applications.argoproj.io", "appprojects.argoproj.io"] {
+        let crd_exists = k8s.has_crd(crd).await.map_err(|e| {
+            InterpretError::new(format!("Failed to verify existence of CRD {crd}: {e}"))
+        })?;
+
+        if !crd_exists {
+            info!("Missing argo CRD {crd}, looks like ArgoCD is not installed");
+            has_crds = false;
+            break;
+        }
+    }
+
+    // Namespaces that have healthy argocd deployments
+    let candidate_namespaces = k8s
+        .list_namespaces_with_healthy_deployments("app.kubernetes.io/part-of=argocd")
+        .await
+        .map_err(|e| InterpretError::new(format!("List healthy argocd deployments: {e}")))?;
+
+    let mut found = Vec::new();
+    for ns in candidate_namespaces {
+        // Require the application-controller to be healthy (sanity check)
+        let controller_ok = k8s
+            .has_healthy_deployment_with_label(
+                &ns,
+                "app.kubernetes.io/name=argocd-application-controller",
+            )
+            .await
+            .unwrap_or(false);
+        if !controller_ok {
+            continue;
+        }
+
+        let scope = if k8s.is_argocd_cluster_wide(&ns).await? {
+            ArgoScope::ClusterWide(ns.to_string())
+        } else {
+            ArgoScope::NamespaceScoped(ns.to_string())
+        };
+
+        let argo = DiscoveredArgo {
+            control_namespace: ns,
+            scope,
+            has_crds,
+            has_applicationset: k8s.has_crd("applicationsets.argoproj.io").await?,
+        };
+
+        debug!("Found argo instance {argo:?}");
+
+        found.push(argo);
+    }
+
+    Ok(found)
+}
+
+pub async fn detect_argo_deployment_type(
+    k8s: &Arc<K8sClient>,
+    desired_namespace: &str,
+) -> Result<ArgoDeploymentType, InterpretError> {
+    let discovered = discover_argo_all(k8s).await?;
+
+    if discovered.is_empty() {
+        return Ok(ArgoDeploymentType::NotInstalled);
+    }
+
+    if let Some(d) = discovered
+        .iter()
+        .find(|d| d.control_namespace == desired_namespace)
+    {
+        return Ok(ArgoDeploymentType::AvailableInDesiredNamespace(
+            d.control_namespace.clone(),
+        ));
+    }
+
+    if let Some(d) = discovered
+        .iter()
+        .find(|d| matches!(d.scope, ArgoScope::ClusterWide(_)))
+    {
+        return Ok(ArgoDeploymentType::InstalledClusterWide(
+            d.control_namespace.clone(),
+        ));
+    }
+
+    Ok(ArgoDeploymentType::InstalledNamespaceScoped(
+        discovered[0].control_namespace.clone(),
+    ))
+}
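The detection entry point ranks discovered instances: the desired namespace wins, then any cluster-wide install, else the first namespace-scoped one. A hedged usage sketch — the `needs_fresh_install` wrapper and the "argocd" namespace literal are illustrative only:

    // Sketch: decide whether a fresh Argo CD install is required.
    async fn needs_fresh_install(k8s: &Arc<K8sClient>) -> Result<bool, InterpretError> {
        Ok(matches!(
            detect_argo_deployment_type(k8s, "argocd").await?,
            ArgoDeploymentType::NotInstalled
        ))
    }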
@@ -90,12 +90,12 @@ impl<T: Topology> Interpret<T> for DiscoverInventoryAgentInterpret {
             // refactoring to do it now
             let harmony_inventory_agent::hwinfo::PhysicalHost {
                 storage_drives,
-                storage_controller,
+                storage_controller: _,
                 memory_modules,
                 cpus,
-                chipset,
+                chipset: _,
                 network_interfaces,
-                management_interface,
+                management_interface: _,
                 host_uuid,
             } = host;
@@ -1,12 +1,8 @@
-use std::collections::BTreeMap;
-
 use kube::CustomResource;
 use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};

-use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::{
-    LabelSelector, PrometheusSpec,
-};
+use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::LabelSelector;

 /// MonitoringStack CRD for monitoring.rhobs/v1alpha1
 #[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]