Compare commits

..

7 Commits

SHA1 Message Date
528ee8a696 wip: argocd discovery
Some checks failed
Run Check Script / check (pull_request) Failing after 13s
2025-10-14 21:16:02 -04:00
69a159711a feat: Support tls enabled by default on rust web app
Some checks failed
Run Check Script / check (pull_request) Failing after 15s
2025-10-14 15:53:23 -04:00
b0ad7bb4c4 feat(application): Webapp feature with production dns
Some checks failed
Run Check Script / check (pull_request) Failing after 17s
2025-10-14 15:19:12 -04:00
5f78300d78 Merge branch 'master' into feat/detect_k8s_flavour
All checks were successful
Run Check Script / check (pull_request) Successful in 1m20s
2025-10-02 17:14:30 -04:00
2d3c32469c chore: Simplify k8s flavour detection algorithm and do not unwrap when it cannot be detected, just return Err
2025-09-30 22:59:50 -04:00
1cec398d4d fix: modified naming scheme to OpenshiftFamily, K3sFamily, and Default; switched discovery of OpenshiftFamily to look for project.openshift.io
2025-09-29 11:29:34 -04:00
f073b7e5fb feat: added k8s flavour to k8s_anywhere topology to be able to get the type of cluster
All checks were successful
Run Check Script / check (pull_request) Successful in 33s
2025-09-24 13:28:46 -04:00
25 changed files with 252 additions and 133 deletions

View File

@@ -4,9 +4,10 @@ use derive_new::new;
use k8s_openapi::{ use k8s_openapi::{
ClusterResourceScope, NamespaceResourceScope, ClusterResourceScope, NamespaceResourceScope,
api::{apps::v1::Deployment, core::v1::Pod}, api::{apps::v1::Deployment, core::v1::Pod},
apimachinery::pkg::version::Info,
}; };
use kube::{ use kube::{
Client, Config, Error, Resource, Client, Config, Discovery, Error, Resource,
api::{Api, AttachParams, DeleteParams, ListParams, Patch, PatchParams, ResourceExt}, api::{Api, AttachParams, DeleteParams, ListParams, Patch, PatchParams, ResourceExt},
config::{KubeConfigOptions, Kubeconfig}, config::{KubeConfigOptions, Kubeconfig},
core::ErrorResponse, core::ErrorResponse,
@@ -20,7 +21,7 @@ use kube::{
}; };
use log::{debug, error, trace}; use log::{debug, error, trace};
use serde::{Serialize, de::DeserializeOwned}; use serde::{Serialize, de::DeserializeOwned};
use serde_json::json; use serde_json::{Value, json};
use similar::TextDiff; use similar::TextDiff;
use tokio::{io::AsyncReadExt, time::sleep}; use tokio::{io::AsyncReadExt, time::sleep};
@@ -56,6 +57,17 @@ impl K8sClient {
}) })
} }
pub async fn get_apiserver_version(&self) -> Result<Info, Error> {
let client: Client = self.client.clone();
let version_info: Info = client.apiserver_version().await?;
Ok(version_info)
}
pub async fn discovery(&self) -> Result<Discovery, Error> {
let discovery: Discovery = Discovery::new(self.client.clone()).run().await?;
Ok(discovery)
}
pub async fn get_resource_json_value( pub async fn get_resource_json_value(
&self, &self,
name: &str, name: &str,
@@ -68,7 +80,7 @@ impl K8sClient {
} else { } else {
Api::default_namespaced_with(self.client.clone(), &gvk) Api::default_namespaced_with(self.client.clone(), &gvk)
}; };
resource.get(name).await Ok(resource.get(name).await?)
} }
pub async fn get_deployment( pub async fn get_deployment(
@@ -81,7 +93,7 @@ impl K8sClient {
} else { } else {
Api::default_namespaced(self.client.clone()) Api::default_namespaced(self.client.clone())
}; };
deps.get_opt(name).await Ok(deps.get_opt(name).await?)
} }
pub async fn get_pod(&self, name: &str, namespace: Option<&str>) -> Result<Option<Pod>, Error> { pub async fn get_pod(&self, name: &str, namespace: Option<&str>) -> Result<Option<Pod>, Error> {
@@ -90,7 +102,7 @@ impl K8sClient {
} else { } else {
Api::default_namespaced(self.client.clone()) Api::default_namespaced(self.client.clone())
}; };
pods.get_opt(name).await Ok(pods.get_opt(name).await?)
} }
pub async fn scale_deployment( pub async fn scale_deployment(
@@ -167,12 +179,14 @@ impl K8sClient {
loop { loop {
let pod = self.get_pod(pod_name, namespace).await?; let pod = self.get_pod(pod_name, namespace).await?;
if let Some(p) = pod if let Some(p) = pod {
&& let Some(status) = p.status if let Some(status) = p.status {
&& let Some(phase) = status.phase if let Some(phase) = status.phase {
&& phase.to_lowercase() == "running" if phase.to_lowercase() == "running" {
{ return Ok(());
return Ok(()); }
}
}
} }
if elapsed >= timeout_secs { if elapsed >= timeout_secs {
@@ -235,7 +249,7 @@ impl K8sClient {
if let Some(s) = status.status { if let Some(s) = status.status {
let mut stdout_buf = String::new(); let mut stdout_buf = String::new();
if let Some(mut stdout) = process.stdout() { if let Some(mut stdout) = process.stdout().take() {
stdout stdout
.read_to_string(&mut stdout_buf) .read_to_string(&mut stdout_buf)
.await .await

View File

@@ -47,6 +47,13 @@ struct K8sState {
message: String, message: String,
} }
#[derive(Debug, Clone)]
pub enum KubernetesDistribution {
OpenshiftFamily,
K3sFamily,
Default,
}
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
enum K8sSource { enum K8sSource {
LocalK3d, LocalK3d,
@@ -57,6 +64,7 @@ enum K8sSource {
pub struct K8sAnywhereTopology { pub struct K8sAnywhereTopology {
k8s_state: Arc<OnceCell<Option<K8sState>>>, k8s_state: Arc<OnceCell<Option<K8sState>>>,
tenant_manager: Arc<OnceCell<K8sTenantManager>>, tenant_manager: Arc<OnceCell<K8sTenantManager>>,
flavour: Arc<OnceCell<KubernetesDistribution>>,
config: Arc<K8sAnywhereConfig>, config: Arc<K8sAnywhereConfig>,
} }
@@ -162,6 +170,7 @@ impl K8sAnywhereTopology {
Self { Self {
k8s_state: Arc::new(OnceCell::new()), k8s_state: Arc::new(OnceCell::new()),
tenant_manager: Arc::new(OnceCell::new()), tenant_manager: Arc::new(OnceCell::new()),
flavour: Arc::new(OnceCell::new()),
config: Arc::new(K8sAnywhereConfig::from_env()), config: Arc::new(K8sAnywhereConfig::from_env()),
} }
} }
@@ -170,10 +179,42 @@ impl K8sAnywhereTopology {
Self { Self {
k8s_state: Arc::new(OnceCell::new()), k8s_state: Arc::new(OnceCell::new()),
tenant_manager: Arc::new(OnceCell::new()), tenant_manager: Arc::new(OnceCell::new()),
flavour: Arc::new(OnceCell::new()),
config: Arc::new(config), config: Arc::new(config),
} }
} }
pub async fn get_k8s_distribution(&self) -> Result<&KubernetesDistribution, PreparationError> {
self.flavour
.get_or_try_init(async || {
let client = self.k8s_client().await.unwrap();
let discovery = client.discovery().await.map_err(|e| {
PreparationError::new(format!("Could not discover API groups: {}", e))
})?;
let version = client.get_apiserver_version().await.map_err(|e| {
PreparationError::new(format!("Could not get server version: {}", e))
})?;
// OpenShift / OKD
if discovery
.groups()
.any(|g| g.name() == "project.openshift.io")
{
return Ok(KubernetesDistribution::OpenshiftFamily);
}
// K3d / K3s
if version.git_version.contains("k3s") {
return Ok(KubernetesDistribution::K3sFamily);
}
return Ok(KubernetesDistribution::Default);
})
.await
}
async fn get_cluster_observability_operator_prometheus_application_score( async fn get_cluster_observability_operator_prometheus_application_score(
&self, &self,
sender: RHOBObservability, sender: RHOBObservability,
@@ -212,11 +253,11 @@ impl K8sAnywhereTopology {
.await?; .await?;
let ready_replicas = ic.data["status"]["availableReplicas"].as_i64().unwrap_or(0); let ready_replicas = ic.data["status"]["availableReplicas"].as_i64().unwrap_or(0);
if ready_replicas >= 1 { if ready_replicas >= 1 {
Ok(()) return Ok(());
} else { } else {
Err(PreparationError::new( return Err(PreparationError::new(
"openshift-ingress-operator not available".to_string(), "openshift-ingress-operator not available".to_string(),
)) ));
} }
} }
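
For context, a minimal sketch of how a caller might branch on the detected distribution; the ingress_strategy helper and the returned labels are illustrative assumptions, and only get_k8s_distribution and the KubernetesDistribution variants come from this change set:

// Hypothetical helper (not part of this change set), assuming the harmony types are in scope.
async fn ingress_strategy(topology: &K8sAnywhereTopology) -> Result<&'static str, PreparationError> {
    let strategy = match topology.get_k8s_distribution().await? {
        // project.openshift.io was discovered: OpenShift Routes and the openshift-ingress-operator apply.
        KubernetesDistribution::OpenshiftFamily => "openshift-route",
        // The apiserver gitVersion contains "k3s": k3s/k3d ships Traefik as its default ingress.
        KubernetesDistribution::K3sFamily => "traefik-ingress",
        // Anything else falls back to a generic Kubernetes ingress setup.
        KubernetesDistribution::Default => "generic-ingress",
    };
    Ok(strategy)
}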

View File

@@ -186,7 +186,7 @@ impl TopologyState {
} }
} }
#[derive(Debug)] #[derive(Debug, PartialEq)]
pub enum DeploymentTarget { pub enum DeploymentTarget {
LocalDev, LocalDev,
Staging, Staging,

View File

@@ -11,7 +11,7 @@ pub struct InventoryRepositoryFactory;
impl InventoryRepositoryFactory { impl InventoryRepositoryFactory {
pub async fn build() -> Result<Box<dyn InventoryRepository>, RepoError> { pub async fn build() -> Result<Box<dyn InventoryRepository>, RepoError> {
Ok(Box::new( Ok(Box::new(
SqliteInventoryRepository::new(&DATABASE_URL).await?, SqliteInventoryRepository::new(&(*DATABASE_URL)).await?,
)) ))
} }
} }

View File

@@ -36,7 +36,7 @@ impl HttpServer for OPNSenseFirewall {
async fn serve_file_content(&self, file: &FileContent) -> Result<(), ExecutorError> { async fn serve_file_content(&self, file: &FileContent) -> Result<(), ExecutorError> {
let path = match &file.path { let path = match &file.path {
crate::data::FilePath::Relative(path) => { crate::data::FilePath::Relative(path) => {
format!("{OPNSENSE_HTTP_ROOT_PATH}/{}", path) format!("{OPNSENSE_HTTP_ROOT_PATH}/{}", path.to_string())
} }
crate::data::FilePath::Absolute(path) => { crate::data::FilePath::Absolute(path) => {
return Err(ExecutorError::ConfigurationError(format!( return Err(ExecutorError::ConfigurationError(format!(

View File

@@ -182,12 +182,16 @@ pub(crate) fn get_health_check_for_backend(
let uppercase = binding.as_str(); let uppercase = binding.as_str();
match uppercase { match uppercase {
"TCP" => { "TCP" => {
if let Some(checkport) = haproxy_health_check.checkport.content.as_ref() if let Some(checkport) = haproxy_health_check.checkport.content.as_ref() {
&& !checkport.is_empty() if !checkport.is_empty() {
{ return Some(HealthCheck::TCP(Some(checkport.parse().unwrap_or_else(
return Some(HealthCheck::TCP(Some(checkport.parse().unwrap_or_else( |_| {
|_| panic!("HAProxy check port should be a valid port number, got {checkport}"), panic!(
)))); "HAProxy check port should be a valid port number, got {checkport}"
)
},
))));
}
} }
Some(HealthCheck::TCP(None)) Some(HealthCheck::TCP(None))
} }

View File

@@ -8,6 +8,7 @@ mod tftp;
use std::sync::Arc; use std::sync::Arc;
pub use management::*; pub use management::*;
use opnsense_config_xml::Host;
use tokio::sync::RwLock; use tokio::sync::RwLock;
use crate::{executors::ExecutorError, topology::LogicalHost}; use crate::{executors::ExecutorError, topology::LogicalHost};

View File

@@ -1,8 +1,10 @@
use async_trait::async_trait; use async_trait::async_trait;
use kube::api::GroupVersionKind; use kube::{Api, api::GroupVersionKind};
use log::{debug, warn};
use non_blank_string_rs::NonBlankString; use non_blank_string_rs::NonBlankString;
use serde::Serialize; use serde::Serialize;
use std::{str::FromStr, sync::Arc}; use serde::de::DeserializeOwned;
use std::{process::Command, str::FromStr, sync::Arc};
use crate::{ use crate::{
data::Version, data::Version,
@@ -10,7 +12,10 @@ use crate::{
inventory::Inventory, inventory::Inventory,
modules::helm::chart::{HelmChartScore, HelmRepository}, modules::helm::chart::{HelmChartScore, HelmRepository},
score::Score, score::Score,
topology::{HelmCommand, K8sclient, Topology, ingress::Ingress, k8s::K8sClient}, topology::{
HelmCommand, K8sclient, PreparationError, PreparationOutcome, Topology, ingress::Ingress,
k8s::K8sClient,
},
}; };
use harmony_types::id::Id; use harmony_types::id::Id;
@@ -52,6 +57,8 @@ impl<T: Topology + K8sclient + HelmCommand + Ingress> Interpret<T> for ArgoInter
let k8s_client = topology.k8s_client().await?; let k8s_client = topology.k8s_client().await?;
let svc = format!("argo-{}", self.score.namespace.clone()); let svc = format!("argo-{}", self.score.namespace.clone());
let domain = topology.get_domain(&svc).await?; let domain = topology.get_domain(&svc).await?;
// FIXME we now have a way to know if we're running on openshift family
let helm_score = let helm_score =
argo_helm_chart_score(&self.score.namespace, self.score.openshift, &domain); argo_helm_chart_score(&self.score.namespace, self.score.openshift, &domain);
@@ -114,13 +121,13 @@ impl ArgoInterpret {
match ic.data["status"]["domain"].as_str() { match ic.data["status"]["domain"].as_str() {
Some(domain) => return Ok(domain.to_string()), Some(domain) => return Ok(domain.to_string()),
None => Err(InterpretError::new("Could not find domain".to_string())), None => return Err(InterpretError::new("Could not find domain".to_string())),
} }
} }
false => { false => {
todo!() todo!()
} }
} };
} }
} }
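
One way the FIXME above could eventually be resolved, sketched under the assumption that the interpret can reach a topology exposing get_k8s_distribution (the current bound Topology + K8sclient + HelmCommand + Ingress does not include it, and error mapping to InterpretError is elided):

// Sketch only: derive the openshift flag from the detected distribution instead of
// hardcoding self.score.openshift; assumes the topology bound is extended accordingly.
let openshift = matches!(
    topology.get_k8s_distribution().await?,
    KubernetesDistribution::OpenshiftFamily
);
let helm_score = argo_helm_chart_score(&self.score.namespace, openshift, &domain);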

View File

@@ -10,12 +10,11 @@ use crate::{
data::Version, data::Version,
inventory::Inventory, inventory::Inventory,
modules::application::{ modules::application::{
ApplicationFeature, HelmPackage, InstallationError, InstallationOutcome, OCICompliant, features::{ArgoApplication, ArgoHelmScore}, webapp::Webapp, ApplicationFeature, HelmPackage, InstallationError, InstallationOutcome, OCICompliant
features::{ArgoApplication, ArgoHelmScore},
}, },
score::Score, score::Score,
topology::{ topology::{
DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology, ingress::Ingress, ingress::Ingress, DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology
}, },
}; };
@@ -47,11 +46,11 @@ use crate::{
/// - ArgoCD to install/upgrade/rollback/inspect k8s resources /// - ArgoCD to install/upgrade/rollback/inspect k8s resources
/// - Kubernetes for runtime orchestration /// - Kubernetes for runtime orchestration
#[derive(Debug, Default, Clone)] #[derive(Debug, Default, Clone)]
pub struct PackagingDeployment<A: OCICompliant + HelmPackage> { pub struct PackagingDeployment<A: OCICompliant + HelmPackage + Webapp> {
pub application: Arc<A>, pub application: Arc<A>,
} }
impl<A: OCICompliant + HelmPackage> PackagingDeployment<A> { impl<A: OCICompliant + HelmPackage + Webapp> PackagingDeployment<A> {
async fn deploy_to_local_k3d( async fn deploy_to_local_k3d(
&self, &self,
app_name: String, app_name: String,
@@ -137,7 +136,7 @@ impl<A: OCICompliant + HelmPackage> PackagingDeployment<A> {
#[async_trait] #[async_trait]
impl< impl<
A: OCICompliant + HelmPackage + Clone + 'static, A: OCICompliant + HelmPackage + Webapp + Clone + 'static,
T: Topology + HelmCommand + MultiTargetTopology + K8sclient + Ingress + 'static, T: Topology + HelmCommand + MultiTargetTopology + K8sclient + Ingress + 'static,
> ApplicationFeature<T> for PackagingDeployment<A> > ApplicationFeature<T> for PackagingDeployment<A>
{ {
@@ -146,10 +145,15 @@ impl<
topology: &T, topology: &T,
) -> Result<InstallationOutcome, InstallationError> { ) -> Result<InstallationOutcome, InstallationError> {
let image = self.application.image_name(); let image = self.application.image_name();
let domain = topology
let domain = if topology.current_target() == DeploymentTarget::Production {
self.application.dns()
} else {
topology
.get_domain(&self.application.name()) .get_domain(&self.application.name())
.await .await
.map_err(|e| e.to_string())?; .map_err(|e| e.to_string())?
};
// TODO Write CI/CD workflow files // TODO Write CI/CD workflow files
// we can autotedect the CI type using the remote url (default to github action for github // we can autotedect the CI type using the remote url (default to github action for github
@@ -190,7 +194,7 @@ impl<
info!("Deploying {} to target {target:?}", self.application.name()); info!("Deploying {} to target {target:?}", self.application.name());
let score = ArgoHelmScore { let score = ArgoHelmScore {
namespace: self.application.name().to_string(), namespace: format!("{}", self.application.name()),
openshift: true, openshift: true,
argo_apps: vec![ArgoApplication::from(CDApplicationConfig { argo_apps: vec![ArgoApplication::from(CDApplicationConfig {
// helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0 // helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0
@@ -198,8 +202,8 @@ impl<
helm_chart_repo_url: "hub.nationtech.io/harmony".to_string(), helm_chart_repo_url: "hub.nationtech.io/harmony".to_string(),
helm_chart_name: format!("{}-chart", self.application.name()), helm_chart_name: format!("{}-chart", self.application.name()),
values_overrides: None, values_overrides: None,
name: self.application.name().to_string(), name: format!("{}", self.application.name()),
namespace: self.application.name().to_string(), namespace: format!("{}", self.application.name()),
})], })],
}; };
score score

View File

@@ -3,6 +3,7 @@ use std::sync::Arc;
use crate::modules::application::{ use crate::modules::application::{
Application, ApplicationFeature, InstallationError, InstallationOutcome, Application, ApplicationFeature, InstallationError, InstallationOutcome,
}; };
use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
use crate::modules::monitoring::application_monitoring::rhobs_application_monitoring_score::ApplicationRHOBMonitoringScore; use crate::modules::monitoring::application_monitoring::rhobs_application_monitoring_score::ApplicationRHOBMonitoringScore;
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability; use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;

View File

@@ -2,6 +2,7 @@ mod feature;
pub mod features; pub mod features;
pub mod oci; pub mod oci;
mod rust; mod rust;
mod webapp;
use std::sync::Arc; use std::sync::Arc;
pub use feature::*; pub use feature::*;

View File

@@ -16,6 +16,7 @@ use tar::{Builder, Header};
use walkdir::WalkDir; use walkdir::WalkDir;
use crate::config::{REGISTRY_PROJECT, REGISTRY_URL}; use crate::config::{REGISTRY_PROJECT, REGISTRY_URL};
use crate::modules::application::webapp::Webapp;
use crate::{score::Score, topology::Topology}; use crate::{score::Score, topology::Topology};
use super::{Application, ApplicationFeature, ApplicationInterpret, HelmPackage, OCICompliant}; use super::{Application, ApplicationFeature, ApplicationInterpret, HelmPackage, OCICompliant};
@@ -60,6 +61,10 @@ pub struct RustWebapp {
pub project_root: PathBuf, pub project_root: PathBuf,
pub service_port: u32, pub service_port: u32,
pub framework: Option<RustWebFramework>, pub framework: Option<RustWebFramework>,
/// Host name that will be used in production environment.
///
/// This is the place to put the public host name if this is a public facing webapp.
pub dns: String,
} }
impl Application for RustWebapp { impl Application for RustWebapp {
@@ -68,6 +73,12 @@ impl Application for RustWebapp {
} }
} }
impl Webapp for RustWebapp {
fn dns(&self) -> String {
self.dns.clone()
}
}
#[async_trait] #[async_trait]
impl HelmPackage for RustWebapp { impl HelmPackage for RustWebapp {
async fn build_push_helm_package( async fn build_push_helm_package(
@@ -194,10 +205,10 @@ impl RustWebapp {
Some(body_full(tar_data.into())), Some(body_full(tar_data.into())),
); );
while let Some(msg) = image_build_stream.next().await { while let Some(mut msg) = image_build_stream.next().await {
trace!("Got bollard msg {msg:?}"); trace!("Got bollard msg {msg:?}");
match msg { match msg {
Ok(msg) => { Ok(mut msg) => {
if let Some(progress) = msg.progress_detail { if let Some(progress) = msg.progress_detail {
info!( info!(
"Build progress {}/{}", "Build progress {}/{}",
@@ -257,7 +268,6 @@ impl RustWebapp {
".harmony_generated", ".harmony_generated",
"harmony", "harmony",
"node_modules", "node_modules",
"Dockerfile.harmony",
]; ];
let mut entries: Vec<_> = WalkDir::new(project_root) let mut entries: Vec<_> = WalkDir::new(project_root)
.into_iter() .into_iter()
@@ -461,73 +471,76 @@ impl RustWebapp {
let (image_repo, image_tag) = image_url.rsplit_once(':').unwrap_or((image_url, "latest")); let (image_repo, image_tag) = image_url.rsplit_once(':').unwrap_or((image_url, "latest"));
let app_name = &self.name;
let service_port = self.service_port;
// Create Chart.yaml // Create Chart.yaml
let chart_yaml = format!( let chart_yaml = format!(
r#" r#"
apiVersion: v2 apiVersion: v2
name: {} name: {chart_name}
description: A Helm chart for the {} web application. description: A Helm chart for the {app_name} web application.
type: application type: application
version: 0.1.0 version: 0.2.0
appVersion: "{}" appVersion: "{image_tag}"
"#, "#,
chart_name, self.name, image_tag
); );
fs::write(chart_dir.join("Chart.yaml"), chart_yaml)?; fs::write(chart_dir.join("Chart.yaml"), chart_yaml)?;
// Create values.yaml // Create values.yaml
let values_yaml = format!( let values_yaml = format!(
r#" r#"
# Default values for {}. # Default values for {chart_name}.
# This is a YAML-formatted file. # This is a YAML-formatted file.
# Declare variables to be passed into your templates. # Declare variables to be passed into your templates.
replicaCount: 1 replicaCount: 1
image: image:
repository: {} repository: {image_repo}
pullPolicy: IfNotPresent pullPolicy: IfNotPresent
# Overridden by the chart's appVersion # Overridden by the chart's appVersion
tag: "{}" tag: "{image_tag}"
service: service:
type: ClusterIP type: ClusterIP
port: {} port: {service_port}
ingress: ingress:
enabled: true enabled: true
tls: true
# Annotations for cert-manager to handle SSL. # Annotations for cert-manager to handle SSL.
annotations: annotations:
# Add other annotations like nginx ingress class if needed # Add other annotations like nginx ingress class if needed
# kubernetes.io/ingress.class: nginx # kubernetes.io/ingress.class: nginx
hosts: hosts:
- host: {} - host: {domain}
paths: paths:
- path: / - path: /
pathType: ImplementationSpecific pathType: ImplementationSpecific
"#, "#,
chart_name, image_repo, image_tag, self.service_port, domain,
); );
fs::write(chart_dir.join("values.yaml"), values_yaml)?; fs::write(chart_dir.join("values.yaml"), values_yaml)?;
// Create templates/_helpers.tpl // Create templates/_helpers.tpl
let helpers_tpl = r#" let helpers_tpl = format!(
{{/* r#"
{{{{/*
Expand the name of the chart. Expand the name of the chart.
*/}} */}}}}
{{- define "chart.name" -}} {{{{- define "chart.name" -}}}}
{{- default .Chart.Name $.Values.nameOverride | trunc 63 | trimSuffix "-" }} {{{{- default .Chart.Name $.Values.nameOverride | trunc 63 | trimSuffix "-" }}}}
{{- end }} {{{{- end }}}}
{{/* {{{{/*
Create a default fully qualified app name. Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}} */}}}}
{{- define "chart.fullname" -}} {{{{- define "chart.fullname" -}}}}
{{- $name := default .Chart.Name $.Values.nameOverride }} {{{{- $name := default .Chart.Name $.Values.nameOverride }}}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} {{{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}}}
{{- end }} {{{{- end }}}}
"#.to_string(); "#
);
fs::write(templates_dir.join("_helpers.tpl"), helpers_tpl)?; fs::write(templates_dir.join("_helpers.tpl"), helpers_tpl)?;
// Create templates/service.yaml // Create templates/service.yaml
@@ -581,7 +594,11 @@ spec:
); );
fs::write(templates_dir.join("deployment.yaml"), deployment_yaml)?; fs::write(templates_dir.join("deployment.yaml"), deployment_yaml)?;
let service_port = self.service_port;
// Create templates/ingress.yaml // Create templates/ingress.yaml
// TODO get issuer name and tls config from topology as it may be different from one
// cluster to another, also from one version to another
let ingress_yaml = format!( let ingress_yaml = format!(
r#" r#"
{{{{- if $.Values.ingress.enabled -}}}} {{{{- if $.Values.ingress.enabled -}}}}
@@ -594,13 +611,11 @@ metadata:
spec: spec:
{{{{- if $.Values.ingress.tls }}}} {{{{- if $.Values.ingress.tls }}}}
tls: tls:
{{{{- range $.Values.ingress.tls }}}} - secretName: {{{{ include "chart.fullname" . }}}}-tls
- hosts: hosts:
{{{{- range .hosts }}}} {{{{- range $.Values.ingress.hosts }}}}
- {{{{ . | quote }}}} - {{{{ .host | quote }}}}
{{{{- end }}}} {{{{- end }}}}
secretName: {{{{ .secretName }}}}
{{{{- end }}}}
{{{{- end }}}} {{{{- end }}}}
rules: rules:
{{{{- range $.Values.ingress.hosts }}}} {{{{- range $.Values.ingress.hosts }}}}
@@ -614,12 +629,11 @@ spec:
service: service:
name: {{{{ include "chart.fullname" $ }}}} name: {{{{ include "chart.fullname" $ }}}}
port: port:
number: {{{{ $.Values.service.port | default {} }}}} number: {{{{ $.Values.service.port | default {service_port} }}}}
{{{{- end }}}} {{{{- end }}}}
{{{{- end }}}} {{{{- end }}}}
{{{{- end }}}} {{{{- end }}}}
"#, "#,
self.service_port
); );
fs::write(templates_dir.join("ingress.yaml"), ingress_yaml)?; fs::write(templates_dir.join("ingress.yaml"), ingress_yaml)?;

View File

@@ -0,0 +1,7 @@
use super::Application;
use async_trait::async_trait;
#[async_trait]
pub trait Webapp: Application {
fn dns(&self) -> String;
}
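
To show the new contract end to end, a hypothetical application definition; the field values and wiring are placeholders, and it assumes RustWebapp has no required fields beyond those visible in the diff above:

use std::{path::PathBuf, sync::Arc};

// Placeholder values, for illustration only.
let app = Arc::new(RustWebapp {
    name: "harmony-example-rust-webapp".to_string(),
    project_root: PathBuf::from("."),
    service_port: 8080,
    framework: None,
    // Used as the ingress host when topology.current_target() == DeploymentTarget::Production;
    // every other target keeps resolving the host through topology.get_domain(&app.name()).
    dns: "app.example.com".to_string(),
});
let feature = PackagingDeployment { application: app };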

View File

@@ -0,0 +1,20 @@
/// Discover the current ArgoCD setup
///
/// 1. No argo installed
/// 2. Argo installed in current namespace
/// 3. Argo installed in different namespace (assuming cluster wide access)
///
/// For now we will go ahead with this very basic logic, there are many intricacies that can be
/// dealt with later, such as multitenant management in a single argo instance, credentials setup t
#[async_trait]
pub trait ArgoCD {
async fn ensure_installed() {
}
}
struct CurrentNamespaceArgo;
impl ArgoCD for CurrentNamespaceArgo {
}
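
Since the trait above is still a wip stub, here is a rough sketch of the discovery logic its doc comment describes; the deployment name "argocd-server" and the overall flow are assumptions, not part of this diff:

use k8s_openapi::api::apps::v1::Deployment;
use kube::{Api, Client};

#[derive(Debug)]
enum ArgoSetup {
    NotInstalled,           // 1. no ArgoCD found
    CurrentNamespace,       // 2. installed in the namespace we run in
    OtherNamespace(String), // 3. installed elsewhere, assuming cluster-wide read access
}

async fn discover_argocd(client: Client) -> Result<ArgoSetup, kube::Error> {
    // Check the conventional argocd-server deployment in the current namespace first.
    let local: Api<Deployment> = Api::default_namespaced(client.clone());
    if local.get_opt("argocd-server").await?.is_some() {
        return Ok(ArgoSetup::CurrentNamespace);
    }
    // Otherwise search cluster-wide.
    let all: Api<Deployment> = Api::all(client);
    for d in all.list(&Default::default()).await? {
        if d.metadata.name.as_deref() == Some("argocd-server") {
            return Ok(ArgoSetup::OtherNamespace(d.metadata.namespace.unwrap_or_default()));
        }
    }
    Ok(ArgoSetup::NotInstalled)
}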

View File

@@ -0,0 +1,2 @@
mod discover;
pub use discover::*;

View File

@@ -66,7 +66,8 @@ impl HelmCommandExecutor {
.is_none() .is_none()
{ {
if self.chart.repo.is_none() { if self.chart.repo.is_none() {
return Err(std::io::Error::other( return Err(std::io::Error::new(
ErrorKind::Other,
"Chart doesn't exist locally and no repo specified", "Chart doesn't exist locally and no repo specified",
)); ));
} }
@@ -106,10 +107,10 @@ impl HelmCommandExecutor {
} }
pub fn run_command(mut self, mut args: Vec<String>) -> Result<Output, std::io::Error> { pub fn run_command(mut self, mut args: Vec<String>) -> Result<Output, std::io::Error> {
if let Some(d) = self.debug if let Some(d) = self.debug {
&& d if d {
{ args.push("--debug".to_string());
args.push("--debug".to_string()); }
} }
let path = if let Some(p) = self.path { let path = if let Some(p) = self.path {
@@ -233,28 +234,28 @@ impl HelmChart {
args.push(kv); args.push(kv);
} }
if let Some(crd) = self.include_crds if let Some(crd) = self.include_crds {
&& crd if crd {
{ args.push("--include-crds".to_string());
args.push("--include-crds".to_string()); }
} }
if let Some(st) = self.skip_tests if let Some(st) = self.skip_tests {
&& st if st {
{ args.push("--skip-tests".to_string());
args.push("--skip-tests".to_string()); }
} }
if let Some(sh) = self.skip_hooks if let Some(sh) = self.skip_hooks {
&& sh if sh {
{ args.push("--no-hooks".to_string());
args.push("--no-hooks".to_string()); }
} }
if let Some(d) = self.debug if let Some(d) = self.debug {
&& d if d {
{ args.push("--debug".to_string());
args.push("--debug".to_string()); }
} }
args args

View File

@@ -63,7 +63,7 @@ impl<T: Topology + HttpServer> Interpret<T> for StaticFilesHttpInterpret {
} }
for f in self.score.files.iter() { for f in self.score.files.iter() {
http_server.serve_file_content(f).await? http_server.serve_file_content(&f).await?
} }
http_server.commit_config().await?; http_server.commit_config().await?;

View File

@@ -92,7 +92,7 @@ impl<T: Topology> Interpret<T> for DiscoverHostForRoleInterpret {
); );
return Err(InterpretError::new(format!( return Err(InterpretError::new(format!(
"Could not select host : {}", "Could not select host : {}",
e e.to_string()
))); )));
} }
} }

View File

@@ -17,3 +17,4 @@ pub mod prometheus;
pub mod storage; pub mod storage;
pub mod tenant; pub mod tenant;
pub mod tftp; pub mod tftp;
pub mod argocd;

View File

@@ -9,7 +9,9 @@ use crate::{
inventory::Inventory, inventory::Inventory,
modules::{ modules::{
application::Application, application::Application,
monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability, monitoring::kube_prometheus::crd::{
crd_alertmanager_config::CRDPrometheus, rhob_alertmanager_config::RHOBObservability,
},
prometheus::prometheus::PrometheusApplicationMonitoring, prometheus::prometheus::PrometheusApplicationMonitoring,
}, },
score::Score, score::Score,

View File

@@ -1,8 +1,12 @@
use std::collections::BTreeMap;
use kube::CustomResource; use kube::CustomResource;
use schemars::JsonSchema; use schemars::JsonSchema;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::LabelSelector; use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::{
LabelSelector, PrometheusSpec,
};
/// MonitoringStack CRD for monitoring.rhobs/v1alpha1 /// MonitoringStack CRD for monitoring.rhobs/v1alpha1
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)] #[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]

View File

@@ -52,12 +52,6 @@ pub struct OKDSetup02BootstrapInterpret {
status: InterpretStatus, status: InterpretStatus,
} }
impl Default for OKDSetup02BootstrapInterpret {
fn default() -> Self {
Self::new()
}
}
impl OKDSetup02BootstrapInterpret { impl OKDSetup02BootstrapInterpret {
pub fn new() -> Self { pub fn new() -> Self {
let version = Version::from("1.0.0").unwrap(); let version = Version::from("1.0.0").unwrap();
@@ -104,9 +98,9 @@ impl OKDSetup02BootstrapInterpret {
InterpretError::new(format!("Failed to create okd installation directory : {e}")) InterpretError::new(format!("Failed to create okd installation directory : {e}"))
})?; })?;
if !exit_status.success() { if !exit_status.success() {
return Err(InterpretError::new( return Err(InterpretError::new(format!(
"Failed to create okd installation directory".to_string(), "Failed to create okd installation directory"
)); )));
} else { } else {
info!( info!(
"Created OKD installation directory {}", "Created OKD installation directory {}",

View File

@@ -254,7 +254,7 @@ impl RHOBAlertingInterpret {
let stack = MonitoringStack { let stack = MonitoringStack {
metadata: ObjectMeta { metadata: ObjectMeta {
name: Some(format!("{}-monitoring", self.sender.namespace.clone())), name: Some(format!("{}-monitoring", self.sender.namespace.clone()).into()),
namespace: Some(self.sender.namespace.clone()), namespace: Some(self.sender.namespace.clone()),
labels: Some([("monitoring-stack".into(), "true".into())].into()), labels: Some([("monitoring-stack".into(), "true".into())].into()),
..Default::default() ..Default::default()
@@ -278,7 +278,7 @@ impl RHOBAlertingInterpret {
.get_domain(&format!("alert-manager-{}", self.sender.namespace.clone())) .get_domain(&format!("alert-manager-{}", self.sender.namespace.clone()))
.await?; .await?;
let name = format!("{}-alert-manager", self.sender.namespace.clone()); let name = format!("{}-alert-manager", self.sender.namespace.clone());
let backend_service = "alertmanager-operated".to_string(); let backend_service = format!("alertmanager-operated");
let namespace = self.sender.namespace.clone(); let namespace = self.sender.namespace.clone();
let alert_manager_ingress = K8sIngressScore { let alert_manager_ingress = K8sIngressScore {
name: fqdn!(&name), name: fqdn!(&name),
@@ -295,7 +295,7 @@ impl RHOBAlertingInterpret {
.get_domain(&format!("prometheus-{}", self.sender.namespace.clone())) .get_domain(&format!("prometheus-{}", self.sender.namespace.clone()))
.await?; .await?;
let name = format!("{}-prometheus", self.sender.namespace.clone()); let name = format!("{}-prometheus", self.sender.namespace.clone());
let backend_service = "prometheus-operated".to_string(); let backend_service = format!("prometheus-operated");
let prometheus_ingress = K8sIngressScore { let prometheus_ingress = K8sIngressScore {
name: fqdn!(&name), name: fqdn!(&name),
host: fqdn!(&prometheus_domain), host: fqdn!(&prometheus_domain),

View File

@@ -25,7 +25,7 @@ pub struct CephRemoveOsd {
impl<T: Topology + K8sclient> Score<T> for CephRemoveOsd { impl<T: Topology + K8sclient> Score<T> for CephRemoveOsd {
fn name(&self) -> String { fn name(&self) -> String {
"CephRemoveOsdScore".to_string() format!("CephRemoveOsdScore")
} }
#[doc(hidden)] #[doc(hidden)]
@@ -118,14 +118,14 @@ impl CephRemoveOsdInterpret {
if let Some(status) = deployment.status { if let Some(status) = deployment.status {
let ready_count = status.ready_replicas.unwrap_or(0); let ready_count = status.ready_replicas.unwrap_or(0);
if ready_count >= 1 { if ready_count >= 1 {
Ok(Outcome::success(format!( return Ok(Outcome::success(format!(
"'{}' is ready with {} replica(s).", "'{}' is ready with {} replica(s).",
&toolbox_dep, ready_count &toolbox_dep, ready_count
))) )));
} else { } else {
Err(InterpretError::new( return Err(InterpretError::new(
"ceph-tool-box not ready in cluster".to_string(), "ceph-tool-box not ready in cluster".to_string(),
)) ));
} }
} else { } else {
Err(InterpretError::new(format!( Err(InterpretError::new(format!(
@@ -181,14 +181,15 @@ impl CephRemoveOsdInterpret {
) )
.await?; .await?;
if let Some(deployment) = dep if let Some(deployment) = dep {
&& let Some(status) = deployment.status if let Some(status) = deployment.status {
&& status.replicas.unwrap_or(1) == 0 if status.replicas.unwrap_or(1) == 0 && status.ready_replicas.unwrap_or(1) == 0
&& status.ready_replicas.unwrap_or(1) == 0 {
{ return Ok(Outcome::success(
return Ok(Outcome::success( "Deployment successfully scaled down.".to_string(),
"Deployment successfully scaled down.".to_string(), ));
)); }
}
} }
if start.elapsed() > timeout { if start.elapsed() > timeout {

View File

@@ -20,7 +20,7 @@ pub struct CephVerifyClusterHealth {
impl<T: Topology + K8sclient> Score<T> for CephVerifyClusterHealth { impl<T: Topology + K8sclient> Score<T> for CephVerifyClusterHealth {
fn name(&self) -> String { fn name(&self) -> String {
"CephValidateClusterHealth".to_string() format!("CephValidateClusterHealth")
} }
fn create_interpret(&self) -> Box<dyn Interpret<T>> { fn create_interpret(&self) -> Box<dyn Interpret<T>> {
@@ -80,14 +80,14 @@ impl CephVerifyClusterHealthInterpret {
if let Some(status) = deployment.status { if let Some(status) = deployment.status {
let ready_count = status.ready_replicas.unwrap_or(0); let ready_count = status.ready_replicas.unwrap_or(0);
if ready_count >= 1 { if ready_count >= 1 {
Ok(Outcome::success(format!( return Ok(Outcome::success(format!(
"'{}' is ready with {} replica(s).", "'{}' is ready with {} replica(s).",
&toolbox_dep, ready_count &toolbox_dep, ready_count
))) )));
} else { } else {
Err(InterpretError::new( return Err(InterpretError::new(
"ceph-tool-box not ready in cluster".to_string(), "ceph-tool-box not ready in cluster".to_string(),
)) ));
} }
} else { } else {
Err(InterpretError::new(format!( Err(InterpretError::new(format!(
@@ -123,9 +123,9 @@ impl CephVerifyClusterHealthInterpret {
.await?; .await?;
if health.contains("HEALTH_OK") { if health.contains("HEALTH_OK") {
Ok(Outcome::success( return Ok(Outcome::success(
"Ceph Cluster in healthy state".to_string(), "Ceph Cluster in healthy state".to_string(),
)) ));
} else { } else {
Err(InterpretError::new(format!( Err(InterpretError::new(format!(
"Ceph cluster unhealthy {}", "Ceph cluster unhealthy {}",