Compare commits

..

1 Commits

Author SHA1 Message Date
ec794f076e wip: multisite application feature with stateless and statefull application traits
Some checks failed
Run Check Script / check (pull_request) Failing after 19s
2025-09-09 14:18:00 -04:00
30 changed files with 253 additions and 390 deletions

1
Cargo.lock generated
View File

@@ -3124,7 +3124,6 @@ dependencies = [
"fxhash", "fxhash",
"newline-converter", "newline-converter",
"once_cell", "once_cell",
"tempfile",
"unicode-segmentation", "unicode-segmentation",
"unicode-width 0.1.14", "unicode-width 0.1.14",
] ]

View File

@@ -14,8 +14,7 @@ members = [
"harmony_composer", "harmony_composer",
"harmony_inventory_agent", "harmony_inventory_agent",
"harmony_secret_derive", "harmony_secret_derive",
"harmony_secret", "harmony_secret", "adr/agent_discovery/mdns",
"adr/agent_discovery/mdns",
] ]
[workspace.package] [workspace.package]
@@ -51,7 +50,7 @@ k8s-openapi = { version = "0.25", features = ["v1_30"] }
serde_yaml = "0.9" serde_yaml = "0.9"
serde-value = "0.7" serde-value = "0.7"
http = "1.2" http = "1.2"
inquire = { version = "0.7", features = ["editor"] } inquire = "0.7"
convert_case = "0.8" convert_case = "0.8"
chrono = "0.4" chrono = "0.4"
similar = "2" similar = "2"
@@ -67,11 +66,5 @@ thiserror = "2.0.14"
serde = { version = "1.0.209", features = ["derive", "rc"] } serde = { version = "1.0.209", features = ["derive", "rc"] }
serde_json = "1.0.127" serde_json = "1.0.127"
askama = "0.14" askama = "0.14"
sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite"] } sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite" ] }
reqwest = { version = "0.12", features = [ reqwest = { version = "0.12", features = ["blocking", "stream", "rustls-tls", "http2", "json"], default-features = false }
"blocking",
"stream",
"rustls-tls",
"http2",
"json",
], default-features = false }

View File

@@ -27,6 +27,7 @@ async fn main() {
}; };
let application = Arc::new(RustWebapp { let application = Arc::new(RustWebapp {
name: "example-monitoring".to_string(), name: "example-monitoring".to_string(),
domain: Url::Url(url::Url::parse("https://rustapp.harmony.example.com").unwrap()),
project_root: PathBuf::from("./examples/rust/webapp"), project_root: PathBuf::from("./examples/rust/webapp"),
framework: Some(RustWebFramework::Leptos), framework: Some(RustWebFramework::Leptos),
service_port: 3000, service_port: 3000,

View File

@@ -17,6 +17,7 @@ use harmony_types::net::Url;
async fn main() { async fn main() {
let application = Arc::new(RustWebapp { let application = Arc::new(RustWebapp {
name: "test-rhob-monitoring".to_string(), name: "test-rhob-monitoring".to_string(),
domain: Url::Url(url::Url::parse("htps://some-fake-url").unwrap()),
project_root: PathBuf::from("./webapp"), // Relative from 'harmony-path' param project_root: PathBuf::from("./webapp"), // Relative from 'harmony-path' param
framework: Some(RustWebFramework::Leptos), framework: Some(RustWebFramework::Leptos),
service_port: 3000, service_port: 3000,

View File

@@ -19,6 +19,7 @@ use harmony_macros::hurl;
async fn main() { async fn main() {
let application = Arc::new(RustWebapp { let application = Arc::new(RustWebapp {
name: "harmony-example-rust-webapp".to_string(), name: "harmony-example-rust-webapp".to_string(),
domain: hurl!("https://rustapp.harmony.example.com"),
project_root: PathBuf::from("./webapp"), project_root: PathBuf::from("./webapp"),
framework: Some(RustWebFramework::Leptos), framework: Some(RustWebFramework::Leptos),
service_port: 3000, service_port: 3000,

View File

@@ -1,21 +1,23 @@
use std::{path::PathBuf, sync::Arc};
use harmony::{ use harmony::{
inventory::Inventory, inventory::Inventory,
modules::{ modules::{
application::{ application::{
ApplicationScore, RustWebFramework, RustWebapp, ApplicationScore, RustWebFramework, RustWebapp,
features::{ContinuousDelivery, Monitoring, rhob_monitoring::RHOBMonitoring}, features::{ContinuousDelivery, Monitoring},
}, },
monitoring::alert_channel::discord_alert_channel::DiscordWebhook, monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
}, },
topology::K8sAnywhereTopology, topology::K8sAnywhereTopology,
}; };
use harmony_macros::hurl; use harmony_types::net::Url;
use std::{path::PathBuf, sync::Arc};
#[tokio::main] #[tokio::main]
async fn main() { async fn main() {
let application = Arc::new(RustWebapp { let application = Arc::new(RustWebapp {
name: "harmony-example-tryrust".to_string(), name: "harmony-example-tryrust".to_string(),
domain: Url::Url(url::Url::parse("https://tryrust.harmony.example.com").unwrap()),
project_root: PathBuf::from("./tryrust.org"), project_root: PathBuf::from("./tryrust.org"),
framework: Some(RustWebFramework::Leptos), framework: Some(RustWebFramework::Leptos),
service_port: 8080, service_port: 8080,
@@ -23,7 +25,7 @@ async fn main() {
let discord_receiver = DiscordWebhook { let discord_receiver = DiscordWebhook {
name: "test-discord".to_string(), name: "test-discord".to_string(),
url: hurl!("https://discord.doesnt.exist.com"), url: Url::Url(url::Url::parse("https://discord.doesnt.exist.com").unwrap()),
}; };
let app = ApplicationScore { let app = ApplicationScore {
@@ -31,7 +33,7 @@ async fn main() {
Box::new(ContinuousDelivery { Box::new(ContinuousDelivery {
application: application.clone(), application: application.clone(),
}), }),
Box::new(RHOBMonitoring { Box::new(Monitoring {
application: application.clone(), application: application.clone(),
alert_receiver: vec![Box::new(discord_receiver)], alert_receiver: vec![Box::new(discord_receiver)],
}), }),

View File

@@ -10,11 +10,7 @@ testing = []
[dependencies] [dependencies]
hex = "0.4" hex = "0.4"
reqwest = { version = "0.11", features = [ reqwest = { version = "0.11", features = ["blocking", "json", "rustls-tls"], default-features = false }
"blocking",
"json",
"rustls-tls",
], default-features = false }
russh = "0.45.0" russh = "0.45.0"
rust-ipmi = "0.1.1" rust-ipmi = "0.1.1"
semver = "1.0.23" semver = "1.0.23"

View File

@@ -1,7 +0,0 @@
use crate::topology::PreparationError;
use async_trait::async_trait;
#[async_trait]
pub trait Ingress {
async fn get_domain(&self, service: &str) -> Result<String, PreparationError>;
}

View File

@@ -1,7 +1,6 @@
use std::{process::Command, sync::Arc}; use std::{process::Command, sync::Arc};
use async_trait::async_trait; use async_trait::async_trait;
use kube::api::GroupVersionKind;
use log::{debug, info, warn}; use log::{debug, info, warn};
use serde::Serialize; use serde::Serialize;
use tokio::sync::OnceCell; use tokio::sync::OnceCell;
@@ -23,7 +22,6 @@ use crate::{
}, },
}, },
score::Score, score::Score,
topology::ingress::Ingress,
}; };
use super::{ use super::{
@@ -200,26 +198,6 @@ impl K8sAnywhereTopology {
} }
} }
async fn openshift_ingress_operator_available(&self) -> Result<(), PreparationError> {
let client = self.k8s_client().await?;
let gvk = GroupVersionKind {
group: "operator.openshift.io".into(),
version: "v1".into(),
kind: "IngressController".into(),
};
let ic = client
.get_resource_json_value("default", Some("openshift-ingress-operator"), &gvk)
.await?;
let ready_replicas = ic.data["status"]["availableReplicas"].as_i64().unwrap_or(0);
if ready_replicas >= 1 {
return Ok(());
} else {
return Err(PreparationError::new(
"openshift-ingress-operator not available".to_string(),
));
}
}
fn is_helm_available(&self) -> Result<(), String> { fn is_helm_available(&self) -> Result<(), String> {
let version_result = Command::new("helm") let version_result = Command::new("helm")
.arg("version") .arg("version")
@@ -372,8 +350,6 @@ impl K8sAnywhereTopology {
if let Some(Some(k8s_state)) = self.k8s_state.get() { if let Some(Some(k8s_state)) = self.k8s_state.get() {
match k8s_state.source { match k8s_state.source {
K8sSource::LocalK3d => { K8sSource::LocalK3d => {
warn!("Installing observability operator is not supported on LocalK3d source");
return Ok(PreparationOutcome::Noop);
debug!("installing cluster observability operator"); debug!("installing cluster observability operator");
todo!(); todo!();
let op_score = let op_score =
@@ -552,7 +528,7 @@ impl MultiTargetTopology for K8sAnywhereTopology {
match self.config.harmony_profile.to_lowercase().as_str() { match self.config.harmony_profile.to_lowercase().as_str() {
"staging" => DeploymentTarget::Staging, "staging" => DeploymentTarget::Staging,
"production" => DeploymentTarget::Production, "production" => DeploymentTarget::Production,
_ => todo!("HARMONY_PROFILE must be set when use_local_k3d is false"), _ => todo!("HARMONY_PROFILE must be set when use_local_k3d is not set"),
} }
} }
} }
@@ -574,45 +550,3 @@ impl TenantManager for K8sAnywhereTopology {
.await .await
} }
} }
#[async_trait]
impl Ingress for K8sAnywhereTopology {
//TODO this is specifically for openshift/okd which violates the k8sanywhere idea
async fn get_domain(&self, service: &str) -> Result<String, PreparationError> {
let client = self.k8s_client().await?;
if let Some(Some(k8s_state)) = self.k8s_state.get() {
match k8s_state.source {
K8sSource::LocalK3d => Ok(format!("{service}.local.k3d")),
K8sSource::Kubeconfig => {
self.openshift_ingress_operator_available().await?;
let gvk = GroupVersionKind {
group: "operator.openshift.io".into(),
version: "v1".into(),
kind: "IngressController".into(),
};
let ic = client
.get_resource_json_value(
"default",
Some("openshift-ingress-operator"),
&gvk,
)
.await
.map_err(|_| {
PreparationError::new("Failed to fetch IngressController".to_string())
})?;
match ic.data["status"]["domain"].as_str() {
Some(domain) => Ok(format!("{service}.{domain}")),
None => Err(PreparationError::new("Could not find domain".to_string())),
}
}
}
} else {
Err(PreparationError::new(
"Cannot get domain: unable to detect K8s state".to_string(),
))
}
}
}

View File

@@ -1,5 +1,4 @@
mod ha_cluster; mod ha_cluster;
pub mod ingress;
use harmony_types::net::IpAddress; use harmony_types::net::IpAddress;
mod host_binding; mod host_binding;
mod http; mod http;

View File

@@ -14,9 +14,7 @@ use crate::{
features::{ArgoApplication, ArgoHelmScore}, features::{ArgoApplication, ArgoHelmScore},
}, },
score::Score, score::Score,
topology::{ topology::{DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology},
DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology, ingress::Ingress,
},
}; };
/// ContinuousDelivery in Harmony provides this functionality : /// ContinuousDelivery in Harmony provides this functionality :
@@ -52,6 +50,55 @@ pub struct ContinuousDelivery<A: OCICompliant + HelmPackage> {
} }
impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> { impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
pub async fn deploy<T>(&self, topology: &T, helm_chart: String, image: String) -> Result<(), String>
where
T: Topology + HelmCommand + MultiTargetTopology + K8sclient + 'static,
{
// TODO: this is a temporary hack for demo purposes, the deployment target should be driven
// by the topology only and we should not have to know how to perform tasks like this for
// which the topology should be responsible.
//
// That said, this will require some careful architectural decisions, since the concept of
// deployment targets / profiles is probably a layer of complexity that we won't be
// completely able to avoid
//
// I'll try something for now that must be thought through after : att a deployment_profile
// function to the topology trait that returns a profile, then anybody who needs it can
// access it. This forces every Topology to understand the concept of targets though... So
// instead I'll create a new Capability which is MultiTargetTopology and we'll see how it
// goes. It still does not feel right though.
//
// https://git.nationtech.io/NationTech/harmony/issues/106
match topology.current_target() {
DeploymentTarget::LocalDev => {
info!("Deploying {} locally...", self.application.name());
self.deploy_to_local_k3d(self.application.name(), helm_chart, image)
.await?;
}
target => {
info!("Deploying {} to target {target:?}", self.application.name());
let score = ArgoHelmScore {
namespace: format!("{}", self.application.name()),
openshift: true,
argo_apps: vec![ArgoApplication::from(CDApplicationConfig {
// helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0
version: Version::from("0.1.0").unwrap(),
helm_chart_repo_url: "hub.nationtech.io/harmony".to_string(),
helm_chart_name: format!("{}-chart", self.application.name()),
values_overrides: None,
name: format!("{}", self.application.name()),
namespace: format!("{}", self.application.name()),
})],
};
score
.interpret(&Inventory::empty(), topology)
.await
.unwrap();
}
};
Ok(())
}
async fn deploy_to_local_k3d( async fn deploy_to_local_k3d(
&self, &self,
app_name: String, app_name: String,
@@ -138,74 +185,24 @@ impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
#[async_trait] #[async_trait]
impl< impl<
A: OCICompliant + HelmPackage + Clone + 'static, A: OCICompliant + HelmPackage + Clone + 'static,
T: Topology + HelmCommand + MultiTargetTopology + K8sclient + Ingress + 'static, T: Topology + HelmCommand + MultiTargetTopology + K8sclient + 'static,
> ApplicationFeature<T> for ContinuousDelivery<A> > ApplicationFeature<T> for ContinuousDelivery<A>
{ {
async fn ensure_installed(&self, topology: &T) -> Result<(), String> { async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
let image = self.application.image_name(); let image = self.application.image_name();
let domain = topology
.get_domain(&self.application.name())
.await
.map_err(|e| e.to_string())?;
// TODO Write CI/CD workflow files // TODO Write CI/CD workflow files
// we can autotedect the CI type using the remote url (default to github action for github // we can autotedect the CI type using the remote url (default to github action for github
// url, etc..) // url, etc..)
// Or ask for it when unknown // Or ask for it when unknown
let helm_chart = self let helm_chart = self.application.build_push_helm_package(&image).await?;
.application
.build_push_helm_package(&image, &domain)
.await?;
// TODO: Make building image configurable/skippable if image already exists (prompt)") // TODO: Make building image configurable/skippable if image already exists (prompt)")
// https://git.nationtech.io/NationTech/harmony/issues/104 // https://git.nationtech.io/NationTech/harmony/issues/104
let image = self.application.build_push_oci_image().await?; let image = self.application.build_push_oci_image().await?;
// TODO: this is a temporary hack for demo purposes, the deployment target should be driven self.deploy(topology, helm_chart, image).await
// by the topology only and we should not have to know how to perform tasks like this for
// which the topology should be responsible.
//
// That said, this will require some careful architectural decisions, since the concept of
// deployment targets / profiles is probably a layer of complexity that we won't be
// completely able to avoid
//
// I'll try something for now that must be thought through after : att a deployment_profile
// function to the topology trait that returns a profile, then anybody who needs it can
// access it. This forces every Topology to understand the concept of targets though... So
// instead I'll create a new Capability which is MultiTargetTopology and we'll see how it
// goes. It still does not feel right though.
//
// https://git.nationtech.io/NationTech/harmony/issues/106
match topology.current_target() {
DeploymentTarget::LocalDev => {
info!("Deploying {} locally...", self.application.name());
self.deploy_to_local_k3d(self.application.name(), helm_chart, image)
.await?;
}
target => {
info!("Deploying {} to target {target:?}", self.application.name());
let score = ArgoHelmScore {
namespace: format!("{}", self.application.name()),
openshift: true,
argo_apps: vec![ArgoApplication::from(CDApplicationConfig {
// helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0
version: Version::from("0.1.0").unwrap(),
helm_chart_repo_url: "hub.nationtech.io/harmony".to_string(),
helm_chart_name: format!("{}-chart", self.application.name()),
values_overrides: None,
name: format!("{}", self.application.name()),
namespace: format!("{}", self.application.name()),
})],
};
score
.interpret(&Inventory::empty(), topology)
.await
.unwrap();
}
};
Ok(())
} }
fn name(&self) -> String { fn name(&self) -> String {
"ContinuousDelivery".to_string() "ContinuousDelivery".to_string()

View File

@@ -13,8 +13,7 @@ use crate::{
modules::helm::chart::{HelmChartScore, HelmRepository}, modules::helm::chart::{HelmChartScore, HelmRepository},
score::Score, score::Score,
topology::{ topology::{
HelmCommand, K8sclient, PreparationError, PreparationOutcome, Topology, ingress::Ingress, HelmCommand, K8sclient, PreparationError, PreparationOutcome, Topology, k8s::K8sClient,
k8s::K8sClient,
}, },
}; };
use harmony_types::id::Id; use harmony_types::id::Id;
@@ -28,7 +27,7 @@ pub struct ArgoHelmScore {
pub argo_apps: Vec<ArgoApplication>, pub argo_apps: Vec<ArgoApplication>,
} }
impl<T: Topology + HelmCommand + K8sclient + Ingress> Score<T> for ArgoHelmScore { impl<T: Topology + HelmCommand + K8sclient> Score<T> for ArgoHelmScore {
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> { fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
Box::new(ArgoInterpret { Box::new(ArgoInterpret {
score: self.clone(), score: self.clone(),
@@ -48,14 +47,17 @@ pub struct ArgoInterpret {
} }
#[async_trait] #[async_trait]
impl<T: Topology + K8sclient + HelmCommand + Ingress> Interpret<T> for ArgoInterpret { impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for ArgoInterpret {
async fn execute( async fn execute(
&self, &self,
inventory: &Inventory, inventory: &Inventory,
topology: &T, topology: &T,
) -> Result<Outcome, InterpretError> { ) -> Result<Outcome, InterpretError> {
let k8s_client = topology.k8s_client().await?; let k8s_client = topology.k8s_client().await?;
let domain = topology.get_domain("argo").await?; let domain = self
.get_host_domain(k8s_client.clone(), self.score.openshift)
.await?;
let domain = format!("argo.{domain}");
let helm_score = let helm_score =
argo_helm_chart_score(&self.score.namespace, self.score.openshift, &domain); argo_helm_chart_score(&self.score.namespace, self.score.openshift, &domain);

View File

@@ -1,5 +1,6 @@
mod endpoint; mod endpoint;
pub mod rhob_monitoring; pub mod rhob_monitoring;
mod multisite;
pub use endpoint::*; pub use endpoint::*;
mod monitoring; mod monitoring;

View File

@@ -1,8 +1,10 @@
use std::sync::Arc;
use crate::modules::application::{Application, ApplicationFeature}; use crate::modules::application::{Application, ApplicationFeature};
use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore; use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus; use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus;
use crate::topology::MultiTargetTopology; use crate::topology::MultiTargetTopology;
use crate::topology::ingress::Ingress;
use crate::{ use crate::{
inventory::Inventory, inventory::Inventory,
modules::monitoring::{ modules::monitoring::{
@@ -17,12 +19,8 @@ use crate::{
}; };
use async_trait::async_trait; use async_trait::async_trait;
use base64::{Engine as _, engine::general_purpose}; use base64::{Engine as _, engine::general_purpose};
use harmony_secret::SecretManager;
use harmony_secret_derive::Secret;
use harmony_types::net::Url; use harmony_types::net::Url;
use log::{debug, info}; use log::{debug, info};
use serde::{Deserialize, Serialize};
use std::sync::Arc;
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct Monitoring { pub struct Monitoring {
@@ -38,9 +36,8 @@ impl<
+ TenantManager + TenantManager
+ K8sclient + K8sclient
+ MultiTargetTopology + MultiTargetTopology
+ PrometheusApplicationMonitoring<CRDPrometheus> + std::fmt::Debug
+ Ingress + PrometheusApplicationMonitoring<CRDPrometheus>,
+ std::fmt::Debug,
> ApplicationFeature<T> for Monitoring > ApplicationFeature<T> for Monitoring
{ {
async fn ensure_installed(&self, topology: &T) -> Result<(), String> { async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
@@ -50,7 +47,6 @@ impl<
.await .await
.map(|ns| ns.name.clone()) .map(|ns| ns.name.clone())
.unwrap_or_else(|| self.application.name()); .unwrap_or_else(|| self.application.name());
let domain = topology.get_domain("ntfy").await.unwrap();
let mut alerting_score = ApplicationMonitoringScore { let mut alerting_score = ApplicationMonitoringScore {
sender: CRDPrometheus { sender: CRDPrometheus {
@@ -62,17 +58,19 @@ impl<
}; };
let ntfy = NtfyScore { let ntfy = NtfyScore {
namespace: namespace.clone(), namespace: namespace.clone(),
host: domain, host: "ntfy.harmonydemo.apps.ncd0.harmony.mcd".to_string(),
}; };
ntfy.interpret(&Inventory::empty(), topology) ntfy.interpret(&Inventory::empty(), topology)
.await .await
.map_err(|e| e.to_string())?; .map_err(|e| e.to_string())?;
let config = SecretManager::get_or_prompt::<NtfyAuth>().await.unwrap(); let ntfy_default_auth_username = "harmony";
let ntfy_default_auth_password = "harmony";
let ntfy_default_auth_header = format!( let ntfy_default_auth_header = format!(
"Basic {}", "Basic {}",
general_purpose::STANDARD.encode(format!("{}:{}", config.username, config.password)) general_purpose::STANDARD.encode(format!(
"{ntfy_default_auth_username}:{ntfy_default_auth_password}"
))
); );
debug!("ntfy_default_auth_header: {ntfy_default_auth_header}"); debug!("ntfy_default_auth_header: {ntfy_default_auth_header}");
@@ -102,17 +100,9 @@ impl<
.interpret(&Inventory::empty(), topology) .interpret(&Inventory::empty(), topology)
.await .await
.map_err(|e| e.to_string())?; .map_err(|e| e.to_string())?;
Ok(()) Ok(())
} }
fn name(&self) -> String { fn name(&self) -> String {
"Monitoring".to_string() "Monitoring".to_string()
} }
} }
#[derive(Secret, Serialize, Deserialize, Clone, Debug)]
struct NtfyAuth {
username: String,
password: String,
}

View File

@@ -0,0 +1,49 @@
use std::sync::Arc;
use crate::modules::application::{Application, ApplicationFeature, StatelessApplication};
use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus;
use crate::topology::{K8sAnywhereTopology, MultiTargetTopology};
use crate::{
inventory::Inventory,
modules::monitoring::{
alert_channel::webhook_receiver::WebhookReceiver, ntfy::ntfy::NtfyScore,
},
score::Score,
topology::{HelmCommand, K8sclient, Topology, tenant::TenantManager},
};
use crate::{
modules::prometheus::prometheus::PrometheusApplicationMonitoring,
topology::oberservability::monitoring::AlertReceiver,
};
use async_trait::async_trait;
use base64::{Engine as _, engine::general_purpose};
use harmony_types::net::Url;
use log::{debug, info};
trait DebugTopology: Topology + std::fmt::Debug {}
#[derive(Debug, Clone)]
pub struct Multisite {
app: Arc<dyn StatelessApplication>,
secondary_site: Arc<K8sAnywhereTopology>,
}
#[async_trait]
impl<T: Topology> ApplicationFeature<T> for Multisite {
async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
todo!(
"
- Find a way to get pvs for this application
- find the pv csi volumes uuid
- run rbd mirror image enable --pool mirrored-pool csi-vol-<UUID_PV> snapshot
- enjoy
"
)
}
fn name(&self) -> String {
"Multisite".to_string()
}
}

View File

@@ -6,7 +6,6 @@ use crate::modules::monitoring::application_monitoring::rhobs_application_monito
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability; use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;
use crate::topology::MultiTargetTopology; use crate::topology::MultiTargetTopology;
use crate::topology::ingress::Ingress;
use crate::{ use crate::{
inventory::Inventory, inventory::Inventory,
modules::monitoring::{ modules::monitoring::{
@@ -38,7 +37,6 @@ impl<
+ TenantManager + TenantManager
+ K8sclient + K8sclient
+ MultiTargetTopology + MultiTargetTopology
+ Ingress
+ std::fmt::Debug + std::fmt::Debug
+ PrometheusApplicationMonitoring<RHOBObservability>, + PrometheusApplicationMonitoring<RHOBObservability>,
> ApplicationFeature<T> for RHOBMonitoring > ApplicationFeature<T> for RHOBMonitoring
@@ -61,10 +59,7 @@ impl<
}; };
let ntfy = NtfyScore { let ntfy = NtfyScore {
namespace: namespace.clone(), namespace: namespace.clone(),
host: topology host: "ntfy.harmonydemo.apps.ncd0.harmony.mcd".to_string(),
.get_domain("ntfy")
.await
.map_err(|e| format!("Could not get domain {e}"))?,
}; };
ntfy.interpret(&Inventory::empty(), topology) ntfy.interpret(&Inventory::empty(), topology)
.await .await

View File

@@ -2,6 +2,10 @@ mod feature;
pub mod features; pub mod features;
pub mod oci; pub mod oci;
mod rust; mod rust;
mod stateless;
mod stateful;
pub use stateless::*;
pub use stateful::*;
use std::sync::Arc; use std::sync::Arc;
pub use feature::*; pub use feature::*;

View File

@@ -1,6 +1,7 @@
use super::Application;
use async_trait::async_trait; use async_trait::async_trait;
use super::Application;
#[async_trait] #[async_trait]
pub trait OCICompliant: Application { pub trait OCICompliant: Application {
async fn build_push_oci_image(&self) -> Result<String, String>; // TODO consider using oci-spec and friends crates here async fn build_push_oci_image(&self) -> Result<String, String>; // TODO consider using oci-spec and friends crates here
@@ -16,10 +17,5 @@ pub trait HelmPackage: Application {
/// ///
/// # Arguments /// # Arguments
/// * `image_url` - The full URL of the OCI container image to be used in the Deployment. /// * `image_url` - The full URL of the OCI container image to be used in the Deployment.
/// * `domain` - The domain where the application is hosted. async fn build_push_helm_package(&self, image_url: &str) -> Result<String, String>;
async fn build_push_helm_package(
&self,
image_url: &str,
domain: &str,
) -> Result<String, String>;
} }

View File

@@ -1,4 +1,5 @@
use std::fs::{self}; use std::fs::{self, File};
use std::io::Read;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::process; use std::process;
use std::sync::Arc; use std::sync::Arc;
@@ -12,11 +13,12 @@ use dockerfile_builder::instruction_builder::CopyBuilder;
use futures_util::StreamExt; use futures_util::StreamExt;
use log::{debug, info, log_enabled}; use log::{debug, info, log_enabled};
use serde::Serialize; use serde::Serialize;
use tar::{Builder, Header}; use tar::{Archive, Builder, Header};
use walkdir::WalkDir; use walkdir::WalkDir;
use crate::config::{REGISTRY_PROJECT, REGISTRY_URL}; use crate::config::{REGISTRY_PROJECT, REGISTRY_URL};
use crate::{score::Score, topology::Topology}; use crate::{score::Score, topology::Topology};
use harmony_types::net::Url;
use super::{Application, ApplicationFeature, ApplicationInterpret, HelmPackage, OCICompliant}; use super::{Application, ApplicationFeature, ApplicationInterpret, HelmPackage, OCICompliant};
@@ -56,6 +58,7 @@ pub enum RustWebFramework {
#[derive(Debug, Clone, Serialize)] #[derive(Debug, Clone, Serialize)]
pub struct RustWebapp { pub struct RustWebapp {
pub name: String, pub name: String,
pub domain: Url,
/// The path to the root of the Rust project to be containerized. /// The path to the root of the Rust project to be containerized.
pub project_root: PathBuf, pub project_root: PathBuf,
pub service_port: u32, pub service_port: u32,
@@ -70,17 +73,12 @@ impl Application for RustWebapp {
#[async_trait] #[async_trait]
impl HelmPackage for RustWebapp { impl HelmPackage for RustWebapp {
async fn build_push_helm_package( async fn build_push_helm_package(&self, image_url: &str) -> Result<String, String> {
&self,
image_url: &str,
domain: &str,
) -> Result<String, String> {
info!("Starting Helm chart build and push for '{}'", self.name); info!("Starting Helm chart build and push for '{}'", self.name);
// 1. Create the Helm chart files on disk. // 1. Create the Helm chart files on disk.
let chart_dir = self let chart_dir = self
.create_helm_chart_files(image_url, domain) .create_helm_chart_files(image_url)
.await
.map_err(|e| format!("Failed to create Helm chart files: {}", e))?; .map_err(|e| format!("Failed to create Helm chart files: {}", e))?;
info!("Successfully created Helm chart files in {:?}", chart_dir); info!("Successfully created Helm chart files in {:?}", chart_dir);
@@ -208,7 +206,7 @@ impl RustWebapp {
} }
} }
///normalizes timestamp and ignores files that will bust the docker cach ///normalizes timestamp and ignores files that will bust the docker cache
async fn create_deterministic_tar( async fn create_deterministic_tar(
&self, &self,
project_root: &std::path::Path, project_root: &std::path::Path,
@@ -222,7 +220,6 @@ impl RustWebapp {
".git", ".git",
".github", ".github",
".harmony_generated", ".harmony_generated",
"harmony",
"node_modules", "node_modules",
]; ];
let mut entries: Vec<_> = WalkDir::new(project_root) let mut entries: Vec<_> = WalkDir::new(project_root)
@@ -268,6 +265,8 @@ impl RustWebapp {
let docker = Docker::connect_with_socket_defaults().unwrap(); let docker = Docker::connect_with_socket_defaults().unwrap();
// let push_options = PushImageOptionsBuilder::new().tag(tag);
let mut push_image_stream = docker.push_image( let mut push_image_stream = docker.push_image(
image_tag, image_tag,
Some(PushImageOptionsBuilder::new().build()), Some(PushImageOptionsBuilder::new().build()),
@@ -275,8 +274,6 @@ impl RustWebapp {
); );
while let Some(msg) = push_image_stream.next().await { while let Some(msg) = push_image_stream.next().await {
// let msg = msg?;
// TODO this fails silently, for some reason bollard cannot push to hub.nationtech.io
debug!("Message: {msg:?}"); debug!("Message: {msg:?}");
} }
@@ -411,10 +408,9 @@ impl RustWebapp {
} }
/// Creates all necessary files for a basic Helm chart. /// Creates all necessary files for a basic Helm chart.
async fn create_helm_chart_files( fn create_helm_chart_files(
&self, &self,
image_url: &str, image_url: &str,
domain: &str,
) -> Result<PathBuf, Box<dyn std::error::Error>> { ) -> Result<PathBuf, Box<dyn std::error::Error>> {
let chart_name = format!("{}-chart", self.name); let chart_name = format!("{}-chart", self.name);
let chart_dir = self let chart_dir = self
@@ -464,15 +460,21 @@ ingress:
enabled: true enabled: true
# Annotations for cert-manager to handle SSL. # Annotations for cert-manager to handle SSL.
annotations: annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
# Add other annotations like nginx ingress class if needed # Add other annotations like nginx ingress class if needed
# kubernetes.io/ingress.class: nginx # kubernetes.io/ingress.class: nginx
hosts: hosts:
- host: {} - host: chart-example.local
paths: paths:
- path: / - path: /
pathType: ImplementationSpecific pathType: ImplementationSpecific
tls:
- secretName: {}-tls
hosts:
- chart-example.local
"#, "#,
chart_name, image_repo, image_tag, self.service_port, domain, chart_name, image_repo, image_tag, self.service_port, self.name
); );
fs::write(chart_dir.join("values.yaml"), values_yaml)?; fs::write(chart_dir.join("values.yaml"), values_yaml)?;

View File

@@ -0,0 +1,6 @@
use crate::modules::application::Application;
/// A StatefulApplication is an application bundle that writes persistent data.
///
/// This will enable backup features, stateful multisite replication, etc.
pub trait StatefulApplication: Application {}

View File

@@ -0,0 +1,26 @@
use crate::modules::application::{Application, features::ContinuousDeliveryApplication};
/// Marker trait for a stateless application that can be deployed anywhere without worrying
/// about data.
///
/// This includes Applications fitting these categories:
///
/// - Application with all files built into the docker image and never written to, can be mounted
///   read-only
/// - Application writing to hard drive on an ephemeral volume that can be lost at any time and
///   does not require any replication/backup logic to operate
///   - Not supported : an application that writes state to a volume that must be shared or kept
///     to maintain a quorum across various instances
/// - Application connecting to a database/datastore accessible from anywhere, such as:
///   - A public bucket endpoint
///   - A publicly accessible database
/// - Application connecting to a private database external to this application, accessible from
///   the deployment target
///   - Ensuring the private database is reachable is out of scope of this trait (for now)
///
/// The entire application definition **must not** require any persistent volume or include a
/// deployment component depending on persistent data such as a transitive PostgreSQL helm chart.
///
/// Typically, applications that can be autoscaled without additional complexity fit the
/// StatelessApplication requirements.
///
/// The `ContinuousDeliveryApplication` bound ensures every stateless application also carries
/// the delivery machinery needed to redeploy it freely to any target.
pub trait StatelessApplication: Application + ContinuousDeliveryApplication {}

View File

@@ -153,10 +153,6 @@ impl<T: Topology + HelmCommand> Interpret<T> for HelmChartInterpret {
let yaml_path: Option<&Path> = match self.score.values_yaml.as_ref() { let yaml_path: Option<&Path> = match self.score.values_yaml.as_ref() {
Some(yaml_str) => { Some(yaml_str) => {
tf = temp_file::with_contents(yaml_str.as_bytes()); tf = temp_file::with_contents(yaml_str.as_bytes());
debug!(
"values yaml string for chart {} :\n {yaml_str}",
self.score.chart_name
);
Some(tf.path()) Some(tf.path())
} }
None => None, None => None,

View File

@@ -40,7 +40,6 @@ pub struct K8sIngressScore {
pub path: Option<IngressPath>, pub path: Option<IngressPath>,
pub path_type: Option<PathType>, pub path_type: Option<PathType>,
pub namespace: Option<fqdn::FQDN>, pub namespace: Option<fqdn::FQDN>,
pub ingress_class_name: Option<String>,
} }
impl<T: Topology + K8sclient> Score<T> for K8sIngressScore { impl<T: Topology + K8sclient> Score<T> for K8sIngressScore {
@@ -55,18 +54,12 @@ impl<T: Topology + K8sclient> Score<T> for K8sIngressScore {
None => PathType::Prefix, None => PathType::Prefix,
}; };
let ingress_class = match self.ingress_class_name.clone() {
Some(ingress_class_name) => ingress_class_name,
None => format!("\"default\""),
};
let ingress = json!( let ingress = json!(
{ {
"metadata": { "metadata": {
"name": self.name.to_string(), "name": self.name.to_string(),
}, },
"spec": { "spec": {
"ingressClassName": ingress_class.as_str(),
"rules": [ "rules": [
{ "host": self.host.to_string(), { "host": self.host.to_string(),
"http": { "http": {

View File

@@ -147,7 +147,6 @@ impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for LAMPInterpret {
port: 8080, port: 8080,
path: Some(ingress_path), path: Some(ingress_path),
path_type: None, path_type: None,
ingress_class_name: None,
namespace: self namespace: self
.get_namespace() .get_namespace()
.map(|nbs| fqdn!(nbs.to_string().as_str())), .map(|nbs| fqdn!(nbs.to_string().as_str())),

View File

@@ -4,9 +4,7 @@ use kube::CustomResource;
use schemars::JsonSchema; use schemars::JsonSchema;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::{ use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::LabelSelector;
LabelSelector, PrometheusSpec,
};
/// MonitoringStack CRD for monitoring.rhobs/v1alpha1 /// MonitoringStack CRD for monitoring.rhobs/v1alpha1
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)] #[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]

View File

@@ -45,12 +45,6 @@ service:
ingress: ingress:
enabled: {ingress_enabled} enabled: {ingress_enabled}
hosts:
- host: {host}
paths:
- path: /
pathType: ImplementationSpecific
route: route:
enabled: {route_enabled} enabled: {route_enabled}

View File

@@ -21,8 +21,8 @@ pub fn pod_failed() -> PrometheusAlertRule {
pub fn alert_container_restarting() -> PrometheusAlertRule { pub fn alert_container_restarting() -> PrometheusAlertRule {
PrometheusAlertRule { PrometheusAlertRule {
alert: "ContainerRestarting".into(), alert: "ContainerRestarting".into(),
expr: "increase(kube_pod_container_status_restarts_total[30s]) > 3".into(), expr: "increase(kube_pod_container_status_restarts_total[5m]) > 3".into(),
r#for: Some("30s".into()), r#for: Some("5m".into()),
labels: HashMap::from([("severity".into(), "warning".into())]), labels: HashMap::from([("severity".into(), "warning".into())]),
annotations: HashMap::from([ annotations: HashMap::from([
( (

View File

@@ -1,4 +1,3 @@
use fqdn::fqdn;
use std::fs; use std::fs;
use std::{collections::BTreeMap, sync::Arc}; use std::{collections::BTreeMap, sync::Arc};
use tempfile::tempdir; use tempfile::tempdir;
@@ -9,7 +8,6 @@ use log::{debug, info};
use serde::Serialize; use serde::Serialize;
use std::process::Command; use std::process::Command;
use crate::modules::k8s::ingress::{K8sIngressScore, PathType};
use crate::modules::monitoring::kube_prometheus::crd::grafana_default_dashboard::build_default_dashboard; use crate::modules::monitoring::kube_prometheus::crd::grafana_default_dashboard::build_default_dashboard;
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability; use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanagers::{ use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanagers::{
@@ -25,18 +23,12 @@ use crate::modules::monitoring::kube_prometheus::crd::rhob_monitoring_stack::{
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheus_rules::{ use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheus_rules::{
PrometheusRule, PrometheusRuleSpec, RuleGroup, PrometheusRule, PrometheusRuleSpec, RuleGroup,
}; };
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::{ use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::LabelSelector;
AlertmanagerEndpoints, LabelSelector, PrometheusSpec, PrometheusSpecAlerting,
};
use crate::modules::monitoring::kube_prometheus::crd::rhob_role::{
build_prom_role, build_prom_rolebinding, build_prom_service_account,
};
use crate::modules::monitoring::kube_prometheus::crd::rhob_service_monitor::{ use crate::modules::monitoring::kube_prometheus::crd::rhob_service_monitor::{
ServiceMonitor, ServiceMonitorSpec, ServiceMonitor, ServiceMonitorSpec,
}; };
use crate::score::Score; use crate::score::Score;
use crate::topology::ingress::Ingress;
use crate::topology::oberservability::monitoring::AlertReceiver; use crate::topology::oberservability::monitoring::AlertReceiver;
use crate::topology::{K8sclient, Topology, k8s::K8sClient}; use crate::topology::{K8sclient, Topology, k8s::K8sClient};
use crate::{ use crate::{
@@ -56,8 +48,8 @@ pub struct RHOBAlertingScore {
pub prometheus_rules: Vec<RuleGroup>, pub prometheus_rules: Vec<RuleGroup>,
} }
impl<T: Topology + K8sclient + Ingress + PrometheusApplicationMonitoring<RHOBObservability>> impl<T: Topology + K8sclient + PrometheusApplicationMonitoring<RHOBObservability>> Score<T>
Score<T> for RHOBAlertingScore for RHOBAlertingScore
{ {
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> { fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
Box::new(RHOBAlertingInterpret { Box::new(RHOBAlertingInterpret {
@@ -82,20 +74,19 @@ pub struct RHOBAlertingInterpret {
} }
#[async_trait] #[async_trait]
impl<T: Topology + K8sclient + Ingress + PrometheusApplicationMonitoring<RHOBObservability>> impl<T: Topology + K8sclient + PrometheusApplicationMonitoring<RHOBObservability>> Interpret<T>
Interpret<T> for RHOBAlertingInterpret for RHOBAlertingInterpret
{ {
async fn execute( async fn execute(
&self, &self,
inventory: &Inventory, _inventory: &Inventory,
topology: &T, topology: &T,
) -> Result<Outcome, InterpretError> { ) -> Result<Outcome, InterpretError> {
let client = topology.k8s_client().await.unwrap(); let client = topology.k8s_client().await.unwrap();
self.ensure_grafana_operator().await?; self.ensure_grafana_operator().await?;
self.install_prometheus(inventory, topology, &client) self.install_prometheus(&client).await?;
.await?;
self.install_client_kube_metrics().await?; self.install_client_kube_metrics().await?;
self.install_grafana(inventory, topology, &client).await?; self.install_grafana(&client).await?;
self.install_receivers(&self.sender, &self.receivers) self.install_receivers(&self.sender, &self.receivers)
.await?; .await?;
self.install_rules(&self.prometheus_rules, &client).await?; self.install_rules(&self.prometheus_rules, &client).await?;
@@ -221,8 +212,7 @@ impl RHOBAlertingInterpret {
let output = Command::new("helm") let output = Command::new("helm")
.args([ .args([
"upgrade", "install",
"--install",
"grafana-operator", "grafana-operator",
"grafana-operator/grafana-operator", "grafana-operator/grafana-operator",
"--namespace", "--namespace",
@@ -236,7 +226,7 @@ impl RHOBAlertingInterpret {
if !output.status.success() { if !output.status.success() {
return Err(InterpretError::new(format!( return Err(InterpretError::new(format!(
"helm upgrade --install failed:\nstdout: {}\nstderr: {}", "helm install failed:\nstdout: {}\nstderr: {}",
String::from_utf8_lossy(&output.stdout), String::from_utf8_lossy(&output.stdout),
String::from_utf8_lossy(&output.stderr) String::from_utf8_lossy(&output.stderr)
))); )));
@@ -248,31 +238,25 @@ impl RHOBAlertingInterpret {
))) )))
} }
async fn install_prometheus<T: Topology + K8sclient + Ingress>( async fn install_prometheus(&self, client: &Arc<K8sClient>) -> Result<Outcome, InterpretError> {
&self,
inventory: &Inventory,
topology: &T,
client: &Arc<K8sClient>,
) -> Result<Outcome, InterpretError> {
debug!( debug!(
"installing crd-prometheuses in namespace {}", "installing crd-prometheuses in namespace {}",
self.sender.namespace.clone() self.sender.namespace.clone()
); );
debug!("building role/rolebinding/serviceaccount for crd-prometheus");
let stack = MonitoringStack { let stack = MonitoringStack {
metadata: ObjectMeta { metadata: ObjectMeta {
name: Some(format!("{}-monitoring", self.sender.namespace.clone()).into()), name: Some(format!("{}-monitoring", self.sender.namespace.clone()).into()),
namespace: Some(self.sender.namespace.clone()), namespace: Some(self.sender.namespace.clone()),
labels: Some([("monitoring-stack".into(), "true".into())].into()), labels: Some([("coo".into(), "example".into())].into()),
..Default::default() ..Default::default()
}, },
spec: MonitoringStackSpec { spec: MonitoringStackSpec {
log_level: Some("debug".into()), log_level: Some("debug".into()),
retention: Some("1d".into()), retention: Some("1d".into()),
resource_selector: Some(LabelSelector { resource_selector: Some(LabelSelector {
match_labels: Default::default(), match_labels: [("app".into(), "demo".into())].into(),
match_expressions: vec![], ..Default::default()
}), }),
}, },
}; };
@@ -281,42 +265,6 @@ impl RHOBAlertingInterpret {
.apply(&stack, Some(&self.sender.namespace.clone())) .apply(&stack, Some(&self.sender.namespace.clone()))
.await .await
.map_err(|e| InterpretError::new(e.to_string()))?; .map_err(|e| InterpretError::new(e.to_string()))?;
let alert_manager_domain = topology
.get_domain(&format!("alert-manager-{}", self.sender.namespace.clone()))
.await?;
let name = format!("{}-alert-manager", self.sender.namespace.clone());
let backend_service = format!("alertmanager-operated");
let namespace = self.sender.namespace.clone();
let alert_manager_ingress = K8sIngressScore {
name: fqdn!(&name),
host: fqdn!(&alert_manager_domain),
backend_service: fqdn!(&backend_service),
port: 9093,
path: Some("/".to_string()),
path_type: Some(PathType::Prefix),
namespace: Some(fqdn!(&namespace)),
ingress_class_name: Some("openshift-default".to_string()),
};
let prometheus_domain = topology
.get_domain(&format!("prometheus-{}", self.sender.namespace.clone()))
.await?;
let name = format!("{}-prometheus", self.sender.namespace.clone());
let backend_service = format!("prometheus-operated");
let prometheus_ingress = K8sIngressScore {
name: fqdn!(&name),
host: fqdn!(&prometheus_domain),
backend_service: fqdn!(&backend_service),
port: 9090,
path: Some("/".to_string()),
path_type: Some(PathType::Prefix),
namespace: Some(fqdn!(&namespace)),
ingress_class_name: Some("openshift-default".to_string()),
};
alert_manager_ingress.interpret(inventory, topology).await?;
prometheus_ingress.interpret(inventory, topology).await?;
info!("installed rhob monitoring stack",); info!("installed rhob monitoring stack",);
Ok(Outcome::success(format!( Ok(Outcome::success(format!(
"successfully deployed rhob-prometheus {:#?}", "successfully deployed rhob-prometheus {:#?}",
@@ -324,6 +272,31 @@ impl RHOBAlertingInterpret {
))) )))
} }
async fn install_alert_manager(
&self,
client: &Arc<K8sClient>,
) -> Result<Outcome, InterpretError> {
let am = Alertmanager {
metadata: ObjectMeta {
name: Some(self.sender.namespace.clone()),
labels: Some(std::collections::BTreeMap::from([(
"alertmanagerConfig".to_string(),
"enabled".to_string(),
)])),
namespace: Some(self.sender.namespace.clone()),
..Default::default()
},
spec: AlertmanagerSpec::default(),
};
client
.apply(&am, Some(&self.sender.namespace.clone()))
.await
.map_err(|e| InterpretError::new(e.to_string()))?;
Ok(Outcome::success(format!(
"successfully deployed service monitor {:#?}",
am.metadata.name
)))
}
async fn install_monitors( async fn install_monitors(
&self, &self,
mut monitors: Vec<ServiceMonitor>, mut monitors: Vec<ServiceMonitor>,
@@ -406,12 +379,7 @@ impl RHOBAlertingInterpret {
))) )))
} }
async fn install_grafana<T: Topology + K8sclient + Ingress>( async fn install_grafana(&self, client: &Arc<K8sClient>) -> Result<Outcome, InterpretError> {
&self,
inventory: &Inventory,
topology: &T,
client: &Arc<K8sClient>,
) -> Result<Outcome, InterpretError> {
let mut label = BTreeMap::new(); let mut label = BTreeMap::new();
label.insert("dashboards".to_string(), "grafana".to_string()); label.insert("dashboards".to_string(), "grafana".to_string());
let labels = LabelSelector { let labels = LabelSelector {
@@ -497,23 +465,6 @@ impl RHOBAlertingInterpret {
.apply(&grafana, Some(&self.sender.namespace.clone())) .apply(&grafana, Some(&self.sender.namespace.clone()))
.await .await
.map_err(|e| InterpretError::new(e.to_string()))?; .map_err(|e| InterpretError::new(e.to_string()))?;
let domain = topology
.get_domain(&format!("grafana-{}", self.sender.namespace.clone()))
.await?;
let name = format!("{}-grafana", self.sender.namespace.clone());
let backend_service = format!("grafana-{}-service", self.sender.namespace.clone());
let grafana_ingress = K8sIngressScore {
name: fqdn!(&name),
host: fqdn!(&domain),
backend_service: fqdn!(&backend_service),
port: 3000,
path: Some("/".to_string()),
path_type: Some(PathType::Prefix),
namespace: Some(fqdn!(&namespace)),
ingress_class_name: Some("openshift-default".to_string()),
};
grafana_ingress.interpret(inventory, topology).await?;
Ok(Outcome::success(format!( Ok(Outcome::success(format!(
"successfully deployed grafana instance {:#?}", "successfully deployed grafana instance {:#?}",
grafana.metadata.name grafana.metadata.name

View File

@@ -120,26 +120,10 @@ impl SecretManager {
let ns = &manager.namespace; let ns = &manager.namespace;
let key = T::KEY; let key = T::KEY;
let secret_json = inquire::Editor::new(&format!( let secret_json = inquire::Text::new(&format!(
"Secret not found for {ns} {key}, paste the JSON here :", "Secret not found for {} {}, paste the JSON here :",
ns, key
)) ))
.with_formatter(&|data| {
let char_count = data.chars().count();
if char_count == 0 {
String::from("<skipped>")
} else if char_count <= 20 {
data.into()
} else {
let mut substr: String = data.chars().take(17).collect();
substr.push_str("...");
substr
}
})
.with_render_config(
inquire::ui::RenderConfig::default().with_canceled_prompt_indicator(
inquire::ui::Styled::new("<skipped>").with_fg(inquire::ui::Color::DarkYellow),
),
)
.prompt() .prompt()
.map_err(|e| { .map_err(|e| {
SecretStoreError::Store(format!("Failed to prompt secret {ns} {key} : {e}").into()) SecretStoreError::Store(format!("Failed to prompt secret {ns} {key} : {e}").into())

View File

@@ -2,8 +2,8 @@ mod downloadable_asset;
use downloadable_asset::*; use downloadable_asset::*;
use kube::Client; use kube::Client;
use log::{debug, info}; use log::debug;
use std::{ffi::OsStr, path::PathBuf}; use std::path::PathBuf;
const K3D_BIN_FILE_NAME: &str = "k3d"; const K3D_BIN_FILE_NAME: &str = "k3d";
@@ -213,19 +213,15 @@ impl K3d {
} }
} }
let client;
if !self.is_cluster_initialized() { if !self.is_cluster_initialized() {
debug!("Cluster is not initialized, initializing now"); debug!("Cluster is not initialized, initializing now");
client = self.initialize_cluster().await?; return self.initialize_cluster().await;
} else {
self.start_cluster().await?;
debug!("K3d and cluster are already properly set up");
client = self.create_kubernetes_client().await?;
} }
self.ensure_k3d_config_is_default(self.get_cluster_name()?)?; self.start_cluster().await?;
Ok(client)
debug!("K3d and cluster are already properly set up");
self.create_kubernetes_client().await
} }
// Private helper methods // Private helper methods
@@ -306,16 +302,7 @@ impl K3d {
S: AsRef<std::ffi::OsStr>, S: AsRef<std::ffi::OsStr>,
{ {
let binary_path = self.get_k3d_binary()?; let binary_path = self.get_k3d_binary()?;
self.run_command(binary_path, args) let output = std::process::Command::new(binary_path).args(args).output();
}
pub fn run_command<I, S, C>(&self, cmd: C, args: I) -> Result<std::process::Output, String>
where
I: IntoIterator<Item = S>,
S: AsRef<std::ffi::OsStr>,
C: AsRef<OsStr>,
{
let output = std::process::Command::new(cmd).args(args).output();
match output { match output {
Ok(output) => { Ok(output) => {
let stderr = String::from_utf8_lossy(&output.stderr); let stderr = String::from_utf8_lossy(&output.stderr);
@@ -324,7 +311,7 @@ impl K3d {
debug!("stdout : {}", stdout); debug!("stdout : {}", stdout);
Ok(output) Ok(output)
} }
Err(e) => Err(format!("Failed to execute command: {}", e)), Err(e) => Err(format!("Failed to execute k3d command: {}", e)),
} }
} }
@@ -336,38 +323,12 @@ impl K3d {
return Err(format!("Failed to create cluster: {}", stderr)); return Err(format!("Failed to create cluster: {}", stderr));
} }
info!("Successfully created k3d cluster '{}'", cluster_name); debug!("Successfully created k3d cluster '{}'", cluster_name);
Ok(())
}
fn ensure_k3d_config_is_default(&self, cluster_name: &str) -> Result<(), String> {
let output = self.run_k3d_command(["kubeconfig", "merge", "-d", cluster_name])?;
if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
return Err(format!("Failed to setup k3d kubeconfig : {}", stderr));
}
let output = self.run_command(
"kubectl",
["config", "use-context", &format!("k3d-{cluster_name}")],
)?;
if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
return Err(format!(
"Failed to switch kubectl context to k3d : {}",
stderr
));
}
info!(
"kubectl is now using 'k3d-{}' as default context",
cluster_name
);
Ok(()) Ok(())
} }
async fn create_kubernetes_client(&self) -> Result<Client, String> { async fn create_kubernetes_client(&self) -> Result<Client, String> {
// TODO: Connect the client to the right k3d cluster (see https://git.nationtech.io/NationTech/harmony/issues/92)
Client::try_default() Client::try_default()
.await .await
.map_err(|e| format!("Failed to create Kubernetes client: {}", e)) .map_err(|e| format!("Failed to create Kubernetes client: {}", e))