diff --git a/harmony/src/modules/application/features/continuous_delivery.rs b/harmony/src/modules/application/features/continuous_delivery.rs index 1bc2d9d..d3ee38a 100644 --- a/harmony/src/modules/application/features/continuous_delivery.rs +++ b/harmony/src/modules/application/features/continuous_delivery.rs @@ -50,6 +50,55 @@ pub struct ContinuousDelivery { } impl ContinuousDelivery { + pub async fn deploy(&self, topology: &T, helm_chart: String, image: String) -> Result<(), String> + where + T: Topology + HelmCommand + MultiTargetTopology + K8sclient + 'static, + { + // TODO: this is a temporary hack for demo purposes, the deployment target should be driven + // by the topology only and we should not have to know how to perform tasks like this for + // which the topology should be responsible. + // + // That said, this will require some careful architectural decisions, since the concept of + // deployment targets / profiles is probably a layer of complexity that we won't be + // completely able to avoid + // + // I'll try something for now that must be thought through after: add a deployment_profile + // function to the topology trait that returns a profile, then anybody who needs it can + // access it. This forces every Topology to understand the concept of targets though... So + // instead I'll create a new Capability which is MultiTargetTopology and we'll see how it + // goes. It still does not feel right though. 
+ // + // https://git.nationtech.io/NationTech/harmony/issues/106 + match topology.current_target() { + DeploymentTarget::LocalDev => { + info!("Deploying {} locally...", self.application.name()); + self.deploy_to_local_k3d(self.application.name(), helm_chart, image) + .await?; + } + target => { + info!("Deploying {} to target {target:?}", self.application.name()); + + let score = ArgoHelmScore { + namespace: format!("{}", self.application.name()), + openshift: true, + argo_apps: vec![ArgoApplication::from(CDApplicationConfig { + // helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0 + version: Version::from("0.1.0").unwrap(), + helm_chart_repo_url: "hub.nationtech.io/harmony".to_string(), + helm_chart_name: format!("{}-chart", self.application.name()), + values_overrides: None, + name: format!("{}", self.application.name()), + namespace: format!("{}", self.application.name()), + })], + }; + score + .interpret(&Inventory::empty(), topology) + .await + .unwrap(); + } + }; + Ok(()) + } async fn deploy_to_local_k3d( &self, app_name: String, @@ -153,50 +202,7 @@ impl< // https://git.nationtech.io/NationTech/harmony/issues/104 let image = self.application.build_push_oci_image().await?; - // TODO: this is a temporary hack for demo purposes, the deployment target should be driven - // by the topology only and we should not have to know how to perform tasks like this for - // which the topology should be responsible. - // - // That said, this will require some careful architectural decisions, since the concept of - // deployment targets / profiles is probably a layer of complexity that we won't be - // completely able to avoid - // - // I'll try something for now that must be thought through after : att a deployment_profile - // function to the topology trait that returns a profile, then anybody who needs it can - // access it. This forces every Topology to understand the concept of targets though... 
So - // instead I'll create a new Capability which is MultiTargetTopology and we'll see how it - // goes. It still does not feel right though. - // - // https://git.nationtech.io/NationTech/harmony/issues/106 - match topology.current_target() { - DeploymentTarget::LocalDev => { - info!("Deploying {} locally...", self.application.name()); - self.deploy_to_local_k3d(self.application.name(), helm_chart, image) - .await?; - } - target => { - info!("Deploying {} to target {target:?}", self.application.name()); - - let score = ArgoHelmScore { - namespace: format!("{}", self.application.name()), - openshift: true, - argo_apps: vec![ArgoApplication::from(CDApplicationConfig { - // helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0 - version: Version::from("0.1.0").unwrap(), - helm_chart_repo_url: "hub.nationtech.io/harmony".to_string(), - helm_chart_name: format!("{}-chart", self.application.name()), - values_overrides: None, - name: format!("{}", self.application.name()), - namespace: format!("{}", self.application.name()), - })], - }; - score - .interpret(&Inventory::empty(), topology) - .await - .unwrap(); - } - }; - Ok(()) + self.deploy(topology, helm_chart, image).await } fn name(&self) -> String { "ContinuousDelivery".to_string() diff --git a/harmony/src/modules/application/features/mod.rs b/harmony/src/modules/application/features/mod.rs index 93f6412..9d6fbf5 100644 --- a/harmony/src/modules/application/features/mod.rs +++ b/harmony/src/modules/application/features/mod.rs @@ -1,5 +1,6 @@ mod endpoint; pub mod rhob_monitoring; +mod multisite; pub use endpoint::*; mod monitoring; diff --git a/harmony/src/modules/application/features/multisite.rs b/harmony/src/modules/application/features/multisite.rs new file mode 100644 index 0000000..1358397 --- /dev/null +++ b/harmony/src/modules/application/features/multisite.rs @@ -0,0 +1,49 @@ +use std::sync::Arc; + +use crate::modules::application::{Application, ApplicationFeature, 
StatelessApplication}; +use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore; +use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus; + +use crate::topology::{K8sAnywhereTopology, MultiTargetTopology}; +use crate::{ + inventory::Inventory, + modules::monitoring::{ + alert_channel::webhook_receiver::WebhookReceiver, ntfy::ntfy::NtfyScore, + }, + score::Score, + topology::{HelmCommand, K8sclient, Topology, tenant::TenantManager}, +}; +use crate::{ + modules::prometheus::prometheus::PrometheusApplicationMonitoring, + topology::oberservability::monitoring::AlertReceiver, +}; +use async_trait::async_trait; +use base64::{Engine as _, engine::general_purpose}; +use harmony_types::net::Url; +use log::{debug, info}; + +trait DebugTopology: Topology + std::fmt::Debug {} + +#[derive(Debug, Clone)] +pub struct Multisite { + app: Arc, + secondary_site: Arc, +} + +#[async_trait] +impl ApplicationFeature for Multisite { + async fn ensure_installed(&self, topology: &T) -> Result<(), String> { + + todo!( + " + - Find a way to get pvs for this application + - find the pv csi volumes uuid + - run rbd mirror image enable --pool mirrored-pool csi-vol- snapshot + - enjoy + " + ) + } + fn name(&self) -> String { + "Multisite".to_string() + } +} diff --git a/harmony/src/modules/application/mod.rs b/harmony/src/modules/application/mod.rs index 8e60984..6e25f13 100644 --- a/harmony/src/modules/application/mod.rs +++ b/harmony/src/modules/application/mod.rs @@ -2,6 +2,10 @@ mod feature; pub mod features; pub mod oci; mod rust; +mod stateless; +mod stateful; +pub use stateless::*; +pub use stateful::*; use std::sync::Arc; pub use feature::*; diff --git a/harmony/src/modules/application/rust.rs b/harmony/src/modules/application/rust.rs index 0d204cc..5818c3e 100644 --- a/harmony/src/modules/application/rust.rs +++ b/harmony/src/modules/application/rust.rs @@ -206,7 +206,7 @@ impl RustWebapp { } } 
- ///normalizes timestamp and ignores files that will bust the docker cach + ///normalizes timestamp and ignores files that will bust the docker cache async fn create_deterministic_tar( &self, project_root: &std::path::Path, diff --git a/harmony/src/modules/application/stateful.rs b/harmony/src/modules/application/stateful.rs new file mode 100644 index 0000000..015ccc7 --- /dev/null +++ b/harmony/src/modules/application/stateful.rs @@ -0,0 +1,6 @@ +use crate::modules::application::Application; + +/// A StatefulApplication is an application bundle that writes persistent data. +/// +/// This will enable backup features, stateful multisite replication, etc. +pub trait StatefulApplication: Application {} diff --git a/harmony/src/modules/application/stateless.rs b/harmony/src/modules/application/stateless.rs new file mode 100644 index 0000000..934d84d --- /dev/null +++ b/harmony/src/modules/application/stateless.rs @@ -0,0 +1,26 @@ +use crate::modules::application::{Application, features::ContinuousDeliveryApplication}; + +/// Marker trait for stateless applications that can be deployed anywhere without worrying about +/// data. 
+/// +/// This includes Applications fitting these categories : +/// +/// - Application with all files built into the docker image and never written to, can be mounted +/// read-only +/// - Application writing to hard drive on ephemeral volume that can be lost at anytime and does +/// not require any replication/backup logic to operate +/// - Not supported : an application that writes state to a volume that must be shared or kept +/// to maintain a quorum across various instances +/// - Application connecting to a database/datastore accessible from anywhere such as +/// - Public bucket endpoint +/// - Publicly accessible +/// - Application connecting to a private database external to this application, accessible from the +/// deployment target +/// - Ensuring the private database is reachable is out of scope of this trait (for now) +/// +/// The entire application definition **must not** require any persistent volume or include a +/// deployment component depending on persistent data such as a transitive PostgreSQL helm chart. +/// +/// Typically, applications that can be autoscaled without additional complexity fit the +/// StatelessApplication requirements. +pub trait StatelessApplication: Application + ContinuousDeliveryApplication {}