use std::{io::Write, process::Command, sync::Arc};

use async_trait::async_trait;
use log::info;
use serde_yaml::Value;
use tempfile::NamedTempFile;

use crate::{
    config::HARMONY_DATA_DIR,
    data::Version,
    inventory::Inventory,
    modules::application::{
        ApplicationFeature, HelmPackage, InstallationError, InstallationOutcome, OCICompliant,
        features::{ArgoApplication, ArgoHelmScore},
    },
    score::Score,
    topology::{
        DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology, ingress::Ingress,
    },
};

/// ContinuousDelivery in Harmony provides this functionality:
///
/// - **Package** the application
/// - **Push** to an artifact registry
/// - **Deploy** to a testing environment
/// - **Deploy** to a production environment
///
/// It is intended to be used as an application feature passed down to an ApplicationInterpret. For
/// example:
///
/// ```rust,ignore
/// let app = RustApplicationScore {
///     name: "My Rust App".to_string(),
///     features: vec![ContinuousDelivery::default()],
/// };
/// ```
///
/// *Note:*
///
/// By default, the Harmony Opinionated Pipeline is built using these technologies:
///
/// - Gitea Actions (executes pipeline steps)
/// - Docker to build an OCI container image
/// - Helm chart to package Kubernetes resources
/// - Harbor as artifact registry
/// - ArgoCD to install/upgrade/rollback/inspect k8s resources
/// - Kubernetes for runtime orchestration
#[derive(Debug, Default, Clone)]
pub struct PackagingDeployment<A: OCICompliant + HelmPackage> {
    pub application: Arc<A>,
}

impl<A: OCICompliant + HelmPackage> PackagingDeployment<A> {
    async fn deploy_to_local_k3d(
        &self,
        app_name: String,
        chart_url: String,
        image_name: String,
    ) -> Result<(), String> {
        // TODO: This works only with local k3d installations, which is fine only for current demo
        // purposes. We assume usage of K8sAnywhereTopology.
        // https://git.nationtech.io/NationTech/harmony/issues/106
        let k3d_bin_path = (*HARMONY_DATA_DIR).join("k3d").join("k3d");

        // --- 1. Import the container image into the k3d cluster ---
        info!(
            "Importing image '{}' into k3d cluster 'harmony'",
            image_name
        );
        let import_output = Command::new(&k3d_bin_path)
            .args(["image", "import", &image_name, "--cluster", "harmony"])
            .output()
            .map_err(|e| format!("Failed to execute k3d image import: {}", e))?;

        if !import_output.status.success() {
            return Err(format!(
                "Failed to import image to k3d: {}",
                String::from_utf8_lossy(&import_output.stderr)
            ));
        }

        // --- 2. Get the kubeconfig for the k3d cluster and write it to a temp file ---
        info!("Retrieving kubeconfig for k3d cluster 'harmony'");
        let kubeconfig_output = Command::new(&k3d_bin_path)
            .args(["kubeconfig", "get", "harmony"])
            .output()
            .map_err(|e| format!("Failed to execute k3d kubeconfig get: {}", e))?;

        if !kubeconfig_output.status.success() {
            return Err(format!(
                "Failed to get kubeconfig from k3d: {}",
                String::from_utf8_lossy(&kubeconfig_output.stderr)
            ));
        }

        let mut temp_kubeconfig = NamedTempFile::new()
            .map_err(|e| format!("Failed to create temp file for kubeconfig: {}", e))?;
        temp_kubeconfig
            .write_all(&kubeconfig_output.stdout)
            .map_err(|e| format!("Failed to write to temp kubeconfig file: {}", e))?;
        let kubeconfig_path = temp_kubeconfig.path().to_str().unwrap();
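
        // NOTE: `temp_kubeconfig` must stay in scope until the `helm` command below has
        // finished: `NamedTempFile` deletes the underlying file when the handle is dropped,
        // so dropping it early would hand helm a kubeconfig path that no longer exists.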

        // --- 3. Install or upgrade the Helm chart in the cluster ---
        info!(
            "Deploying Helm chart '{}' to namespace '{}'",
            chart_url, app_name
        );
        let release_name = app_name.to_lowercase(); // Helm release names are often lowercase
        let helm_output = Command::new("helm")
            .args([
                "upgrade",
                "--install",
                &release_name,
                &chart_url,
                "--namespace",
                &app_name,
                "--create-namespace",
                "--wait", // Wait for the deployment to be ready
                "--kubeconfig",
                kubeconfig_path,
            ])
            .spawn()
            .map_err(|e| format!("Failed to execute helm upgrade: {}", e))?
            .wait_with_output()
            .map_err(|e| format!("Failed to wait for helm upgrade: {}", e))?;

        if !helm_output.status.success() {
            // Since we used `spawn()`, helm inherited stderr and already streamed its error
            // output to the console; there is nothing captured here to include.
            return Err(format!(
                "Failed to deploy Helm chart, helm exited with {}",
                helm_output.status
            ));
        }

        info!("Successfully deployed '{}' to local k3d cluster.", app_name);
        Ok(())
    }
}

#[async_trait]
impl<
    A: OCICompliant + HelmPackage + Clone + 'static,
    T: Topology + HelmCommand + MultiTargetTopology + K8sclient + Ingress + 'static,
> ApplicationFeature<T> for PackagingDeployment<A>
{
    async fn ensure_installed(
        &self,
        topology: &T,
    ) -> Result<InstallationOutcome, InstallationError> {
        let image = self.application.image_name();
        let domain = topology
            .get_domain(&self.application.name())
            .await
            .map_err(|e| e.to_string())?;

        // TODO: Write CI/CD workflow files.
        // We can autodetect the CI type using the remote url (default to GitHub Actions for a
        // github url, etc.), or ask for it when unknown.
        let helm_chart = self
            .application
            .build_push_helm_package(&image, &domain)
            .await?;

        // TODO: Make building the image configurable/skippable if the image already exists (prompt).
        // https://git.nationtech.io/NationTech/harmony/issues/104
        let image = self.application.build_push_oci_image().await?;

        // TODO: This is a temporary hack for demo purposes. The deployment target should be driven
        // by the topology only, and we should not have to know how to perform tasks like this for
        // which the topology should be responsible.
        //
        // That said, this will require some careful architectural decisions, since the concept of
        // deployment targets / profiles is probably a layer of complexity that we won't be
        // completely able to avoid.
        //
        // I'll try something for now that must be thought through later: add a deployment_profile
        // function to the topology trait that returns a profile, then anybody who needs it can
        // access it. This forces every Topology to understand the concept of targets though... So
        // instead I'll create a new capability, MultiTargetTopology, and we'll see how it
        // goes. It still does not feel right though.
        //
        // https://git.nationtech.io/NationTech/harmony/issues/106
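
        // For orientation, a minimal sketch of the capability described above, assuming a
        // shape like the following (illustrative only; the real definitions live in
        // `crate::topology`, and the variants other than `LocalDev` are assumptions):
        //
        //     pub enum DeploymentTarget {
        //         LocalDev,
        //         Staging,
        //         Production,
        //     }
        //
        //     pub trait MultiTargetTopology: Topology {
        //         fn current_target(&self) -> DeploymentTarget;
        //     }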

        match topology.current_target() {
            DeploymentTarget::LocalDev => {
                info!("Deploying {} locally...", self.application.name());
                self.deploy_to_local_k3d(self.application.name(), helm_chart, image)
                    .await?;
            }
            target => {
                info!(
                    "Deploying {} to target {target:?}",
                    self.application.name()
                );
                let score = ArgoHelmScore {
                    namespace: format!("{}", self.application.name()),
                    openshift: true,
                    argo_apps: vec![ArgoApplication::from(CDApplicationConfig {
                        // helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0
                        version: Version::from("0.1.0").unwrap(),
                        helm_chart_repo_url: "hub.nationtech.io/harmony".to_string(),
                        helm_chart_name: format!("{}-chart", self.application.name()),
                        values_overrides: None,
                        name: format!("{}", self.application.name()),
                        namespace: format!("{}", self.application.name()),
                    })],
                };
                score
                    .interpret(&Inventory::empty(), topology)
                    .await
                    .unwrap();
            }
        };

        Ok(InstallationOutcome::success_with_details(vec![format!(
            "{}: http://{domain}",
            self.application.name()
        )]))
    }

    fn name(&self) -> String {
        "ContinuousDelivery".to_string()
    }
}

/// For now this is entirely bound to K8s / ArgoCD; it will have to be revisited when we support
/// more CD systems.
pub struct CDApplicationConfig {
    pub version: Version,
    pub helm_chart_repo_url: String,
    pub helm_chart_name: String,
    pub values_overrides: Option<Value>,
    pub name: String,
    pub namespace: String,
}

pub trait ContinuousDeliveryApplication {
    fn get_config(&self) -> CDApplicationConfig;
}
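
// Example (illustrative sketch, not part of the crate): a hypothetical `MyWebApp`
// implementing `ContinuousDeliveryApplication`. The chart coordinates mirror the demo
// values used above; adjust them to your own registry and chart.
//
//     struct MyWebApp;
//
//     impl ContinuousDeliveryApplication for MyWebApp {
//         fn get_config(&self) -> CDApplicationConfig {
//             CDApplicationConfig {
//                 version: Version::from("0.1.0").unwrap(),
//                 helm_chart_repo_url: "hub.nationtech.io/harmony".to_string(),
//                 helm_chart_name: "my-web-app-chart".to_string(),
//                 values_overrides: None,
//                 name: "my-web-app".to_string(),
//                 namespace: "my-web-app".to_string(),
//             }
//         }
//     }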