From d86970f81b94bdf7f6205be2a770ca881c6698c8 Mon Sep 17 00:00:00 2001
From: Ian Letourneau
Date: Thu, 14 Aug 2025 20:42:09 +0000
Subject: [PATCH] fix: make sure demo works on both local & remote target (#107)

* define Ntfy ingress (naive implementation) based on current target
* use patched Ntfy Helm Chart
* create Ntfy main user only if needed
* add info logs
* better error bubbling
* instrument feature installations
* upgrade prometheus alerting charts if already installed
* harmony_composer params to control deployment `target` and `profile`

Co-authored-by: Ian Letourneau
Co-authored-by: Jean-Gabriel Gill-Couture
Reviewed-on: https://git.nationtech.io/NationTech/harmony/pulls/107
---
 examples/rust/src/main.rs                     |  2 +
 harmony/src/domain/instrumentation.rs         |  8 ++
 harmony/src/domain/topology/k8s.rs            |  2 +-
 .../features/continuous_delivery.rs           | 38 ++++----
 .../application/features/helm_argocd_score.rs | 11 ++-
 .../application/features/monitoring.rs        |  8 +-
 harmony/src/modules/application/mod.rs        | 39 +++++++-
 harmony/src/modules/application/rust.rs       | 30 +++---
 .../monitoring/ntfy/helm/ntfy_helm_chart.rs   | 55 ++++++-----
 harmony/src/modules/monitoring/ntfy/ntfy.rs   | 29 +++---
 .../k8s_prometheus_alerting_score.rs          |  3 +-
 harmony_cli/src/cli_logger.rs                 | 43 ++++++++-
 harmony_cli/src/lib.rs                        |  5 +-
 harmony_cli/src/progress.rs                   | 22 +----
 harmony_cli/src/theme.rs                      |  8 +-
 .../src/harmony_composer_logger.rs            | 13 +--
 harmony_composer/src/instrumentation.rs       | 18 +++-
 harmony_composer/src/main.rs                  | 94 ++++++++++++-----
 18 files changed, 283 insertions(+), 145 deletions(-)

diff --git a/examples/rust/src/main.rs b/examples/rust/src/main.rs
index feb92ef..3be2582 100644
--- a/examples/rust/src/main.rs
+++ b/examples/rust/src/main.rs
@@ -7,9 +7,11 @@ use harmony::{
             ApplicationScore, RustWebFramework, RustWebapp,
             features::{ContinuousDelivery, Monitoring},
         },
+        load_balancer::LoadBalancerScore,
         monitoring::alert_channel::{
             discord_alert_channel::DiscordWebhook, webhook_receiver::WebhookReceiver,
         },
+        okd::bootstrap_load_balancer::OKDBootstrapLoadBalancerScore,
     },
     topology::{K8sAnywhereTopology, Url},
 };
diff --git a/harmony/src/domain/instrumentation.rs b/harmony/src/domain/instrumentation.rs
index 2e113a3..6f0497e 100644
--- a/harmony/src/domain/instrumentation.rs
+++ b/harmony/src/domain/instrumentation.rs
@@ -2,6 +2,8 @@ use log::debug;
 use once_cell::sync::Lazy;
 use tokio::sync::broadcast;
 
+use crate::modules::application::ApplicationFeatureStatus;
+
 use super::{
     interpret::{InterpretError, Outcome},
     topology::TopologyStatus,
@@ -30,6 +32,12 @@ pub enum HarmonyEvent {
         status: TopologyStatus,
         message: Option,
     },
+    ApplicationFeatureStateChanged {
+        topology: String,
+        application: String,
+        feature: String,
+        status: ApplicationFeatureStatus,
+    },
 }
 
 static HARMONY_EVENT_BUS: Lazy> = Lazy::new(|| {
diff --git a/harmony/src/domain/topology/k8s.rs b/harmony/src/domain/topology/k8s.rs
index 1314f6a..388ff9d 100644
--- a/harmony/src/domain/topology/k8s.rs
+++ b/harmony/src/domain/topology/k8s.rs
@@ -120,7 +120,7 @@ impl K8sClient {
             .expect("Couldn't unwrap status");
 
         if let Some(s) = status.status {
-            debug!("Status: {}", s);
+            debug!("Status: {} - {:?}", s, status.details);
             if s == "Success" { Ok(()) } else { Err(s) }
         } else {
             Err("Couldn't get inner status of pod exec".to_string())
diff --git a/harmony/src/modules/application/features/continuous_delivery.rs b/harmony/src/modules/application/features/continuous_delivery.rs
index e53bd36..7b447d0 100644
---
a/harmony/src/modules/application/features/continuous_delivery.rs +++ b/harmony/src/modules/application/features/continuous_delivery.rs @@ -1,7 +1,7 @@ use std::{io::Write, process::Command, sync::Arc}; use async_trait::async_trait; -use log::{debug, error}; +use log::info; use serde_yaml::Value; use tempfile::NamedTempFile; @@ -56,14 +56,11 @@ impl ContinuousDelivery { chart_url: String, image_name: String, ) -> Result<(), String> { - error!( - "FIXME This works only with local k3d installations, which is fine only for current demo purposes. We assume usage of K8sAnywhereTopology" - ); - - error!("TODO hardcoded k3d bin path is wrong"); + // TODO: This works only with local k3d installations, which is fine only for current demo purposes. We assume usage of K8sAnywhereTopology" + // https://git.nationtech.io/NationTech/harmony/issues/106 let k3d_bin_path = (*HARMONY_DATA_DIR).join("k3d").join("k3d"); // --- 1. Import the container image into the k3d cluster --- - debug!( + info!( "Importing image '{}' into k3d cluster 'harmony'", image_name ); @@ -80,7 +77,7 @@ impl ContinuousDelivery { } // --- 2. Get the kubeconfig for the k3d cluster and write it to a temp file --- - debug!("Retrieving kubeconfig for k3d cluster 'harmony'"); + info!("Retrieving kubeconfig for k3d cluster 'harmony'"); let kubeconfig_output = Command::new(&k3d_bin_path) .args(["kubeconfig", "get", "harmony"]) .output() @@ -101,7 +98,7 @@ impl ContinuousDelivery { let kubeconfig_path = temp_kubeconfig.path().to_str().unwrap(); // --- 3. Install or upgrade the Helm chart in the cluster --- - debug!( + info!( "Deploying Helm chart '{}' to namespace '{}'", chart_url, app_name ); @@ -131,7 +128,7 @@ impl ContinuousDelivery { )); } - debug!("Successfully deployed '{}' to local k3d cluster.", app_name); + info!("Successfully deployed '{}' to local k3d cluster.", app_name); Ok(()) } } @@ -151,14 +148,12 @@ impl< // Or ask for it when unknown let helm_chart = self.application.build_push_helm_package(&image).await?; - debug!("Pushed new helm chart {helm_chart}"); - error!("TODO Make building image configurable/skippable if image already exists (prompt)"); + // TODO: Make building image configurable/skippable if image already exists (prompt)") + // https://git.nationtech.io/NationTech/harmony/issues/104 let image = self.application.build_push_oci_image().await?; - debug!("Pushed new docker image {image}"); - debug!("Installing ContinuousDelivery feature"); - // TODO this is a temporary hack for demo purposes, the deployment target should be driven + // TODO: this is a temporary hack for demo purposes, the deployment target should be driven // by the topology only and we should not have to know how to perform tasks like this for // which the topology should be responsible. // @@ -171,17 +166,20 @@ impl< // access it. This forces every Topology to understand the concept of targets though... So // instead I'll create a new Capability which is MultiTargetTopology and we'll see how it // goes. It still does not feel right though. 
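// For orientation, a minimal sketch of the MultiTargetTopology capability referred
// to above and relied on by the match below. This is an assumption inferred from
// current_target() and the DeploymentTarget variants used elsewhere in this patch,
// not the crate's authoritative definition:

pub enum DeploymentTarget {
    LocalDev,
    Staging,
    Production,
}

pub trait MultiTargetTopology {
    /// Reports which environment this topology currently deploys to.
    fn current_target(&self) -> DeploymentTarget;
}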
+ // + // https://git.nationtech.io/NationTech/harmony/issues/106 match topology.current_target() { DeploymentTarget::LocalDev => { + info!("Deploying {} locally...", self.application.name()); self.deploy_to_local_k3d(self.application.name(), helm_chart, image) .await?; } target => { - debug!("Deploying to target {target:?}"); + info!("Deploying {} to target {target:?}", self.application.name()); let score = ArgoHelmScore { - namespace: "harmonydemo-staging".to_string(), - openshift: false, - domain: "argo.harmonydemo.apps.st.mcd".to_string(), + namespace: "harmony-example-rust-webapp".to_string(), + openshift: true, + domain: "argo.harmonydemo.apps.ncd0.harmony.mcd".to_string(), argo_apps: vec![ArgoApplication::from(CDApplicationConfig { // helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0 version: Version::from("0.1.0").unwrap(), @@ -189,7 +187,7 @@ impl< helm_chart_name: "harmony-example-rust-webapp-chart".to_string(), values_overrides: None, name: "harmony-demo-rust-webapp".to_string(), - namespace: "harmonydemo-staging".to_string(), + namespace: "harmony-example-rust-webapp".to_string(), })], }; score diff --git a/harmony/src/modules/application/features/helm_argocd_score.rs b/harmony/src/modules/application/features/helm_argocd_score.rs index 5a91798..0532111 100644 --- a/harmony/src/modules/application/features/helm_argocd_score.rs +++ b/harmony/src/modules/application/features/helm_argocd_score.rs @@ -1,5 +1,4 @@ use async_trait::async_trait; -use log::error; use non_blank_string_rs::NonBlankString; use serde::Serialize; use std::str::FromStr; @@ -50,7 +49,6 @@ impl Interpret for ArgoInterpret { inventory: &Inventory, topology: &T, ) -> Result { - error!("Uncomment below, only disabled for debugging"); self.score.interpret(inventory, topology).await?; let k8s_client = topology.k8s_client().await?; @@ -58,9 +56,14 @@ impl Interpret for ArgoInterpret { .apply_yaml_many(&self.argo_apps.iter().map(|a| a.to_yaml()).collect(), None) .await .unwrap(); + Ok(Outcome::success(format!( - "ArgoCD installed with {} applications", - self.argo_apps.len() + "ArgoCD installed with {} {}", + self.argo_apps.len(), + match self.argo_apps.len() { + 1 => "application", + _ => "applications", + } ))) } diff --git a/harmony/src/modules/application/features/monitoring.rs b/harmony/src/modules/application/features/monitoring.rs index 1ffdace..e8303ce 100644 --- a/harmony/src/modules/application/features/monitoring.rs +++ b/harmony/src/modules/application/features/monitoring.rs @@ -4,6 +4,7 @@ use crate::modules::application::{Application, ApplicationFeature}; use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore; use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus; +use crate::topology::MultiTargetTopology; use crate::{ inventory::Inventory, modules::monitoring::{ @@ -33,6 +34,7 @@ impl< + 'static + TenantManager + K8sclient + + MultiTargetTopology + std::fmt::Debug + PrometheusApplicationMonitoring, > ApplicationFeature for Monitoring @@ -55,11 +57,11 @@ impl< }; let ntfy = NtfyScore { namespace: namespace.clone(), - host: "localhost".to_string(), + host: "ntfy.harmonydemo.apps.ncd0.harmony.mcd".to_string(), }; ntfy.interpret(&Inventory::empty(), topology) .await - .expect("couldn't create interpret for ntfy"); + .map_err(|e| e.to_string())?; let ntfy_default_auth_username = "harmony"; let ntfy_default_auth_password = "harmony"; @@ -96,7 +98,7 @@ impl< alerting_score 
.interpret(&Inventory::empty(), topology) .await - .unwrap(); + .map_err(|e| e.to_string())?; Ok(()) } fn name(&self) -> String { diff --git a/harmony/src/modules/application/mod.rs b/harmony/src/modules/application/mod.rs index 4ca9c54..beb10d6 100644 --- a/harmony/src/modules/application/mod.rs +++ b/harmony/src/modules/application/mod.rs @@ -14,11 +14,19 @@ use serde::Serialize; use crate::{ data::{Id, Version}, + instrumentation::{self, HarmonyEvent}, interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, inventory::Inventory, topology::Topology, }; +#[derive(Clone, Debug)] +pub enum ApplicationFeatureStatus { + Installing, + Installed, + Failed { details: String }, +} + pub trait Application: std::fmt::Debug + Send + Sync { fn name(&self) -> String; } @@ -47,13 +55,34 @@ impl Interpret for Application .join(", ") ); for feature in self.features.iter() { - debug!( - "Installing feature {} for application {app_name}", - feature.name() - ); + instrumentation::instrument(HarmonyEvent::ApplicationFeatureStateChanged { + topology: topology.name().into(), + application: self.application.name(), + feature: feature.name(), + status: ApplicationFeatureStatus::Installing, + }) + .unwrap(); + let _ = match feature.ensure_installed(topology).await { - Ok(()) => (), + Ok(()) => { + instrumentation::instrument(HarmonyEvent::ApplicationFeatureStateChanged { + topology: topology.name().into(), + application: self.application.name(), + feature: feature.name(), + status: ApplicationFeatureStatus::Installed, + }) + .unwrap(); + } Err(msg) => { + instrumentation::instrument(HarmonyEvent::ApplicationFeatureStateChanged { + topology: topology.name().into(), + application: self.application.name(), + feature: feature.name(), + status: ApplicationFeatureStatus::Failed { + details: msg.clone(), + }, + }) + .unwrap(); return Err(InterpretError::new(format!( "Application Interpret failed to install feature : {msg}" ))); diff --git a/harmony/src/modules/application/rust.rs b/harmony/src/modules/application/rust.rs index 22a1c42..da1e594 100644 --- a/harmony/src/modules/application/rust.rs +++ b/harmony/src/modules/application/rust.rs @@ -10,7 +10,7 @@ use dockerfile_builder::Dockerfile; use dockerfile_builder::instruction::{CMD, COPY, ENV, EXPOSE, FROM, RUN, USER, WORKDIR}; use dockerfile_builder::instruction_builder::CopyBuilder; use futures_util::StreamExt; -use log::{debug, error, log_enabled}; +use log::{debug, info, log_enabled}; use serde::Serialize; use tar::Archive; @@ -73,19 +73,19 @@ impl Application for RustWebapp { #[async_trait] impl HelmPackage for RustWebapp { async fn build_push_helm_package(&self, image_url: &str) -> Result { - debug!("Starting Helm chart build and push for '{}'", self.name); + info!("Starting Helm chart build and push for '{}'", self.name); // 1. Create the Helm chart files on disk. let chart_dir = self .create_helm_chart_files(image_url) .map_err(|e| format!("Failed to create Helm chart files: {}", e))?; - debug!("Successfully created Helm chart files in {:?}", chart_dir); + info!("Successfully created Helm chart files in {:?}", chart_dir); // 2. Package the chart into a .tgz archive. 
let packaged_chart_path = self .package_helm_chart(&chart_dir) .map_err(|e| format!("Failed to package Helm chart: {}", e))?; - debug!( + info!( "Successfully packaged Helm chart: {}", packaged_chart_path.to_string_lossy() ); @@ -94,7 +94,7 @@ impl HelmPackage for RustWebapp { let oci_chart_url = self .push_helm_chart(&packaged_chart_path) .map_err(|e| format!("Failed to push Helm chart: {}", e))?; - debug!("Successfully pushed Helm chart to: {}", oci_chart_url); + info!("Successfully pushed Helm chart to: {}", oci_chart_url); Ok(oci_chart_url) } @@ -107,20 +107,20 @@ impl OCICompliant for RustWebapp { async fn build_push_oci_image(&self) -> Result { // This function orchestrates the build and push process. // It's async to match the trait definition, though the underlying docker commands are blocking. - debug!("Starting OCI image build and push for '{}'", self.name); + info!("Starting OCI image build and push for '{}'", self.name); // 1. Build the image by calling the synchronous helper function. let image_tag = self.image_name(); self.build_docker_image(&image_tag) .await .map_err(|e| format!("Failed to build Docker image: {}", e))?; - debug!("Successfully built Docker image: {}", image_tag); + info!("Successfully built Docker image: {}", image_tag); // 2. Push the image to the registry. self.push_docker_image(&image_tag) .await .map_err(|e| format!("Failed to push Docker image: {}", e))?; - debug!("Successfully pushed Docker image to: {}", image_tag); + info!("Successfully pushed Docker image to: {}", image_tag); Ok(image_tag) } @@ -195,7 +195,7 @@ impl RustWebapp { ); while let Some(msg) = image_build_stream.next().await { - println!("Message: {msg:?}"); + debug!("Message: {msg:?}"); } Ok(image_name.to_string()) @@ -219,7 +219,7 @@ impl RustWebapp { ); while let Some(msg) = push_image_stream.next().await { - println!("Message: {msg:?}"); + debug!("Message: {msg:?}"); } Ok(image_tag.to_string()) @@ -288,9 +288,8 @@ impl RustWebapp { .unwrap(), ); // Copy the compiled binary from the builder stage. - error!( - "FIXME Should not be using score name here, instead should use name from Cargo.toml" - ); + // TODO: Should not be using score name here, instead should use name from Cargo.toml + // https://git.nationtech.io/NationTech/harmony/issues/105 let binary_path_in_builder = format!("/app/target/release/{}", self.name); let binary_path_in_final = format!("/home/appuser/{}", self.name); dockerfile.push( @@ -328,9 +327,8 @@ impl RustWebapp { )); // Copy only the compiled binary from the builder stage. 
- error!( - "FIXME Should not be using score name here, instead should use name from Cargo.toml" - ); + // TODO: Should not be using score name here, instead should use name from Cargo.toml + // https://git.nationtech.io/NationTech/harmony/issues/105 let binary_path_in_builder = format!("/app/target/release/{}", self.name); let binary_path_in_final = format!("/usr/local/bin/{}", self.name); dockerfile.push( diff --git a/harmony/src/modules/monitoring/ntfy/helm/ntfy_helm_chart.rs b/harmony/src/modules/monitoring/ntfy/helm/ntfy_helm_chart.rs index d94a78d..6949f26 100644 --- a/harmony/src/modules/monitoring/ntfy/helm/ntfy_helm_chart.rs +++ b/harmony/src/modules/monitoring/ntfy/helm/ntfy_helm_chart.rs @@ -1,9 +1,28 @@ use non_blank_string_rs::NonBlankString; use std::str::FromStr; -use crate::modules::helm::chart::{HelmChartScore, HelmRepository}; +use crate::{ + modules::helm::chart::{HelmChartScore, HelmRepository}, + topology::DeploymentTarget, +}; + +pub fn ntfy_helm_chart_score( + namespace: String, + host: String, + target: DeploymentTarget, +) -> HelmChartScore { + // TODO not actually the correct logic, this should be fixed by using an ingresss which is the + // correct k8s standard. + // + // Another option is to delegate to the topology the ingress technology it wants to use Route, + // Ingress or other + let route_enabled = match target { + DeploymentTarget::LocalDev => false, + DeploymentTarget::Staging => true, + DeploymentTarget::Production => true, + }; + let ingress_enabled = !route_enabled; -pub fn ntfy_helm_chart_score(namespace: String, host: String) -> HelmChartScore { let values = format!( r#" replicaCount: 1 @@ -25,23 +44,14 @@ serviceAccount: service: type: ClusterIP - port: 80 + port: 8080 ingress: - enabled: true -# annotations: - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - hosts: - - host: {host} - paths: - - path: / - pathType: ImplementationSpecific - tls: [] - # - secretName: chart-example-tls - # hosts: - # - chart-example.local + enabled: {ingress_enabled} +route: + enabled: {route_enabled} + host: {host} autoscaling: enabled: false @@ -49,7 +59,7 @@ autoscaling: config: enabled: true data: -# base-url: "https://ntfy.something.com" + base-url: "https://{host}" auth-file: "/var/cache/ntfy/user.db" auth-default-access: "deny-all" cache-file: "/var/cache/ntfy/cache.db" @@ -59,6 +69,7 @@ config: enable-signup: false enable-login: "true" enable-metrics: "true" + listen-http: ":8080" persistence: enabled: true @@ -69,16 +80,12 @@ persistence: HelmChartScore { namespace: Some(NonBlankString::from_str(&namespace).unwrap()), release_name: NonBlankString::from_str("ntfy").unwrap(), - chart_name: NonBlankString::from_str("sarab97/ntfy").unwrap(), - chart_version: Some(NonBlankString::from_str("0.1.7").unwrap()), + chart_name: NonBlankString::from_str("oci://hub.nationtech.io/harmony/ntfy").unwrap(), + chart_version: Some(NonBlankString::from_str("0.1.7-nationtech.1").unwrap()), values_overrides: None, values_yaml: Some(values.to_string()), create_namespace: true, install_only: false, - repository: Some(HelmRepository::new( - "sarab97".to_string(), - url::Url::parse("https://charts.sarabsingh.com").unwrap(), - true, - )), + repository: None, } } diff --git a/harmony/src/modules/monitoring/ntfy/ntfy.rs b/harmony/src/modules/monitoring/ntfy/ntfy.rs index 8ad3230..77822d9 100644 --- a/harmony/src/modules/monitoring/ntfy/ntfy.rs +++ b/harmony/src/modules/monitoring/ntfy/ntfy.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use async_trait::async_trait; -use 
log::debug; +use log::info; use serde::Serialize; use strum::{Display, EnumString}; @@ -11,7 +11,7 @@ use crate::{ inventory::Inventory, modules::monitoring::ntfy::helm::ntfy_helm_chart::ntfy_helm_chart_score, score::Score, - topology::{HelmCommand, K8sclient, Topology, k8s::K8sClient}, + topology::{HelmCommand, K8sclient, MultiTargetTopology, Topology, k8s::K8sClient}, }; #[derive(Debug, Clone, Serialize)] @@ -20,7 +20,7 @@ pub struct NtfyScore { pub host: String, } -impl Score for NtfyScore { +impl Score for NtfyScore { fn create_interpret(&self) -> Box> { Box::new(NtfyInterpret { score: self.clone(), @@ -77,7 +77,7 @@ impl NtfyInterpret { vec![ "sh", "-c", - format!("NTFY_PASSWORD={password} ntfy user add --role={role} {username}") + format!("NTFY_PASSWORD={password} ntfy user add --role={role} --ignore-exists {username}") .as_str(), ], ) @@ -89,22 +89,27 @@ impl NtfyInterpret { /// We need a ntfy interpret to wrap the HelmChartScore in order to run the score, and then bootstrap the config inside ntfy #[async_trait] -impl Interpret for NtfyInterpret { +impl Interpret for NtfyInterpret { async fn execute( &self, inventory: &Inventory, topology: &T, ) -> Result { - ntfy_helm_chart_score(self.score.namespace.clone(), self.score.host.clone()) - .interpret(inventory, topology) - .await?; + ntfy_helm_chart_score( + self.score.namespace.clone(), + self.score.host.clone(), + topology.current_target(), + ) + .interpret(inventory, topology) + .await?; - debug!("installed ntfy helm chart"); + info!("installed ntfy helm chart"); let client = topology .k8s_client() .await .expect("couldn't get k8s client"); + info!("deploying ntfy..."); client .wait_until_deployment_ready( "ntfy".to_string(), @@ -112,12 +117,12 @@ impl Interpret for NtfyInterpret { None, ) .await?; - debug!("created k8s client"); + info!("ntfy deployed"); + info!("adding user harmony"); self.add_user(client, "harmony", "harmony", Some(NtfyRole::Admin)) .await?; - - debug!("exec into pod done"); + info!("user added"); Ok(Outcome::success("Ntfy installed".to_string())) } diff --git a/harmony/src/modules/prometheus/k8s_prometheus_alerting_score.rs b/harmony/src/modules/prometheus/k8s_prometheus_alerting_score.rs index c70f5e5..0af1063 100644 --- a/harmony/src/modules/prometheus/k8s_prometheus_alerting_score.rs +++ b/harmony/src/modules/prometheus/k8s_prometheus_alerting_score.rs @@ -166,7 +166,8 @@ impl K8sPrometheusCRDAlertingInterpret { let install_output = Command::new("helm") .args([ - "install", + "upgrade", + "--install", &chart_name, tgz_path.to_str().unwrap(), "--namespace", diff --git a/harmony_cli/src/cli_logger.rs b/harmony_cli/src/cli_logger.rs index c2ed79d..9078e5d 100644 --- a/harmony_cli/src/cli_logger.rs +++ b/harmony_cli/src/cli_logger.rs @@ -1,10 +1,16 @@ use harmony::{ instrumentation::{self, HarmonyEvent}, + modules::application::ApplicationFeatureStatus, topology::TopologyStatus, }; use indicatif::MultiProgress; use indicatif_log_bridge::LogWrapper; -use std::sync::{Arc, Mutex}; +use log::error; +use std::{ + sync::{Arc, Mutex}, + thread, + time::Duration, +}; use crate::progress::{IndicatifProgressTracker, ProgressTracker}; @@ -58,6 +64,7 @@ async fn handle_events(base_progress: MultiProgress) { &format!("\n{} Harmony completed\n\n", crate::theme::EMOJI_HARMONY), ); progress_tracker.add_section("harmony-finished", "\n\n"); + thread::sleep(Duration::from_millis(200)); return false; } HarmonyEvent::TopologyStateChanged { @@ -156,10 +163,40 @@ async fn handle_events(base_progress: MultiProgress) { _ => 
progress_tracker.fail_task(&task_key, &outcome.message), }, Err(err) => { + error!("Interpret error: {err}"); progress_tracker.fail_task(&task_key, &err.to_string()); } } } + HarmonyEvent::ApplicationFeatureStateChanged { + topology: _, + application, + feature, + status, + } => { + if let Some(score) = &(*current_score) { + let section_key = score_key(score); + let task_key = app_feature_key(&application, &feature); + + match status { + ApplicationFeatureStatus::Installing => { + let message = format!("Feature '{}' installing...", feature); + progress_tracker.add_task(§ion_key, &task_key, &message); + } + ApplicationFeatureStatus::Installed => { + let message = format!("Feature '{}' installed", feature); + progress_tracker.finish_task(&task_key, &message); + } + ApplicationFeatureStatus::Failed { details } => { + let message = format!( + "Feature '{}' installation failed: {}", + feature, details + ); + progress_tracker.fail_task(&task_key, &message); + } + } + } + } } true } @@ -175,3 +212,7 @@ fn topology_key(topology: &str) -> String { fn score_key(score: &str) -> String { format!("score-{score}") } + +fn app_feature_key(application: &str, feature: &str) -> String { + format!("app-{application}-{feature}") +} diff --git a/harmony_cli/src/lib.rs b/harmony_cli/src/lib.rs index 11ac572..711a709 100644 --- a/harmony_cli/src/lib.rs +++ b/harmony_cli/src/lib.rs @@ -132,8 +132,9 @@ async fn init( // if list option is specified, print filtered list and exit if args.list { - println!("Available scores:"); - println!("{}", list_scores_with_index(&scores_vec)); + let num_scores = scores_vec.len(); + println!("Available scores {num_scores}:"); + println!("{}\n\n", list_scores_with_index(&scores_vec)); return Ok(()); } diff --git a/harmony_cli/src/progress.rs b/harmony_cli/src/progress.rs index eee8adc..b5b5310 100644 --- a/harmony_cli/src/progress.rs +++ b/harmony_cli/src/progress.rs @@ -33,29 +33,13 @@ pub struct IndicatifProgressTracker { impl IndicatifProgressTracker { pub fn new(base: MultiProgress) -> Self { - // The indicatif log bridge will insert a progress bar at the top. - // To prevent our first section from being erased, we need to create - // a dummy progress bar as our first progress bar. - let _ = base.clear(); - let log_pb = base.add(ProgressBar::new(1)); - - let mut sections = HashMap::new(); - sections.insert( - "__log__".into(), - Section { - header_index: 0, - task_count: 0, - pb: log_pb.clone(), - }, - ); - - let mut tasks = HashMap::new(); - tasks.insert("__log__".into(), log_pb); + let sections = HashMap::new(); + let tasks = HashMap::new(); let state = Arc::new(Mutex::new(IndicatifProgressTrackerState { sections, tasks, - pb_count: 1, + pb_count: 0, })); Self { mp: base, state } diff --git a/harmony_cli/src/theme.rs b/harmony_cli/src/theme.rs index d86e194..66eee45 100644 --- a/harmony_cli/src/theme.rs +++ b/harmony_cli/src/theme.rs @@ -21,10 +21,14 @@ lazy_static! 
{ pub static ref SUCCESS_SPINNER_STYLE: ProgressStyle = SPINNER_STYLE .clone() .tick_strings(&[format!("{}", EMOJI_SUCCESS).as_str()]); - pub static ref SKIP_SPINNER_STYLE: ProgressStyle = SPINNER_STYLE + pub static ref SKIP_SPINNER_STYLE: ProgressStyle = ProgressStyle::default_spinner() + .template(" {spinner:.orange} {wide_msg}") + .unwrap() .clone() .tick_strings(&[format!("{}", EMOJI_SKIP).as_str()]); - pub static ref ERROR_SPINNER_STYLE: ProgressStyle = SPINNER_STYLE + pub static ref ERROR_SPINNER_STYLE: ProgressStyle = ProgressStyle::default_spinner() + .template(" {spinner:.red} {wide_msg}") + .unwrap() .clone() .tick_strings(&[format!("{}", EMOJI_ERROR).as_str()]); } diff --git a/harmony_composer/src/harmony_composer_logger.rs b/harmony_composer/src/harmony_composer_logger.rs index e05cb58..5e0261f 100644 --- a/harmony_composer/src/harmony_composer_logger.rs +++ b/harmony_composer/src/harmony_composer_logger.rs @@ -1,6 +1,5 @@ use harmony_cli::progress::{IndicatifProgressTracker, ProgressTracker}; use indicatif::MultiProgress; -use log::error; use std::sync::Arc; use crate::instrumentation::{self, HarmonyComposerEvent}; @@ -53,15 +52,13 @@ pub async fn handle_events() { progress_tracker.finish_task(COMPILTATION_TASK, "project compiled"); } HarmonyComposerEvent::ProjectCompilationFailed { details } => { - progress_tracker.fail_task(COMPILTATION_TASK, "failed to compile project"); - - error!("{details}"); + progress_tracker.fail_task(COMPILTATION_TASK, &format!("failed to compile project:\n{details}")); } - HarmonyComposerEvent::DeploymentStarted { target } => { + HarmonyComposerEvent::DeploymentStarted { target, profile } => { progress_tracker.add_section( PROGRESS_DEPLOYMENT, &format!( - "\n{} Deploying project to {target}...\n", + "\n{} Deploying project on target '{target}' with profile '{profile}'...\n", harmony_cli::theme::EMOJI_DEPLOY, ), ); @@ -69,6 +66,10 @@ pub async fn handle_events() { HarmonyComposerEvent::DeploymentCompleted => { progress_tracker.clear(); } + HarmonyComposerEvent::DeploymentFailed { details } => { + progress_tracker.add_task(PROGRESS_DEPLOYMENT, "deployment-failed", ""); + progress_tracker.fail_task("deployment-failed", &details); + }, HarmonyComposerEvent::Shutdown => { return false; } diff --git a/harmony_composer/src/instrumentation.rs b/harmony_composer/src/instrumentation.rs index b2e9c99..6f2fa01 100644 --- a/harmony_composer/src/instrumentation.rs +++ b/harmony_composer/src/instrumentation.rs @@ -2,16 +2,28 @@ use log::debug; use once_cell::sync::Lazy; use tokio::sync::broadcast; +use crate::{HarmonyProfile, HarmonyTarget}; + #[derive(Debug, Clone)] pub enum HarmonyComposerEvent { HarmonyComposerStarted, ProjectInitializationStarted, ProjectInitialized, - ProjectCompilationStarted { details: String }, + ProjectCompilationStarted { + details: String, + }, ProjectCompiled, - ProjectCompilationFailed { details: String }, - DeploymentStarted { target: String }, + ProjectCompilationFailed { + details: String, + }, + DeploymentStarted { + target: HarmonyTarget, + profile: HarmonyProfile, + }, DeploymentCompleted, + DeploymentFailed { + details: String, + }, Shutdown, } diff --git a/harmony_composer/src/main.rs b/harmony_composer/src/main.rs index 9f2c9ea..f0a8513 100644 --- a/harmony_composer/src/main.rs +++ b/harmony_composer/src/main.rs @@ -49,14 +49,11 @@ struct CheckArgs { #[derive(Args, Clone, Debug)] struct DeployArgs { - #[arg(long, default_value_t = false)] - staging: bool, + #[arg(long = "target", short = 't', default_value = "local")] + 
harmony_target: HarmonyTarget, - #[arg(long, default_value_t = false)] - prod: bool, - - #[arg(long, default_value_t = false)] - smoke_test: bool, + #[arg(long = "profile", short = 'p', default_value = "dev")] + harmony_profile: HarmonyProfile, } #[derive(Args, Clone, Debug)] @@ -68,6 +65,38 @@ struct AllArgs { deploy: DeployArgs, } +#[derive(Clone, Debug, clap::ValueEnum)] +enum HarmonyTarget { + Local, + Remote, +} + +impl std::fmt::Display for HarmonyTarget { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + HarmonyTarget::Local => f.write_str("local"), + HarmonyTarget::Remote => f.write_str("remote"), + } + } +} + +#[derive(Clone, Debug, clap::ValueEnum)] +enum HarmonyProfile { + Dev, + Staging, + Production, +} + +impl std::fmt::Display for HarmonyProfile { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + HarmonyProfile::Dev => f.write_str("dev"), + HarmonyProfile::Staging => f.write_str("staging"), + HarmonyProfile::Production => f.write_str("production"), + } + } +} + #[tokio::main] async fn main() { let hc_logger_handle = harmony_composer_logger::init(); @@ -122,26 +151,39 @@ async fn main() { ); } Commands::Deploy(args) => { - let deploy = if args.staging { - instrumentation::instrument(HarmonyComposerEvent::DeploymentStarted { - target: "staging".to_string(), - }) - .unwrap(); - todo!("implement staging deployment") - } else if args.prod { - instrumentation::instrument(HarmonyComposerEvent::DeploymentStarted { - target: "prod".to_string(), - }) - .unwrap(); - todo!("implement prod deployment") - } else { - instrumentation::instrument(HarmonyComposerEvent::DeploymentStarted { - target: "dev".to_string(), - }) - .unwrap(); - Command::new(harmony_bin_path).arg("-y").arg("-a").spawn() + instrumentation::instrument(HarmonyComposerEvent::DeploymentStarted { + target: args.harmony_target.clone(), + profile: args.harmony_profile.clone(), + }) + .unwrap(); + + if matches!(args.harmony_profile, HarmonyProfile::Dev) + && !matches!(args.harmony_target, HarmonyTarget::Local) + { + instrumentation::instrument(HarmonyComposerEvent::DeploymentFailed { + details: format!( + "Cannot run profile '{}' on target '{}'. Profile '{}' can run locally only.", + args.harmony_profile, args.harmony_target, args.harmony_profile + ), + }).unwrap(); + return; } - .expect("failed to run harmony deploy"); + + let use_local_k3d = match args.harmony_target { + HarmonyTarget::Local => true, + HarmonyTarget::Remote => false, + }; + + let mut command = Command::new(harmony_bin_path); + command + .env("HARMONY_USE_LOCAL_K3D", format!("{use_local_k3d}")) + .env("HARMONY_PROFILE", format!("{}", args.harmony_profile)) + .arg("-y") + .arg("-a"); + + info!("{:?}", command); + + let deploy = command.spawn().expect("failed to run harmony deploy"); let deploy_output = deploy.wait_with_output().unwrap(); debug!("{}", String::from_utf8(deploy_output.stdout).unwrap());
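// Illustrative usage of the new deploy parameters defined above (assuming the
// installed binary is named `harmony_composer`, matching the crate; flag names,
// value sets, and defaults come from the clap definitions in this patch):
//
//   harmony_composer deploy                                    # defaults: --target local --profile dev
//   harmony_composer deploy --target remote --profile staging
//   harmony_composer deploy -t local -p production
//
// The guard above rejects the `dev` profile on any target other than `local`,
// since the dev profile only makes sense against the local k3d cluster; the
// chosen values are forwarded to the harmony binary through the
// HARMONY_USE_LOCAL_K3D and HARMONY_PROFILE environment variables.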