fix: make sure demo works on both local & remote targets (#107)

* define Ntfy ingress (naive implementation) based on current target
* use patched Ntfy Helm Chart
* create Ntfy main user only if needed
* add info logs
* better error bubbling
* instrument feature installations
* upgrade prometheus alerting charts if already installed
* harmony_composer params to control deployment `target` and `profile` (see the trait sketch after this list)
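
For context, the target plumbing these bullets rely on reduces to one small capability. A minimal sketch, assuming the trait looks roughly like what the hunks below imply — only `current_target()` and the `DeploymentTarget` variants are actually visible in this diff:

```rust
// Hedged sketch: the MultiTargetTopology capability as implied by this commit.
// Only current_target() and the DeploymentTarget variants appear in the hunks
// below; the exact definition (derives, module path) is an assumption.
#[derive(Clone, Copy, Debug)]
pub enum DeploymentTarget {
    LocalDev,
    Staging,
    Production,
}

pub trait MultiTargetTopology {
    fn current_target(&self) -> DeploymentTarget;
}
```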

Co-authored-by: Ian Letourneau <letourneau.ian@gmail.com>
Co-authored-by: Jean-Gabriel Gill-Couture <jg@nationtech.io>
Reviewed-on: https://git.nationtech.io/NationTech/harmony/pulls/107
Ian Letourneau 2025-08-14 20:42:09 +00:00
parent 623a3f019b
commit d86970f81b
18 changed files with 283 additions and 145 deletions

View File

@ -7,9 +7,11 @@ use harmony::{
ApplicationScore, RustWebFramework, RustWebapp,
features::{ContinuousDelivery, Monitoring},
},
load_balancer::LoadBalancerScore,
monitoring::alert_channel::{
discord_alert_channel::DiscordWebhook, webhook_receiver::WebhookReceiver,
},
okd::bootstrap_load_balancer::OKDBootstrapLoadBalancerScore,
},
topology::{K8sAnywhereTopology, Url},
};

View File

@ -2,6 +2,8 @@ use log::debug;
use once_cell::sync::Lazy;
use tokio::sync::broadcast;
use crate::modules::application::ApplicationFeatureStatus;
use super::{
interpret::{InterpretError, Outcome},
topology::TopologyStatus,
@ -30,6 +32,12 @@ pub enum HarmonyEvent {
status: TopologyStatus,
message: Option<String>,
},
ApplicationFeatureStateChanged {
topology: String,
application: String,
feature: String,
status: ApplicationFeatureStatus,
},
}
static HARMONY_EVENT_BUS: Lazy<broadcast::Sender<HarmonyEvent>> = Lazy::new(|| {

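Usage note: the new `ApplicationFeatureStateChanged` variant travels over this same broadcast bus. A minimal consumer sketch, assuming `HARMONY_EVENT_BUS` is reachable from the subscribing module — the diff only shows the sender side:

```rust
// Hedged sketch: consuming the new event variant from the tokio broadcast bus.
// Visibility of HARMONY_EVENT_BUS outside this module is an assumption.
async fn watch_feature_installs() {
    let mut rx = HARMONY_EVENT_BUS.subscribe();
    while let Ok(event) = rx.recv().await {
        if let HarmonyEvent::ApplicationFeatureStateChanged {
            application, feature, status, ..
        } = event
        {
            println!("{application}/{feature}: {status:?}");
        }
    }
}
```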
View File

@ -120,7 +120,7 @@ impl K8sClient {
.expect("Couldn't unwrap status");
if let Some(s) = status.status {
debug!("Status: {}", s);
debug!("Status: {} - {:?}", s, status.details);
if s == "Success" { Ok(()) } else { Err(s) }
} else {
Err("Couldn't get inner status of pod exec".to_string())

View File

@ -1,7 +1,7 @@
use std::{io::Write, process::Command, sync::Arc};
use async_trait::async_trait;
use log::{debug, error};
use log::info;
use serde_yaml::Value;
use tempfile::NamedTempFile;
@ -56,14 +56,11 @@ impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
chart_url: String,
image_name: String,
) -> Result<(), String> {
error!(
"FIXME This works only with local k3d installations, which is fine only for current demo purposes. We assume usage of K8sAnywhereTopology"
);
error!("TODO hardcoded k3d bin path is wrong");
// TODO: This works only with local k3d installations, which is fine only for current demo purposes. We assume usage of K8sAnywhereTopology
// https://git.nationtech.io/NationTech/harmony/issues/106
let k3d_bin_path = (*HARMONY_DATA_DIR).join("k3d").join("k3d");
// --- 1. Import the container image into the k3d cluster ---
debug!(
info!(
"Importing image '{}' into k3d cluster 'harmony'",
image_name
);
@ -80,7 +77,7 @@ impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
}
// --- 2. Get the kubeconfig for the k3d cluster and write it to a temp file ---
debug!("Retrieving kubeconfig for k3d cluster 'harmony'");
info!("Retrieving kubeconfig for k3d cluster 'harmony'");
let kubeconfig_output = Command::new(&k3d_bin_path)
.args(["kubeconfig", "get", "harmony"])
.output()
@ -101,7 +98,7 @@ impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
let kubeconfig_path = temp_kubeconfig.path().to_str().unwrap();
// --- 3. Install or upgrade the Helm chart in the cluster ---
debug!(
info!(
"Deploying Helm chart '{}' to namespace '{}'",
chart_url, app_name
);
@ -131,7 +128,7 @@ impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
));
}
debug!("Successfully deployed '{}' to local k3d cluster.", app_name);
info!("Successfully deployed '{}' to local k3d cluster.", app_name);
Ok(())
}
}
@ -151,14 +148,12 @@ impl<
// Or ask for it when unknown
let helm_chart = self.application.build_push_helm_package(&image).await?;
debug!("Pushed new helm chart {helm_chart}");
error!("TODO Make building image configurable/skippable if image already exists (prompt)");
// TODO: Make building image configurable/skippable if image already exists (prompt)
// https://git.nationtech.io/NationTech/harmony/issues/104
let image = self.application.build_push_oci_image().await?;
debug!("Pushed new docker image {image}");
debug!("Installing ContinuousDelivery feature");
// TODO this is a temporary hack for demo purposes, the deployment target should be driven
// TODO: this is a temporary hack for demo purposes; the deployment target should be driven
// by the topology only and we should not have to know how to perform tasks like this for
// which the topology should be responsible.
//
@ -171,17 +166,20 @@ impl<
// access it. This forces every Topology to understand the concept of targets though... So
// instead I'll create a new Capability which is MultiTargetTopology and we'll see how it
// goes. It still does not feel right though.
//
// https://git.nationtech.io/NationTech/harmony/issues/106
match topology.current_target() {
DeploymentTarget::LocalDev => {
info!("Deploying {} locally...", self.application.name());
self.deploy_to_local_k3d(self.application.name(), helm_chart, image)
.await?;
}
target => {
debug!("Deploying to target {target:?}");
info!("Deploying {} to target {target:?}", self.application.name());
let score = ArgoHelmScore {
namespace: "harmonydemo-staging".to_string(),
openshift: false,
domain: "argo.harmonydemo.apps.st.mcd".to_string(),
namespace: "harmony-example-rust-webapp".to_string(),
openshift: true,
domain: "argo.harmonydemo.apps.ncd0.harmony.mcd".to_string(),
argo_apps: vec![ArgoApplication::from(CDApplicationConfig {
// helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0
version: Version::from("0.1.0").unwrap(),
@ -189,7 +187,7 @@ impl<
helm_chart_name: "harmony-example-rust-webapp-chart".to_string(),
values_overrides: None,
name: "harmony-demo-rust-webapp".to_string(),
namespace: "harmonydemo-staging".to_string(),
namespace: "harmony-example-rust-webapp".to_string(),
})],
};
score

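The match above only consumes `current_target()`; how the topology computes it is not part of this diff. Since harmony_composer (last file below) exports `HARMONY_USE_LOCAL_K3D` and `HARMONY_PROFILE`, one plausible reading side looks like the sketch below — an assumption, not the committed implementation:

```rust
// Hedged sketch: mapping the env vars harmony_composer sets to a target.
// Uses the DeploymentTarget enum from the diff; only the writing side
// (HARMONY_USE_LOCAL_K3D, HARMONY_PROFILE) is actually shown in this commit.
use std::env;

fn current_target_from_env() -> DeploymentTarget {
    let local = env::var("HARMONY_USE_LOCAL_K3D")
        .map(|v| v == "true")
        .unwrap_or(true); // assumption: default to local when unset

    if local {
        return DeploymentTarget::LocalDev;
    }
    match env::var("HARMONY_PROFILE").as_deref() {
        Ok("production") => DeploymentTarget::Production,
        _ => DeploymentTarget::Staging,
    }
}
```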
View File

@ -1,5 +1,4 @@
use async_trait::async_trait;
use log::error;
use non_blank_string_rs::NonBlankString;
use serde::Serialize;
use std::str::FromStr;
@ -50,7 +49,6 @@ impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for ArgoInterpret {
inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
error!("Uncomment below, only disabled for debugging");
self.score.interpret(inventory, topology).await?;
let k8s_client = topology.k8s_client().await?;
@ -58,9 +56,14 @@ impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for ArgoInterpret {
.apply_yaml_many(&self.argo_apps.iter().map(|a| a.to_yaml()).collect(), None)
.await
.unwrap();
Ok(Outcome::success(format!(
"ArgoCD installed with {} applications",
self.argo_apps.len()
"ArgoCD installed with {} {}",
self.argo_apps.len(),
match self.argo_apps.len() {
1 => "application",
_ => "applications",
}
)))
}

View File

@ -4,6 +4,7 @@ use crate::modules::application::{Application, ApplicationFeature};
use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus;
use crate::topology::MultiTargetTopology;
use crate::{
inventory::Inventory,
modules::monitoring::{
@ -33,6 +34,7 @@ impl<
+ 'static
+ TenantManager
+ K8sclient
+ MultiTargetTopology
+ std::fmt::Debug
+ PrometheusApplicationMonitoring<CRDPrometheus>,
> ApplicationFeature<T> for Monitoring
@ -55,11 +57,11 @@ impl<
};
let ntfy = NtfyScore {
namespace: namespace.clone(),
host: "localhost".to_string(),
host: "ntfy.harmonydemo.apps.ncd0.harmony.mcd".to_string(),
};
ntfy.interpret(&Inventory::empty(), topology)
.await
.expect("couldn't create interpret for ntfy");
.map_err(|e| e.to_string())?;
let ntfy_default_auth_username = "harmony";
let ntfy_default_auth_password = "harmony";
@ -96,7 +98,7 @@ impl<
alerting_score
.interpret(&Inventory::empty(), topology)
.await
.unwrap();
.map_err(|e| e.to_string())?;
Ok(())
}
fn name(&self) -> String {

View File

@ -14,11 +14,19 @@ use serde::Serialize;
use crate::{
data::{Id, Version},
instrumentation::{self, HarmonyEvent},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
topology::Topology,
};
#[derive(Clone, Debug)]
pub enum ApplicationFeatureStatus {
Installing,
Installed,
Failed { details: String },
}
pub trait Application: std::fmt::Debug + Send + Sync {
fn name(&self) -> String;
}
@ -47,13 +55,34 @@ impl<A: Application, T: Topology + std::fmt::Debug> Interpret<T> for Application
.join(", ")
);
for feature in self.features.iter() {
debug!(
"Installing feature {} for application {app_name}",
feature.name()
);
instrumentation::instrument(HarmonyEvent::ApplicationFeatureStateChanged {
topology: topology.name().into(),
application: self.application.name(),
feature: feature.name(),
status: ApplicationFeatureStatus::Installing,
})
.unwrap();
let _ = match feature.ensure_installed(topology).await {
Ok(()) => (),
Ok(()) => {
instrumentation::instrument(HarmonyEvent::ApplicationFeatureStateChanged {
topology: topology.name().into(),
application: self.application.name(),
feature: feature.name(),
status: ApplicationFeatureStatus::Installed,
})
.unwrap();
}
Err(msg) => {
instrumentation::instrument(HarmonyEvent::ApplicationFeatureStateChanged {
topology: topology.name().into(),
application: self.application.name(),
feature: feature.name(),
status: ApplicationFeatureStatus::Failed {
details: msg.clone(),
},
})
.unwrap();
return Err(InterpretError::new(format!(
"Application Interpret failed to install feature : {msg}"
)));

View File

@ -10,7 +10,7 @@ use dockerfile_builder::Dockerfile;
use dockerfile_builder::instruction::{CMD, COPY, ENV, EXPOSE, FROM, RUN, USER, WORKDIR};
use dockerfile_builder::instruction_builder::CopyBuilder;
use futures_util::StreamExt;
use log::{debug, error, log_enabled};
use log::{debug, info, log_enabled};
use serde::Serialize;
use tar::Archive;
@ -73,19 +73,19 @@ impl Application for RustWebapp {
#[async_trait]
impl HelmPackage for RustWebapp {
async fn build_push_helm_package(&self, image_url: &str) -> Result<String, String> {
debug!("Starting Helm chart build and push for '{}'", self.name);
info!("Starting Helm chart build and push for '{}'", self.name);
// 1. Create the Helm chart files on disk.
let chart_dir = self
.create_helm_chart_files(image_url)
.map_err(|e| format!("Failed to create Helm chart files: {}", e))?;
debug!("Successfully created Helm chart files in {:?}", chart_dir);
info!("Successfully created Helm chart files in {:?}", chart_dir);
// 2. Package the chart into a .tgz archive.
let packaged_chart_path = self
.package_helm_chart(&chart_dir)
.map_err(|e| format!("Failed to package Helm chart: {}", e))?;
debug!(
info!(
"Successfully packaged Helm chart: {}",
packaged_chart_path.to_string_lossy()
);
@ -94,7 +94,7 @@ impl HelmPackage for RustWebapp {
let oci_chart_url = self
.push_helm_chart(&packaged_chart_path)
.map_err(|e| format!("Failed to push Helm chart: {}", e))?;
debug!("Successfully pushed Helm chart to: {}", oci_chart_url);
info!("Successfully pushed Helm chart to: {}", oci_chart_url);
Ok(oci_chart_url)
}
@ -107,20 +107,20 @@ impl OCICompliant for RustWebapp {
async fn build_push_oci_image(&self) -> Result<String, String> {
// This function orchestrates the build and push process.
// It's async to match the trait definition, though the underlying docker commands are blocking.
debug!("Starting OCI image build and push for '{}'", self.name);
info!("Starting OCI image build and push for '{}'", self.name);
// 1. Build the image by calling the synchronous helper function.
let image_tag = self.image_name();
self.build_docker_image(&image_tag)
.await
.map_err(|e| format!("Failed to build Docker image: {}", e))?;
debug!("Successfully built Docker image: {}", image_tag);
info!("Successfully built Docker image: {}", image_tag);
// 2. Push the image to the registry.
self.push_docker_image(&image_tag)
.await
.map_err(|e| format!("Failed to push Docker image: {}", e))?;
debug!("Successfully pushed Docker image to: {}", image_tag);
info!("Successfully pushed Docker image to: {}", image_tag);
Ok(image_tag)
}
@ -195,7 +195,7 @@ impl RustWebapp {
);
while let Some(msg) = image_build_stream.next().await {
println!("Message: {msg:?}");
debug!("Message: {msg:?}");
}
Ok(image_name.to_string())
@ -219,7 +219,7 @@ impl RustWebapp {
);
while let Some(msg) = push_image_stream.next().await {
println!("Message: {msg:?}");
debug!("Message: {msg:?}");
}
Ok(image_tag.to_string())
@ -288,9 +288,8 @@ impl RustWebapp {
.unwrap(),
);
// Copy the compiled binary from the builder stage.
error!(
"FIXME Should not be using score name here, instead should use name from Cargo.toml"
);
// TODO: Should not be using score name here, instead should use name from Cargo.toml
// https://git.nationtech.io/NationTech/harmony/issues/105
let binary_path_in_builder = format!("/app/target/release/{}", self.name);
let binary_path_in_final = format!("/home/appuser/{}", self.name);
dockerfile.push(
@ -328,9 +327,8 @@ impl RustWebapp {
));
// Copy only the compiled binary from the builder stage.
error!(
"FIXME Should not be using score name here, instead should use name from Cargo.toml"
);
// TODO: Should not be using score name here, instead should use name from Cargo.toml
// https://git.nationtech.io/NationTech/harmony/issues/105
let binary_path_in_builder = format!("/app/target/release/{}", self.name);
let binary_path_in_final = format!("/usr/local/bin/{}", self.name);
dockerfile.push(

View File

@ -1,9 +1,28 @@
use non_blank_string_rs::NonBlankString;
use std::str::FromStr;
use crate::modules::helm::chart::{HelmChartScore, HelmRepository};
use crate::{
modules::helm::chart::{HelmChartScore, HelmRepository},
topology::DeploymentTarget,
};
pub fn ntfy_helm_chart_score(
namespace: String,
host: String,
target: DeploymentTarget,
) -> HelmChartScore {
// TODO: not actually the correct logic; this should be fixed by using an Ingress, which is
// the standard k8s mechanism.
//
// Another option is to delegate to the topology the ingress technology it wants to use:
// Route, Ingress or other.
let route_enabled = match target {
DeploymentTarget::LocalDev => false,
DeploymentTarget::Staging => true,
DeploymentTarget::Production => true,
};
let ingress_enabled = !route_enabled;
pub fn ntfy_helm_chart_score(namespace: String, host: String) -> HelmChartScore {
let values = format!(
r#"
replicaCount: 1
@ -25,23 +44,14 @@ serviceAccount:
service:
type: ClusterIP
port: 80
port: 8080
ingress:
enabled: true
# annotations:
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: {host}
paths:
- path: /
pathType: ImplementationSpecific
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
enabled: {ingress_enabled}
route:
enabled: {route_enabled}
host: {host}
autoscaling:
enabled: false
@ -49,7 +59,7 @@ autoscaling:
config:
enabled: true
data:
# base-url: "https://ntfy.something.com"
base-url: "https://{host}"
auth-file: "/var/cache/ntfy/user.db"
auth-default-access: "deny-all"
cache-file: "/var/cache/ntfy/cache.db"
@ -59,6 +69,7 @@ config:
enable-signup: false
enable-login: "true"
enable-metrics: "true"
listen-http: ":8080"
persistence:
enabled: true
@ -69,16 +80,12 @@ persistence:
HelmChartScore {
namespace: Some(NonBlankString::from_str(&namespace).unwrap()),
release_name: NonBlankString::from_str("ntfy").unwrap(),
chart_name: NonBlankString::from_str("sarab97/ntfy").unwrap(),
chart_version: Some(NonBlankString::from_str("0.1.7").unwrap()),
chart_name: NonBlankString::from_str("oci://hub.nationtech.io/harmony/ntfy").unwrap(),
chart_version: Some(NonBlankString::from_str("0.1.7-nationtech.1").unwrap()),
values_overrides: None,
values_yaml: Some(values.to_string()),
create_namespace: true,
install_only: false,
repository: Some(HelmRepository::new(
"sarab97".to_string(),
url::Url::parse("https://charts.sarabsingh.com").unwrap(),
true,
)),
repository: None,
}
}

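A usage sketch of the new three-argument signature; the namespace and host below are placeholders, not values from the commit:

```rust
// Hedged usage sketch: the added DeploymentTarget parameter now decides
// between an OpenShift Route and a plain Ingress.
let score = ntfy_helm_chart_score(
    "monitoring".to_string(),        // hypothetical namespace
    "ntfy.example.com".to_string(),  // hypothetical host
    DeploymentTarget::Staging,       // route: enabled, ingress: disabled
);
```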
View File

@ -1,7 +1,7 @@
use std::sync::Arc;
use async_trait::async_trait;
use log::debug;
use log::info;
use serde::Serialize;
use strum::{Display, EnumString};
@ -11,7 +11,7 @@ use crate::{
inventory::Inventory,
modules::monitoring::ntfy::helm::ntfy_helm_chart::ntfy_helm_chart_score,
score::Score,
topology::{HelmCommand, K8sclient, Topology, k8s::K8sClient},
topology::{HelmCommand, K8sclient, MultiTargetTopology, Topology, k8s::K8sClient},
};
#[derive(Debug, Clone, Serialize)]
@ -20,7 +20,7 @@ pub struct NtfyScore {
pub host: String,
}
impl<T: Topology + HelmCommand + K8sclient> Score<T> for NtfyScore {
impl<T: Topology + HelmCommand + K8sclient + MultiTargetTopology> Score<T> for NtfyScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(NtfyInterpret {
score: self.clone(),
@ -77,7 +77,7 @@ impl NtfyInterpret {
vec![
"sh",
"-c",
format!("NTFY_PASSWORD={password} ntfy user add --role={role} {username}")
format!("NTFY_PASSWORD={password} ntfy user add --role={role} --ignore-exists {username}")
.as_str(),
],
)
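Note: `--ignore-exists` is what implements the "create Ntfy main user only if needed" bullet — per the flag name, `ntfy user add` succeeds instead of erroring when the user is already present, so the interpret is safe to re-run.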
@ -89,22 +89,27 @@ impl NtfyInterpret {
/// We need an Ntfy interpret to wrap the HelmChartScore in order to run the score, and then bootstrap the config inside ntfy
#[async_trait]
impl<T: Topology + HelmCommand + K8sclient> Interpret<T> for NtfyInterpret {
impl<T: Topology + HelmCommand + K8sclient + MultiTargetTopology> Interpret<T> for NtfyInterpret {
async fn execute(
&self,
inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
ntfy_helm_chart_score(self.score.namespace.clone(), self.score.host.clone())
.interpret(inventory, topology)
.await?;
ntfy_helm_chart_score(
self.score.namespace.clone(),
self.score.host.clone(),
topology.current_target(),
)
.interpret(inventory, topology)
.await?;
debug!("installed ntfy helm chart");
info!("installed ntfy helm chart");
let client = topology
.k8s_client()
.await
.expect("couldn't get k8s client");
info!("deploying ntfy...");
client
.wait_until_deployment_ready(
"ntfy".to_string(),
@ -112,12 +117,12 @@ impl<T: Topology + HelmCommand + K8sclient> Interpret<T> for NtfyInterpret {
None,
)
.await?;
debug!("created k8s client");
info!("ntfy deployed");
info!("adding user harmony");
self.add_user(client, "harmony", "harmony", Some(NtfyRole::Admin))
.await?;
debug!("exec into pod done");
info!("user added");
Ok(Outcome::success("Ntfy installed".to_string()))
}

View File

@ -166,7 +166,8 @@ impl K8sPrometheusCRDAlertingInterpret {
let install_output = Command::new("helm")
.args([
"install",
"upgrade",
"--install",
&chart_name,
tgz_path.to_str().unwrap(),
"--namespace",

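Note: switching `install` to `upgrade --install` is helm's idempotent form — it upgrades the release when it already exists and installs it otherwise, which is what the "upgrade prometheus alerting charts if already installed" bullet refers to.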
View File

@ -1,10 +1,16 @@
use harmony::{
instrumentation::{self, HarmonyEvent},
modules::application::ApplicationFeatureStatus,
topology::TopologyStatus,
};
use indicatif::MultiProgress;
use indicatif_log_bridge::LogWrapper;
use std::sync::{Arc, Mutex};
use log::error;
use std::{
sync::{Arc, Mutex},
thread,
time::Duration,
};
use crate::progress::{IndicatifProgressTracker, ProgressTracker};
@ -58,6 +64,7 @@ async fn handle_events(base_progress: MultiProgress) {
&format!("\n{} Harmony completed\n\n", crate::theme::EMOJI_HARMONY),
);
progress_tracker.add_section("harmony-finished", "\n\n");
thread::sleep(Duration::from_millis(200));
return false;
}
HarmonyEvent::TopologyStateChanged {
@ -156,10 +163,40 @@ async fn handle_events(base_progress: MultiProgress) {
_ => progress_tracker.fail_task(&task_key, &outcome.message),
},
Err(err) => {
error!("Interpret error: {err}");
progress_tracker.fail_task(&task_key, &err.to_string());
}
}
}
HarmonyEvent::ApplicationFeatureStateChanged {
topology: _,
application,
feature,
status,
} => {
if let Some(score) = &(*current_score) {
let section_key = score_key(score);
let task_key = app_feature_key(&application, &feature);
match status {
ApplicationFeatureStatus::Installing => {
let message = format!("Feature '{}' installing...", feature);
progress_tracker.add_task(&section_key, &task_key, &message);
}
ApplicationFeatureStatus::Installed => {
let message = format!("Feature '{}' installed", feature);
progress_tracker.finish_task(&task_key, &message);
}
ApplicationFeatureStatus::Failed { details } => {
let message = format!(
"Feature '{}' installation failed: {}",
feature, details
);
progress_tracker.fail_task(&task_key, &message);
}
}
}
}
}
true
}
@ -175,3 +212,7 @@ fn topology_key(topology: &str) -> String {
fn score_key(score: &str) -> String {
format!("score-{score}")
}
fn app_feature_key(application: &str, feature: &str) -> String {
format!("app-{application}-{feature}")
}

View File

@ -132,8 +132,9 @@ async fn init<T: Topology + Send + Sync + 'static>(
// if list option is specified, print filtered list and exit
if args.list {
println!("Available scores:");
println!("{}", list_scores_with_index(&scores_vec));
let num_scores = scores_vec.len();
println!("Available scores {num_scores}:");
println!("{}\n\n", list_scores_with_index(&scores_vec));
return Ok(());
}

View File

@ -33,29 +33,13 @@ pub struct IndicatifProgressTracker {
impl IndicatifProgressTracker {
pub fn new(base: MultiProgress) -> Self {
// The indicatif log bridge will insert a progress bar at the top.
// To prevent our first section from being erased, we need to create
// a dummy progress bar as our first progress bar.
let _ = base.clear();
let log_pb = base.add(ProgressBar::new(1));
let mut sections = HashMap::new();
sections.insert(
"__log__".into(),
Section {
header_index: 0,
task_count: 0,
pb: log_pb.clone(),
},
);
let mut tasks = HashMap::new();
tasks.insert("__log__".into(), log_pb);
let sections = HashMap::new();
let tasks = HashMap::new();
let state = Arc::new(Mutex::new(IndicatifProgressTrackerState {
sections,
tasks,
pb_count: 1,
pb_count: 0,
}));
Self { mp: base, state }

View File

@ -21,10 +21,14 @@ lazy_static! {
pub static ref SUCCESS_SPINNER_STYLE: ProgressStyle = SPINNER_STYLE
.clone()
.tick_strings(&[format!("{}", EMOJI_SUCCESS).as_str()]);
pub static ref SKIP_SPINNER_STYLE: ProgressStyle = SPINNER_STYLE
pub static ref SKIP_SPINNER_STYLE: ProgressStyle = ProgressStyle::default_spinner()
.template(" {spinner:.orange} {wide_msg}")
.unwrap()
.clone()
.tick_strings(&[format!("{}", EMOJI_SKIP).as_str()]);
pub static ref ERROR_SPINNER_STYLE: ProgressStyle = SPINNER_STYLE
pub static ref ERROR_SPINNER_STYLE: ProgressStyle = ProgressStyle::default_spinner()
.template(" {spinner:.red} {wide_msg}")
.unwrap()
.clone()
.tick_strings(&[format!("{}", EMOJI_ERROR).as_str()]);
}

View File

@ -1,6 +1,5 @@
use harmony_cli::progress::{IndicatifProgressTracker, ProgressTracker};
use indicatif::MultiProgress;
use log::error;
use std::sync::Arc;
use crate::instrumentation::{self, HarmonyComposerEvent};
@ -53,15 +52,13 @@ pub async fn handle_events() {
progress_tracker.finish_task(COMPILTATION_TASK, "project compiled");
}
HarmonyComposerEvent::ProjectCompilationFailed { details } => {
progress_tracker.fail_task(COMPILTATION_TASK, "failed to compile project");
error!("{details}");
progress_tracker.fail_task(COMPILTATION_TASK, &format!("failed to compile project:\n{details}"));
}
HarmonyComposerEvent::DeploymentStarted { target } => {
HarmonyComposerEvent::DeploymentStarted { target, profile } => {
progress_tracker.add_section(
PROGRESS_DEPLOYMENT,
&format!(
"\n{} Deploying project to {target}...\n",
"\n{} Deploying project on target '{target}' with profile '{profile}'...\n",
harmony_cli::theme::EMOJI_DEPLOY,
),
);
@ -69,6 +66,10 @@ pub async fn handle_events() {
HarmonyComposerEvent::DeploymentCompleted => {
progress_tracker.clear();
}
HarmonyComposerEvent::DeploymentFailed { details } => {
progress_tracker.add_task(PROGRESS_DEPLOYMENT, "deployment-failed", "");
progress_tracker.fail_task("deployment-failed", &details);
},
HarmonyComposerEvent::Shutdown => {
return false;
}

View File

@ -2,16 +2,28 @@ use log::debug;
use once_cell::sync::Lazy;
use tokio::sync::broadcast;
use crate::{HarmonyProfile, HarmonyTarget};
#[derive(Debug, Clone)]
pub enum HarmonyComposerEvent {
HarmonyComposerStarted,
ProjectInitializationStarted,
ProjectInitialized,
ProjectCompilationStarted { details: String },
ProjectCompilationStarted {
details: String,
},
ProjectCompiled,
ProjectCompilationFailed { details: String },
DeploymentStarted { target: String },
ProjectCompilationFailed {
details: String,
},
DeploymentStarted {
target: HarmonyTarget,
profile: HarmonyProfile,
},
DeploymentCompleted,
DeploymentFailed {
details: String,
},
Shutdown,
}

View File

@ -49,14 +49,11 @@ struct CheckArgs {
#[derive(Args, Clone, Debug)]
struct DeployArgs {
#[arg(long, default_value_t = false)]
staging: bool,
#[arg(long = "target", short = 't', default_value = "local")]
harmony_target: HarmonyTarget,
#[arg(long, default_value_t = false)]
prod: bool,
#[arg(long, default_value_t = false)]
smoke_test: bool,
#[arg(long = "profile", short = 'p', default_value = "dev")]
harmony_profile: HarmonyProfile,
}
#[derive(Args, Clone, Debug)]
@ -68,6 +65,38 @@ struct AllArgs {
deploy: DeployArgs,
}
#[derive(Clone, Debug, clap::ValueEnum)]
enum HarmonyTarget {
Local,
Remote,
}
impl std::fmt::Display for HarmonyTarget {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
HarmonyTarget::Local => f.write_str("local"),
HarmonyTarget::Remote => f.write_str("remote"),
}
}
}
#[derive(Clone, Debug, clap::ValueEnum)]
enum HarmonyProfile {
Dev,
Staging,
Production,
}
impl std::fmt::Display for HarmonyProfile {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
HarmonyProfile::Dev => f.write_str("dev"),
HarmonyProfile::Staging => f.write_str("staging"),
HarmonyProfile::Production => f.write_str("production"),
}
}
}
#[tokio::main]
async fn main() {
let hc_logger_handle = harmony_composer_logger::init();
@ -122,26 +151,39 @@ async fn main() {
);
}
Commands::Deploy(args) => {
let deploy = if args.staging {
instrumentation::instrument(HarmonyComposerEvent::DeploymentStarted {
target: "staging".to_string(),
})
.unwrap();
todo!("implement staging deployment")
} else if args.prod {
instrumentation::instrument(HarmonyComposerEvent::DeploymentStarted {
target: "prod".to_string(),
})
.unwrap();
todo!("implement prod deployment")
} else {
instrumentation::instrument(HarmonyComposerEvent::DeploymentStarted {
target: "dev".to_string(),
})
.unwrap();
Command::new(harmony_bin_path).arg("-y").arg("-a").spawn()
instrumentation::instrument(HarmonyComposerEvent::DeploymentStarted {
target: args.harmony_target.clone(),
profile: args.harmony_profile.clone(),
})
.unwrap();
if matches!(args.harmony_profile, HarmonyProfile::Dev)
&& !matches!(args.harmony_target, HarmonyTarget::Local)
{
instrumentation::instrument(HarmonyComposerEvent::DeploymentFailed {
details: format!(
"Cannot run profile '{}' on target '{}'. Profile '{}' can run locally only.",
args.harmony_profile, args.harmony_target, args.harmony_profile
),
}).unwrap();
return;
}
.expect("failed to run harmony deploy");
let use_local_k3d = match args.harmony_target {
HarmonyTarget::Local => true,
HarmonyTarget::Remote => false,
};
let mut command = Command::new(harmony_bin_path);
command
.env("HARMONY_USE_LOCAL_K3D", format!("{use_local_k3d}"))
.env("HARMONY_PROFILE", format!("{}", args.harmony_profile))
.arg("-y")
.arg("-a");
info!("{:?}", command);
let deploy = command.spawn().expect("failed to run harmony deploy");
let deploy_output = deploy.wait_with_output().unwrap();
debug!("{}", String::from_utf8(deploy_output.stdout).unwrap());
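
Taken together, a remote staging deploy now reads `harmony_composer deploy -t remote -p staging` (the `deploy` subcommand name is inferred from the `Commands::Deploy` arm above), and the dev profile is rejected on anything but the local target. A hedged sketch of that guard as a standalone predicate — not code from the commit:

```rust
// Hedged sketch: the profile/target compatibility rule enforced above,
// extracted as a predicate. The dev profile drives a local k3d cluster,
// so it may only run on the local target.
fn is_valid_combination(profile: &HarmonyProfile, target: &HarmonyTarget) -> bool {
    match (profile, target) {
        (HarmonyProfile::Dev, HarmonyTarget::Local) => true,
        (HarmonyProfile::Dev, _) => false,
        _ => true,
    }
}
```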