fix(cli): reduce noise & better track progress within Harmony (#91)
Introduce a way to instrument what happens within Harmony and around it (e.g. in the CLI or in Composer). The goal is to provide visual feedback to end users and inform them of the progress of their tasks (e.g. deployment) as clearly as possible. It is important to also let them know of the outcome of their tasks (what was created, where to access things, etc.).

<img src="https://media.discordapp.net/attachments/1295353830300713062/1400289618636574741/demo.gif?ex=688c18d5&is=688ac755&hm=2c70884aacb08f7bd15cbb65a7562a174846906718aa15294bbb238e64febbce&=" />

## Changes

### Instrumentation architecture

Extensibility and ease of use are key here, while preserving type safety as much as possible. The proposed API is quite simple:

```rs
// Emit an event
instrumentation::instrument(HarmonyEvent::TopologyPrepared {
    topology: "k8s-anywhere".to_string(),
    outcome: Outcome::success("yay"),
});

// Consume events
instrumentation::subscribe("Harmony CLI Logger", async |event| {
    match event {
        HarmonyEvent::TopologyPrepared { topology, outcome } => todo!(),
        _ => {}
    }
    true // keep listening
});
```

#### Current limitations

* this API is not very extensible, but it could easily be changed to let end users define custom events in addition to Harmony core events
* we use a tokio broadcast channel behind the scenes, so only in-process communication is possible; but since the implementation details are hidden, it could easily be swapped for a more flexible communication mechanism

### `harmony_composer` VS `harmony_cli`

As Harmony Composer launches commands from Harmony (CLI), the two live in different processes. Because of this, we cannot easily make all the logging happen in one place (Harmony Composer) and get rid of Harmony CLI, at least not without introducing additional complexity such as communication through a server, a unix socket, etc.

So for the time being, it was decided to keep both `harmony_composer` and `harmony_cli` and let them log independently and handle their own responsibilities:

* `harmony_composer`: takes care only of setting up & packaging a project, and delegates everything else to `harmony_cli`
* `harmony_cli`: takes care of configuring & running Harmony

### Logging & prompts

* [indicatif](https://github.com/console-rs/indicatif) is used to create progress bars and track progress within Harmony, Harmony CLI, and Harmony Composer
* [inquire](https://github.com/mikaelmello/inquire) is kept, but was removed from `harmony` (core) as UI concerns shouldn't go that deep
  * note: for now the only prompt we had was simply deleted; we'll have to find a better way to prompt users in the future

## Todos

* [ ] Update/Create ADRs
* [ ] Continue instrumentation for missing branches
* [ ] Allow instrumentation to emit and subscribe to custom events

Co-authored-by: Ian Letourneau <letourneau.ian@gmail.com>
Reviewed-on: NationTech/harmony#91
Reviewed-by: johnride <jg@nationtech.io>
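To make the intended CLI feedback loop concrete, here is a minimal sketch, not part of this commit, of how a subscriber could drive an [indicatif](https://github.com/console-rs/indicatif) spinner from these events. The `HarmonyEvent` variants match the enum introduced below; `run_cli_logger`, the import path, and the spinner messages are assumptions.

```rs
use std::time::Duration;

use harmony::instrumentation::{self, HarmonyEvent}; // assumed public re-export
use indicatif::ProgressBar;

// Hypothetical CLI-side logger: translates Harmony events into spinner updates.
async fn run_cli_logger() {
    let spinner = ProgressBar::new_spinner();
    spinner.enable_steady_tick(Duration::from_millis(100));

    instrumentation::subscribe("Harmony CLI Logger", move |event| {
        let spinner = spinner.clone(); // ProgressBar is Arc-based, cheap to clone
        async move {
            match event {
                HarmonyEvent::PrepareTopologyStarted { topology } => {
                    spinner.set_message(format!("Preparing topology '{topology}'..."));
                }
                HarmonyEvent::TopologyPrepared { topology, outcome } => {
                    spinner.println(format!("Topology '{topology}' prepared: {outcome:?}"));
                }
                _ => {}
            }
            true // keep listening until the channel closes
        }
    })
    .await;
}
```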
harmony/src/domain/instrumentation.rs (new file, 63 lines)
@@ -0,0 +1,63 @@
+use log::debug;
+use once_cell::sync::Lazy;
+use tokio::sync::broadcast;
+
+use super::interpret::{InterpretError, Outcome};
+
+#[derive(Debug, Clone)]
+pub enum HarmonyEvent {
+    HarmonyStarted,
+    PrepareTopologyStarted {
+        topology: String,
+    },
+    TopologyPrepared {
+        topology: String,
+        outcome: Outcome,
+    },
+    InterpretExecutionStarted {
+        topology: String,
+        interpret: String,
+        message: String,
+    },
+    InterpretExecutionFinished {
+        topology: String,
+        interpret: String,
+        outcome: Result<Outcome, InterpretError>,
+    },
+}
+
+static HARMONY_EVENT_BUS: Lazy<broadcast::Sender<HarmonyEvent>> = Lazy::new(|| {
+    // TODO: Adjust channel capacity
+    let (tx, _rx) = broadcast::channel(100);
+    tx
+});
+
+pub fn instrument(event: HarmonyEvent) -> Result<(), &'static str> {
+    match HARMONY_EVENT_BUS.send(event) {
+        Ok(_) => Ok(()),
+        Err(_) => Err("send error: no subscribers"),
+    }
+}
+
+pub async fn subscribe<F, Fut>(name: &str, mut handler: F)
+where
+    F: FnMut(HarmonyEvent) -> Fut + Send + 'static,
+    Fut: Future<Output = bool> + Send,
+{
+    let mut rx = HARMONY_EVENT_BUS.subscribe();
+    debug!("[{name}] Service started. Listening for events...");
+    loop {
+        match rx.recv().await {
+            Ok(event) => {
+                if !handler(event).await {
+                    debug!("[{name}] Handler requested exit.");
+                    break;
+                }
+            }
+            Err(broadcast::error::RecvError::Lagged(n)) => {
+                debug!("[{name}] Lagged behind by {n} messages.");
+            }
+            Err(_) => break,
+        }
+    }
+}
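One behavioral detail of the bus above worth calling out: `instrument` delegates to `broadcast::Sender::send`, which errors while no receiver exists, so subscribers must be registered before events start flowing. A minimal sketch of that ordering, assuming a current-thread runtime so `yield_now` is enough to let the spawned subscriber register its receiver (real code should use an explicit readiness signal):

```rs
use harmony::instrumentation::{self, HarmonyEvent}; // assumed public re-export

#[tokio::main(flavor = "current_thread")]
async fn main() {
    // Nobody is subscribed yet: broadcast::Sender::send returns Err.
    assert!(instrumentation::instrument(HarmonyEvent::HarmonyStarted).is_err());

    // Spawn a subscriber first...
    tokio::spawn(instrumentation::subscribe("demo", |event| async move {
        println!("got {event:?}");
        false // unsubscribe after the first event
    }));
    // ...and let it reach its first await, registering a receiver (illustrative only).
    tokio::task::yield_now().await;

    // Now there is a receiver, so emitting succeeds.
    assert!(instrumentation::instrument(HarmonyEvent::HarmonyStarted).is_ok());
}
```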
@@ -1,6 +1,8 @@
 use std::sync::{Arc, Mutex, RwLock};
 
-use log::{info, warn};
+use log::{debug, info, warn};
+
+use crate::instrumentation::{self, HarmonyEvent};
 
 use super::{
     interpret::{InterpretError, InterpretStatus, Outcome},

@@ -40,13 +42,18 @@ impl<T: Topology> Maestro<T> {
     /// Ensures the associated Topology is ready for operations.
     /// Delegates the readiness check and potential setup actions to the Topology.
     pub async fn prepare_topology(&self) -> Result<Outcome, InterpretError> {
-        info!("Ensuring topology '{}' is ready...", self.topology.name());
+        instrumentation::instrument(HarmonyEvent::PrepareTopologyStarted {
+            topology: self.topology.name().to_string(),
+        })
+        .unwrap();
+
         let outcome = self.topology.ensure_ready().await?;
-        info!(
-            "Topology '{}' readiness check complete: {}",
-            self.topology.name(),
-            outcome.status
-        );
+
+        instrumentation::instrument(HarmonyEvent::TopologyPrepared {
+            topology: self.topology.name().to_string(),
+            outcome: outcome.clone(),
+        })
+        .unwrap();
 
         self.topology_preparation_result
             .lock()

@@ -80,11 +87,11 @@ impl<T: Topology> Maestro<T> {
                 self.topology.name(),
             );
         }
-        info!("Running score {score:?}");
+        debug!("Running score {score:?}");
         let interpret = score.create_interpret();
-        info!("Launching interpret {interpret:?}");
+        debug!("Launching interpret {interpret:?}");
         let result = interpret.execute(&self.inventory, &self.topology).await;
-        info!("Got result {result:?}");
+        debug!("Got result {result:?}");
         result
     }
 

@@ -3,6 +3,7 @@ pub mod data;
 pub mod executors;
 pub mod filter;
 pub mod hardware;
+pub mod instrumentation;
 pub mod interpret;
 pub mod inventory;
 pub mod maestro;

@@ -1,7 +1,6 @@
 use std::{process::Command, sync::Arc};
 
 use async_trait::async_trait;
-use inquire::Confirm;
 use log::{debug, info, warn};
 use serde::Serialize;
 use tokio::sync::OnceCell;

@@ -93,9 +92,8 @@ impl K8sAnywhereTopology {
             return Err("Failed to run 'helm -version'".to_string());
         }
 
-        // Print the version output
         let version_output = String::from_utf8_lossy(&version_result.stdout);
-        println!("Helm version: {}", version_output.trim());
+        debug!("Helm version: {}", version_output.trim());
 
         Ok(())
     }

@@ -126,7 +124,7 @@ impl K8sAnywhereTopology {
         // TODO this deserves some refactoring, it is becoming a bit hard to figure out
         // be careful when making modifications here
         if k8s_anywhere_config.use_local_k3d {
-            info!("Using local k3d cluster because of use_local_k3d set to true");
+            debug!("Using local k3d cluster because of use_local_k3d set to true");
         } else {
             if let Some(kubeconfig) = &k8s_anywhere_config.kubeconfig {
                 debug!("Loading kubeconfig {kubeconfig}");

@@ -158,22 +156,13 @@ impl K8sAnywhereTopology {
         }
 
         if !k8s_anywhere_config.autoinstall {
-            debug!("Autoinstall confirmation prompt");
-            let confirmation = Confirm::new( "Harmony autoinstallation is not activated, do you wish to launch autoinstallation? : ")
-                .with_default(false)
-                .prompt()
-                .expect("Unexpected prompt error");
-            debug!("Autoinstall confirmation {confirmation}");
-
-            if !confirmation {
-                warn!(
-                    "Installation cancelled, K8sAnywhere could not initialize a valid Kubernetes client"
-                );
-                return Ok(None);
-            }
+            warn!(
+                "Installation cancelled, K8sAnywhere could not initialize a valid Kubernetes client"
+            );
+            return Ok(None);
         }
 
-        info!("Starting K8sAnywhere installation");
+        debug!("Starting K8sAnywhere installation");
         self.try_install_k3d().await?;
         let k3d_score = self.get_k3d_installation_score();
         // I feel like having to rely on the k3d_rs crate here is a smell

@@ -186,7 +175,7 @@ impl K8sAnywhereTopology {
             Ok(client) => K8sState {
                 client: Arc::new(K8sClient::new(client)),
                 _source: K8sSource::LocalK3d,
-                message: "Successfully installed K3D cluster and acquired client".to_string(),
+                message: "K8s client ready".to_string(),
             },
             Err(_) => todo!(),
         };

@@ -237,7 +226,7 @@ pub struct K8sAnywhereConfig {
     ///
     /// When enabled, autoinstall will setup a K3D cluster on the localhost. https://k3d.io/stable/
     ///
-    /// Default: false
+    /// Default: true
     pub autoinstall: bool,
 
     /// Whether to use local k3d cluster.

@@ -256,7 +245,7 @@ impl K8sAnywhereConfig {
             use_system_kubeconfig: std::env::var("HARMONY_USE_SYSTEM_KUBECONFIG")
                 .map_or_else(|_| false, |v| v.parse().ok().unwrap_or(false)),
             autoinstall: std::env::var("HARMONY_AUTOINSTALL")
-                .map_or_else(|_| false, |v| v.parse().ok().unwrap_or(false)),
+                .map_or_else(|_| true, |v| v.parse().ok().unwrap_or(false)),
             // TODO harmony_profile should be managed at a more core level than this
             harmony_profile: std::env::var("HARMONY_PROFILE").map_or_else(
                 |_| "dev".to_string(),
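The `@@ -256,7 +245,7 @@` hunk flips the `HARMONY_AUTOINSTALL` fallback so autoinstall is on by default. Note how the parse chain treats values: only the exact string `true` enables it, and anything unparseable as a Rust `bool` (e.g. `1` or `yes`) falls back to `false`. A standalone sketch of that logic (`parse_autoinstall` is a hypothetical name):

```rs
use std::env::VarError;

// Mirrors the fallback chain in the hunk above: unset -> true, unparseable -> false.
fn parse_autoinstall(raw: Result<String, VarError>) -> bool {
    raw.map_or_else(|_| true, |v| v.parse().ok().unwrap_or(false))
}

fn main() {
    assert!(parse_autoinstall(Err(VarError::NotPresent))); // unset: new default is on
    assert!(parse_autoinstall(Ok("true".to_string())));    // explicit opt-in
    assert!(!parse_autoinstall(Ok("false".to_string())));  // explicit opt-out
    assert!(!parse_autoinstall(Ok("1".to_string())));      // not a Rust bool literal -> off
}
```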
@@ -1,7 +1,7 @@
 use std::{io::Write, process::Command, sync::Arc};
 
 use async_trait::async_trait;
-use log::{error, info};
+use log::{debug, error};
 use serde_yaml::Value;
 use tempfile::NamedTempFile;
 

@@ -63,7 +63,7 @@ impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
         error!("TODO hardcoded k3d bin path is wrong");
         let k3d_bin_path = (*HARMONY_DATA_DIR).join("k3d").join("k3d");
         // --- 1. Import the container image into the k3d cluster ---
-        info!(
+        debug!(
             "Importing image '{}' into k3d cluster 'harmony'",
             image_name
         );

@@ -80,7 +80,7 @@ impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
         }
 
         // --- 2. Get the kubeconfig for the k3d cluster and write it to a temp file ---
-        info!("Retrieving kubeconfig for k3d cluster 'harmony'");
+        debug!("Retrieving kubeconfig for k3d cluster 'harmony'");
         let kubeconfig_output = Command::new(&k3d_bin_path)
             .args(["kubeconfig", "get", "harmony"])
             .output()

@@ -101,7 +101,7 @@ impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
         let kubeconfig_path = temp_kubeconfig.path().to_str().unwrap();
 
         // --- 3. Install or upgrade the Helm chart in the cluster ---
-        info!(
+        debug!(
             "Deploying Helm chart '{}' to namespace '{}'",
             chart_url, app_name
         );

@@ -131,7 +131,7 @@ impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
             ));
         }
 
-        info!("Successfully deployed '{}' to local k3d cluster.", app_name);
+        debug!("Successfully deployed '{}' to local k3d cluster.", app_name);
         Ok(())
     }
 }

@@ -145,24 +145,19 @@ impl<
     async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
         let image = self.application.image_name();
 
-        // TODO
-        error!(
-            "TODO reverse helm chart packaging and docker image build. I put helm package first for faster iterations"
-        );
-
         // TODO Write CI/CD workflow files
         // we can autotedect the CI type using the remote url (default to github action for github
        // url, etc..)
         // Or ask for it when unknown
 
         let helm_chart = self.application.build_push_helm_package(&image).await?;
-        info!("Pushed new helm chart {helm_chart}");
+        debug!("Pushed new helm chart {helm_chart}");
 
         error!("TODO Make building image configurable/skippable if image already exists (prompt)");
         let image = self.application.build_push_oci_image().await?;
-        info!("Pushed new docker image {image}");
+        debug!("Pushed new docker image {image}");
 
-        info!("Installing ContinuousDelivery feature");
+        debug!("Installing ContinuousDelivery feature");
         // TODO this is a temporary hack for demo purposes, the deployment target should be driven
         // by the topology only and we should not have to know how to perform tasks like this for
         // which the topology should be responsible.

@@ -182,7 +177,7 @@ impl<
                 .await?;
             }
             target => {
-                info!("Deploying to target {target:?}");
+                debug!("Deploying to target {target:?}");
                 let score = ArgoHelmScore {
                     namespace: "harmonydemo-staging".to_string(),
                     openshift: false,

@@ -5,7 +5,7 @@ mod rust;
 use std::sync::Arc;
 
 pub use feature::*;
-use log::info;
+use log::debug;
 pub use oci::*;
 pub use rust::*;
 

@@ -36,7 +36,7 @@ impl<A: Application, T: Topology + std::fmt::Debug> Interpret<T> for Application
         topology: &T,
     ) -> Result<Outcome, InterpretError> {
         let app_name = self.application.name();
-        info!(
+        debug!(
             "Preparing {} features [{}] for application {app_name}",
             self.features.len(),
             self.features

@@ -46,7 +46,7 @@ impl<A: Application, T: Topology + std::fmt::Debug> Interpret<T> for Application
                 .join(", ")
         );
         for feature in self.features.iter() {
-            info!(
+            debug!(
                 "Installing feature {} for application {app_name}",
                 feature.name()
             );

@@ -10,7 +10,7 @@ use dockerfile_builder::Dockerfile;
 use dockerfile_builder::instruction::{CMD, COPY, ENV, EXPOSE, FROM, RUN, USER, WORKDIR};
 use dockerfile_builder::instruction_builder::CopyBuilder;
 use futures_util::StreamExt;
-use log::{debug, error, info};
+use log::{debug, error, log_enabled};
 use serde::Serialize;
 use tar::Archive;
 

@@ -73,19 +73,19 @@ impl Application for RustWebapp {
 #[async_trait]
 impl HelmPackage for RustWebapp {
     async fn build_push_helm_package(&self, image_url: &str) -> Result<String, String> {
-        info!("Starting Helm chart build and push for '{}'", self.name);
+        debug!("Starting Helm chart build and push for '{}'", self.name);
 
         // 1. Create the Helm chart files on disk.
         let chart_dir = self
             .create_helm_chart_files(image_url)
             .map_err(|e| format!("Failed to create Helm chart files: {}", e))?;
-        info!("Successfully created Helm chart files in {:?}", chart_dir);
+        debug!("Successfully created Helm chart files in {:?}", chart_dir);
 
         // 2. Package the chart into a .tgz archive.
         let packaged_chart_path = self
             .package_helm_chart(&chart_dir)
             .map_err(|e| format!("Failed to package Helm chart: {}", e))?;
-        info!(
+        debug!(
             "Successfully packaged Helm chart: {}",
             packaged_chart_path.to_string_lossy()
         );

@@ -94,7 +94,7 @@ impl HelmPackage for RustWebapp {
         let oci_chart_url = self
             .push_helm_chart(&packaged_chart_path)
             .map_err(|e| format!("Failed to push Helm chart: {}", e))?;
-        info!("Successfully pushed Helm chart to: {}", oci_chart_url);
+        debug!("Successfully pushed Helm chart to: {}", oci_chart_url);
 
         Ok(oci_chart_url)
     }

@@ -107,20 +107,20 @@ impl OCICompliant for RustWebapp {
     async fn build_push_oci_image(&self) -> Result<String, String> {
         // This function orchestrates the build and push process.
         // It's async to match the trait definition, though the underlying docker commands are blocking.
-        info!("Starting OCI image build and push for '{}'", self.name);
+        debug!("Starting OCI image build and push for '{}'", self.name);
 
         // 1. Build the image by calling the synchronous helper function.
         let image_tag = self.image_name();
         self.build_docker_image(&image_tag)
             .await
             .map_err(|e| format!("Failed to build Docker image: {}", e))?;
-        info!("Successfully built Docker image: {}", image_tag);
+        debug!("Successfully built Docker image: {}", image_tag);
 
         // 2. Push the image to the registry.
         self.push_docker_image(&image_tag)
             .await
             .map_err(|e| format!("Failed to push Docker image: {}", e))?;
-        info!("Successfully pushed Docker image to: {}", image_tag);
+        debug!("Successfully pushed Docker image to: {}", image_tag);
 
         Ok(image_tag)
     }

@@ -159,15 +159,17 @@ impl RustWebapp {
         &self,
         image_name: &str,
     ) -> Result<String, Box<dyn std::error::Error>> {
-        info!("Generating Dockerfile for '{}'", self.name);
+        debug!("Generating Dockerfile for '{}'", self.name);
         let _dockerfile_path = self.build_dockerfile()?;
 
         let docker = Docker::connect_with_socket_defaults().unwrap();
 
+        let quiet = !log_enabled!(log::Level::Debug);
+
         let build_image_options = bollard::query_parameters::BuildImageOptionsBuilder::default()
             .dockerfile("Dockerfile.harmony")
             .t(image_name)
-            .q(false)
+            .q(quiet)
             .version(bollard::query_parameters::BuilderVersion::BuilderV1)
             .platform("linux/x86_64");
 
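The `.q(quiet)` change above derives the Docker build's verbosity from the active log level rather than always streaming build output. The gate can be seen in isolation below; the `env_logger` backend is an assumption, any `log` implementation works:

```rs
use log::{Level, log_enabled};

fn main() {
    // Any `log` backend works; env_logger is just an example (set RUST_LOG=debug to be verbose).
    env_logger::init();

    // Mirrors the diff above: stay quiet unless debug logging was requested.
    let quiet = !log_enabled!(Level::Debug);
    println!("docker build quiet mode: {quiet}");
}
```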
@@ -204,7 +206,7 @@ impl RustWebapp {
         &self,
         image_tag: &str,
     ) -> Result<String, Box<dyn std::error::Error>> {
-        info!("Pushing docker image {image_tag}");
+        debug!("Pushing docker image {image_tag}");
 
         let docker = Docker::connect_with_socket_defaults().unwrap();
 

@@ -533,7 +535,7 @@ spec:
         chart_dir: &PathBuf,
     ) -> Result<PathBuf, Box<dyn std::error::Error>> {
         let chart_dirname = chart_dir.file_name().expect("Should find a chart dirname");
-        info!(
+        debug!(
             "Launching `helm package {}` cli with CWD {}",
             chart_dirname.to_string_lossy(),
             &self

@@ -578,7 +580,7 @@ spec:
         let oci_push_url = format!("oci://{}/{}", *REGISTRY_URL, *REGISTRY_PROJECT);
         let oci_pull_url = format!("{oci_push_url}/{}-chart", self.name);
 
-        info!(
+        debug!(
             "Pushing Helm chart {} to {}",
             packaged_chart_path.to_string_lossy(),
             oci_push_url

@@ -1,12 +1,13 @@
 use std::path::PathBuf;
 
 use async_trait::async_trait;
-use log::info;
+use log::{debug, info};
 use serde::Serialize;
 
 use crate::{
     config::HARMONY_DATA_DIR,
     data::{Id, Version},
+    instrumentation::{self, HarmonyEvent},
     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
     inventory::Inventory,
     score::Score,

@@ -50,22 +51,38 @@ impl<T: Topology> Interpret<T> for K3dInstallationInterpret {
     async fn execute(
         &self,
         _inventory: &Inventory,
-        _topology: &T,
+        topology: &T,
     ) -> Result<Outcome, InterpretError> {
+        instrumentation::instrument(HarmonyEvent::InterpretExecutionStarted {
+            topology: topology.name().into(),
+            interpret: "k3d-installation".into(),
+            message: "installing k3d...".into(),
+        })
+        .unwrap();
+
         let k3d = k3d_rs::K3d::new(
             self.score.installation_path.clone(),
             Some(self.score.cluster_name.clone()),
         );
-        match k3d.ensure_installed().await {
+        let outcome = match k3d.ensure_installed().await {
             Ok(_client) => {
-                let msg = format!("k3d cluster {} is installed ", self.score.cluster_name);
-                info!("{msg}");
+                let msg = format!("k3d cluster '{}' installed ", self.score.cluster_name);
+                debug!("{msg}");
                 Ok(Outcome::success(msg))
             }
             Err(msg) => Err(InterpretError::new(format!(
-                "K3dInstallationInterpret failed to ensure k3d is installed : {msg}"
+                "failed to ensure k3d is installed : {msg}"
             ))),
-        }
+        };
+
+        instrumentation::instrument(HarmonyEvent::InterpretExecutionFinished {
+            topology: topology.name().into(),
+            interpret: "k3d-installation".into(),
+            outcome: outcome.clone(),
+        })
+        .unwrap();
+
+        outcome
     }
     fn get_name(&self) -> InterpretName {
         InterpretName::K3dInstallation
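The Started/Finished pair wrapped around `execute` in this last hunk is boilerplate every instrumented interpret will repeat. A possible refactor, sketched below and not part of this commit, is a generic wrapper (`instrumented_execute` is a hypothetical name); it also swaps `.unwrap()` for `.ok()`, treating "no subscribers" as non-fatal, which is a design choice rather than the PR's behavior:

```rs
use std::future::Future;

use crate::instrumentation::{self, HarmonyEvent};
use crate::interpret::{InterpretError, Outcome}; // paths as used elsewhere in this diff

// Hypothetical helper: emits the Started/Finished pair around any interpret execution.
async fn instrumented_execute<F>(
    topology: &str,
    interpret: &str,
    message: &str,
    run: F,
) -> Result<Outcome, InterpretError>
where
    F: Future<Output = Result<Outcome, InterpretError>>,
{
    instrumentation::instrument(HarmonyEvent::InterpretExecutionStarted {
        topology: topology.into(),
        interpret: interpret.into(),
        message: message.into(),
    })
    .ok(); // having no subscribers is not treated as fatal here

    let outcome = run.await;

    instrumentation::instrument(HarmonyEvent::InterpretExecutionFinished {
        topology: topology.into(),
        interpret: interpret.into(),
        outcome: outcome.clone(),
    })
    .ok();

    outcome
}
```

An interpret would then call `instrumented_execute(topology.name(), "k3d-installation", "installing k3d...", async { /* the existing body */ }).await` instead of emitting both events by hand.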