Compare commits

3 Commits

feat/nats ... feat/docke

| Author | SHA1 | Date |
|---|---|---|
|  | dd6e36889b |  |
|  | 50b3995449 |  |
|  | 8d27ecf6de |  |
@@ -10,7 +10,7 @@ members = [
"opnsense-config",
"opnsense-config-xml",
"harmony_cli",
"k3d",
"harmony_tools",
"harmony_composer",
"harmony_inventory_agent",
"harmony_secret_derive",
@@ -3,58 +3,15 @@ use std::str::FromStr;
use harmony::{
inventory::Inventory,
modules::helm::chart::{HelmChartScore, HelmRepository, NonBlankString},
topology::{HelmCommand, K8sAnywhereConfig, K8sAnywhereTopology, TlsRouter, Topology},
topology::K8sAnywhereTopology,
};
use harmony_macros::hurl;
use log::info;

#[tokio::main]
async fn main() {
let site1_topo = K8sAnywhereTopology::with_config(K8sAnywhereConfig::remote_k8s_from_env_var(
"HARMONY_NATS_SITE_1",
));
let site2_topo = K8sAnywhereTopology::with_config(K8sAnywhereConfig::remote_k8s_from_env_var(
"HARMONY_NATS_SITE_2",
));

let site1_domain = site1_topo.get_internal_domain().await.unwrap().unwrap();
let site2_domain = site2_topo.get_internal_domain().await.unwrap().unwrap();

let site1_gateway = format!("nats-gateway.{}", site1_domain);
let site2_gateway = format!("nats-gateway.{}", site2_domain);

tokio::join!(
deploy_nats(
site1_topo,
"site-1",
vec![("site-2".to_string(), site2_gateway)]
),
deploy_nats(
site2_topo,
"site-2",
vec![("site-1".to_string(), site1_gateway)]
),
);
}

async fn deploy_nats<T: Topology + HelmCommand + TlsRouter + 'static>(
topology: T,
cluster_name: &str,
remote_gateways: Vec<(String, String)>,
) {
topology.ensure_ready().await.unwrap();

let mut gateway_gateways = String::new();
for (name, url) in remote_gateways {
gateway_gateways.push_str(&format!(
r#"
- name: {name}
urls:
- nats://{url}:7222"#
));
}
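// Illustrative note (not part of the original change): for a single remote
// gateway entry such as ("site-2", "nats-gateway.apps.example.com") (a
// hypothetical host), the loop above appends a YAML fragment along these
// lines, which is later spliced into the chart's gateway.gateways value:
//
//   - name: site-2
//     urls:
//       - nats://nats-gateway.apps.example.com:7222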

let values_yaml = Some(format!(
// env_logger::init();
let values_yaml = Some(
r#"config:
cluster:
enabled: true
@@ -68,31 +25,16 @@ async fn deploy_nats<T: Topology + HelmCommand + TlsRouter + 'static>(
leafnodes:
enabled: false
# port: 7422
websocket:
enabled: true
ingress:
enabled: true
className: openshift-default
pathType: Prefix
hosts:
- nats-ws.{}
gateway:
enabled: true
name: {}
port: 7222
gateways: {}
service:
ports:
gateway:
enabled: true
enabled: false
# name: my-gateway
# port: 7522
natsBox:
container:
image:
tag: nonroot"#,
topology.get_internal_domain().await.unwrap().unwrap(),
cluster_name,
gateway_gateways,
));
tag: nonroot"#
.to_string(),
);
let namespace = "nats";
let nats = HelmChartScore {
namespace: Some(NonBlankString::from_str(namespace).unwrap()),
@@ -110,7 +52,12 @@ natsBox:
)),
};

harmony_cli::run(Inventory::autoload(), topology, vec![Box::new(nats)], None)
harmony_cli::run(
Inventory::autoload(),
K8sAnywhereTopology::from_env(),
vec![Box::new(nats)],
None,
)
.await
.unwrap();
@@ -9,6 +9,14 @@ license.workspace = true
testing = []

[dependencies]
opnsense-config = { path = "../opnsense-config" }
opnsense-config-xml = { path = "../opnsense-config-xml" }
harmony_macros = { path = "../harmony_macros" }
harmony_types = { path = "../harmony_types" }
harmony_inventory_agent = { path = "../harmony_inventory_agent" }
harmony_secret_derive = { path = "../harmony_secret_derive" }
harmony_secret = { path = "../harmony_secret" }
harmony_tools = { path = "../harmony_tools" }
hex = "0.4"
reqwest = { version = "0.11", features = [
"blocking",
@@ -26,10 +34,6 @@ log.workspace = true
env_logger.workspace = true
async-trait.workspace = true
cidr.workspace = true
opnsense-config = { path = "../opnsense-config" }
opnsense-config-xml = { path = "../opnsense-config-xml" }
harmony_macros = { path = "../harmony_macros" }
harmony_types = { path = "../harmony_types" }
uuid.workspace = true
url.workspace = true
kube = { workspace = true, features = ["derive"] }
@@ -39,7 +43,6 @@ http.workspace = true
serde-value.workspace = true
helm-wrapper-rs = "0.4.0"
non-blank-string-rs = "1.0.4"
k3d-rs = { path = "../k3d" }
directories.workspace = true
lazy_static.workspace = true
dockerfile_builder = "0.1.5"
@@ -71,9 +74,6 @@ base64.workspace = true
thiserror.workspace = true
once_cell = "1.21.3"
walkdir = "2.5.0"
harmony_inventory_agent = { path = "../harmony_inventory_agent" }
harmony_secret_derive = { path = "../harmony_secret_derive" }
harmony_secret = { path = "../harmony_secret" }
askama.workspace = true
sqlx.workspace = true
inquire.workspace = true
harmony/src/domain/topology/docker/mod.rs (new file, 11 lines)
@@ -0,0 +1,11 @@
use async_trait::async_trait;

use std::collections::HashMap;

/// Docker Capability
#[async_trait]
pub trait Docker {
async fn ensure_installed(&self) -> Result<(), String>;
fn get_docker_env(&self) -> HashMap<String, String>;
fn docker_command(&self) -> std::process::Command;
}
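// Illustrative sketch (not part of this diff): a minimal type implementing the
// Docker capability by delegating to harmony_tools::Docker, mirroring how
// K8sAnywhereTopology wires it up later in this change. The type name and the
// installation directory are hypothetical.
struct LocalDockerHost;

#[async_trait]
impl Docker for LocalDockerHost {
    async fn ensure_installed(&self) -> Result<(), String> {
        harmony_tools::Docker::new("/tmp/harmony/docker".into())
            .ensure_installed()
            .await
    }

    fn get_docker_env(&self) -> HashMap<String, String> {
        harmony_tools::Docker::new("/tmp/harmony/docker".into()).get_docker_env()
    }

    fn docker_command(&self) -> std::process::Command {
        harmony_tools::Docker::new("/tmp/harmony/docker".into()).command()
    }
}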
@@ -1,5 +1 @@
use std::process::Command;

pub trait HelmCommand {
fn get_helm_command(&self) -> Command;
}
pub trait HelmCommand {}
@@ -16,7 +16,7 @@ use kube::{
Api, AttachParams, DeleteParams, ListParams, ObjectList, Patch, PatchParams, ResourceExt,
},
config::{KubeConfigOptions, Kubeconfig},
core::{DynamicResourceScope, ErrorResponse},
core::ErrorResponse,
discovery::{ApiCapabilities, Scope},
error::DiscoveryError,
runtime::reflector::Lookup,
@@ -1,7 +1,13 @@
use std::{collections::BTreeMap, process::Command, sync::Arc, time::Duration};
use std::{
collections::{BTreeMap, HashMap},
process::Command,
sync::Arc,
time::Duration,
};

use async_trait::async_trait;
use base64::{Engine, engine::general_purpose};
use harmony_tools::K3d;
use harmony_types::rfc1123::Rfc1123Name;
use k8s_openapi::api::{
core::v1::Secret,
@@ -13,10 +19,12 @@ use serde::Serialize;
use tokio::sync::OnceCell;

use crate::{
config::HARMONY_DATA_DIR,
executors::ExecutorError,
interpret::InterpretStatus,
inventory::Inventory,
modules::{
docker::DockerInstallationScore,
k3d::K3DInstallationScore,
k8s::ingress::{K8sIngressScore, PathType},
monitoring::{
@@ -35,7 +43,6 @@ use crate::{
service_monitor::ServiceMonitor,
},
},
okd::crd::ingresses_config::Ingress as IngressResource,
okd::route::OKDTlsPassthroughScore,
prometheus::{
k8s_prometheus_alerting_score::K8sPrometheusCRDAlertingScore,
@@ -43,7 +50,7 @@ use crate::{
},
},
score::Score,
topology::{TlsRoute, TlsRouter, ingress::Ingress},
topology::{Docker, TlsRoute, TlsRouter, ingress::Ingress},
};

use super::super::{
@@ -108,32 +115,8 @@ impl K8sclient for K8sAnywhereTopology {

#[async_trait]
impl TlsRouter for K8sAnywhereTopology {
async fn get_internal_domain(&self) -> Result<Option<String>, String> {
match self.get_k8s_distribution().await.map_err(|e| {
format!(
"Could not get internal domain, error getting k8s distribution : {}",
e.to_string()
)
})? {
KubernetesDistribution::OpenshiftFamily => {
let client = self.k8s_client().await?;
if let Some(ingress_config) = client
.get_resource::<IngressResource>("cluster", None)
.await
.map_err(|e| {
format!("Error attempting to get ingress config : {}", e.to_string())
})?
{
debug!("Found ingress config {:?}", ingress_config.spec);
Ok(ingress_config.spec.domain.clone())
} else {
warn!("Could not find a domain configured in this cluster");
Ok(None)
}
}
KubernetesDistribution::K3sFamily => todo!(),
KubernetesDistribution::Default => todo!(),
}
async fn get_wildcard_domain(&self) -> Result<Option<String>, String> {
todo!()
}

/// Returns the port that this router exposes externally.
@@ -375,6 +358,24 @@ impl PrometheusMonitoring<RHOBObservability> for K8sAnywhereTopology {
}
}

#[async_trait]
impl Docker for K8sAnywhereTopology {
async fn ensure_installed(&self) -> Result<(), String> {
DockerInstallationScore::default()
.interpret(&Inventory::empty(), self)
.await
.map_err(|e| format!("Could not ensure docker is installed : {e}"))?;
Ok(())
}
fn get_docker_env(&self) -> HashMap<String, String> {
harmony_tools::Docker::new(HARMONY_DATA_DIR.join("docker")).get_docker_env()
}

fn docker_command(&self) -> std::process::Command {
harmony_tools::Docker::new(HARMONY_DATA_DIR.join("docker")).command()
}
}

impl Serialize for K8sAnywhereTopology {
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
where
@@ -762,7 +763,7 @@ impl K8sAnywhereTopology {
// K3DInstallationScore should expose a method to get_client ? Not too sure what would be a
// good implementation due to the stateful nature of the k3d thing. Which is why I went
// with this solution for now
let k3d = k3d_rs::K3d::new(k3d_score.installation_path, Some(k3d_score.cluster_name));
let k3d = K3d::new(k3d_score.installation_path, Some(k3d_score.cluster_name));
let state = match k3d.get_client().await {
Ok(client) => K8sState {
client: Arc::new(K8sClient::new(client)),
@@ -1112,21 +1113,7 @@ impl MultiTargetTopology for K8sAnywhereTopology {
}
}

impl HelmCommand for K8sAnywhereTopology {
fn get_helm_command(&self) -> Command {
let mut cmd = Command::new("helm");
if let Some(k) = &self.config.kubeconfig {
cmd.args(["--kubeconfig", k]);
}

if let Some(c) = &self.config.k8s_context {
cmd.args(["--kube-context", c]);
}

info!("Using helm command {cmd:?}");
cmd
}
}
impl HelmCommand for K8sAnywhereTopology {}

#[async_trait]
impl TenantManager for K8sAnywhereTopology {
@@ -1147,7 +1134,7 @@ impl TenantManager for K8sAnywhereTopology {
#[async_trait]
impl Ingress for K8sAnywhereTopology {
async fn get_domain(&self, service: &str) -> Result<String, PreparationError> {
use log::{trace, warn};
use log::{debug, trace, warn};

let client = self.k8s_client().await?;
@@ -2,7 +2,7 @@ use async_trait::async_trait;
use derive_new::new;
use serde::{Deserialize, Serialize};

use super::{PreparationError, PreparationOutcome, Topology};
use super::{HelmCommand, PreparationError, PreparationOutcome, Topology};

#[derive(new, Clone, Debug, Serialize, Deserialize)]
pub struct LocalhostTopology;
@@ -19,3 +19,6 @@ impl Topology for LocalhostTopology {
})
}
}

// TODO: Delete this, temp for test
impl HelmCommand for LocalhostTopology {}
@@ -1,8 +1,10 @@
mod docker;
mod failover;
mod ha_cluster;
pub mod ingress;
pub mod node_exporter;
pub mod opnsense;
pub use docker::*;
pub use failover::*;
use harmony_types::net::IpAddress;
mod host_binding;
@@ -112,13 +112,12 @@ pub trait TlsRouter: Send + Sync {
/// HAProxy frontend→backend \"postgres-upstream\".
async fn install_route(&self, config: TlsRoute) -> Result<(), String>;

/// Gets the base domain of this cluster. On openshift family clusters, this is the domain
/// used by default for all components, including the default ingress controller that
/// transforms ingress to routes.
/// Gets the base domain that can be used to deploy applications that will be automatically
/// routed to this cluster.
///
/// For example, get_internal_domain on a cluster that has `console-openshift-console.apps.mycluster.something`
/// will return `apps.mycluster.something`
async fn get_internal_domain(&self) -> Result<Option<String>, String>;
/// For example, if we have *.apps.nationtech.io pointing to a public load balancer, then this
/// function would install route apps.nationtech.io
async fn get_wildcard_domain(&self) -> Result<Option<String>, String>;

/// Returns the port that this router exposes externally.
async fn get_router_port(&self) -> u16;
harmony/src/modules/docker.rs (new file, 79 lines)
@@ -0,0 +1,79 @@
use std::path::PathBuf;

use async_trait::async_trait;
use log::debug;
use serde::Serialize;

use crate::{
config::HARMONY_DATA_DIR,
data::Version,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::{Docker, Topology},
};
use harmony_types::id::Id;

#[derive(Debug, Clone, Serialize)]
pub struct DockerInstallationScore {
pub installation_path: PathBuf,
}

impl Default for DockerInstallationScore {
fn default() -> Self {
Self {
installation_path: HARMONY_DATA_DIR.join("docker"),
}
}
}

impl<T: Topology + Docker> Score<T> for DockerInstallationScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(DockerInstallationInterpret {
score: self.clone(),
})
}

fn name(&self) -> String {
"DockerInstallationScore".into()
}
}

#[derive(Debug)]
pub struct DockerInstallationInterpret {
score: DockerInstallationScore,
}

#[async_trait]
impl<T: Topology + Docker> Interpret<T> for DockerInstallationInterpret {
async fn execute(
&self,
_inventory: &Inventory,
_topology: &T,
) -> Result<Outcome, InterpretError> {
let docker = harmony_tools::Docker::new(self.score.installation_path.clone());

match docker.ensure_installed().await {
Ok(_) => {
let msg = "Docker is installed and ready".to_string();
debug!("{msg}");
Ok(Outcome::success(msg))
}
Err(msg) => Err(InterpretError::new(format!(
"failed to ensure docker is installed : {msg}"
))),
}
}
fn get_name(&self) -> InterpretName {
InterpretName::Custom("DockerInstallation")
}
fn get_version(&self) -> Version {
todo!()
}
fn get_status(&self) -> InterpretStatus {
todo!()
}
fn get_children(&self) -> Vec<Id> {
todo!()
}
}
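// Illustrative usage note (not part of the file above): later in this change,
// K8sAnywhereTopology consumes this score directly from its Docker capability
// implementation, roughly as:
//
//     DockerInstallationScore::default()
//         .interpret(&Inventory::empty(), self)
//         .await
//         .map_err(|e| format!("Could not ensure docker is installed : {e}"))?;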
@@ -6,11 +6,15 @@ use crate::topology::{HelmCommand, Topology};
use async_trait::async_trait;
use harmony_types::id::Id;
use harmony_types::net::Url;
use helm_wrapper_rs;
use helm_wrapper_rs::blocking::{DefaultHelmExecutor, HelmExecutor};
use log::{debug, info, warn};
pub use non_blank_string_rs::NonBlankString;
use serde::Serialize;
use std::collections::HashMap;
use std::process::{Output, Stdio};
use std::path::Path;
use std::process::{Command, Output, Stdio};
use std::str::FromStr;
use temp_file::TempFile;

#[derive(Debug, Clone, Serialize)]
@@ -61,7 +65,7 @@ pub struct HelmChartInterpret {
|
||||
pub score: HelmChartScore,
|
||||
}
|
||||
impl HelmChartInterpret {
|
||||
fn add_repo<T: HelmCommand>(&self, topology: &T) -> Result<(), InterpretError> {
|
||||
fn add_repo(&self) -> Result<(), InterpretError> {
|
||||
let repo = match &self.score.repository {
|
||||
Some(repo) => repo,
|
||||
None => {
|
||||
@@ -80,7 +84,7 @@ impl HelmChartInterpret {
|
||||
add_args.push("--force-update");
|
||||
}
|
||||
|
||||
let add_output = run_helm_command(topology, &add_args)?;
|
||||
let add_output = run_helm_command(&add_args)?;
|
||||
let full_output = format!(
|
||||
"{}\n{}",
|
||||
String::from_utf8_lossy(&add_output.stdout),
|
||||
@@ -96,19 +100,23 @@ impl HelmChartInterpret {
|
||||
}
|
||||
}
|
||||
|
||||
fn run_helm_command<T: HelmCommand>(topology: &T, args: &[&str]) -> Result<Output, InterpretError> {
|
||||
let mut helm_cmd = topology.get_helm_command();
|
||||
helm_cmd.args(args);
|
||||
fn run_helm_command(args: &[&str]) -> Result<Output, InterpretError> {
|
||||
let command_str = format!("helm {}", args.join(" "));
|
||||
debug!(
|
||||
"Got KUBECONFIG: `{}`",
|
||||
std::env::var("KUBECONFIG").unwrap_or("".to_string())
|
||||
);
|
||||
debug!("Running Helm command: `{}`", command_str);
|
||||
|
||||
debug!("Running Helm command: `{:?}`", helm_cmd);
|
||||
|
||||
let output = helm_cmd
|
||||
let output = Command::new("helm")
|
||||
.args(args)
|
||||
.stdout(Stdio::piped())
|
||||
.stderr(Stdio::piped())
|
||||
.output()
|
||||
.map_err(|e| {
|
||||
InterpretError::new(format!(
|
||||
"Failed to execute helm command '{helm_cmd:?}': {e}. Is helm installed and in PATH?",
|
||||
"Failed to execute helm command '{}': {}. Is helm installed and in PATH?",
|
||||
command_str, e
|
||||
))
|
||||
})?;
|
||||
|
||||
@@ -116,13 +124,13 @@ fn run_helm_command<T: HelmCommand>(topology: &T, args: &[&str]) -> Result<Outpu
|
||||
let stdout = String::from_utf8_lossy(&output.stdout);
|
||||
let stderr = String::from_utf8_lossy(&output.stderr);
|
||||
warn!(
|
||||
"Helm command `{helm_cmd:?}` failed with status: {}\nStdout:\n{stdout}\nStderr:\n{stderr}",
|
||||
output.status
|
||||
"Helm command `{}` failed with status: {}\nStdout:\n{}\nStderr:\n{}",
|
||||
command_str, output.status, stdout, stderr
|
||||
);
|
||||
} else {
|
||||
debug!(
|
||||
"Helm command `{helm_cmd:?}` finished successfully. Status: {}",
|
||||
output.status
|
||||
"Helm command `{}` finished successfully. Status: {}",
|
||||
command_str, output.status
|
||||
);
|
||||
}
|
||||
|
||||
@@ -134,7 +142,7 @@ impl<T: Topology + HelmCommand> Interpret<T> for HelmChartInterpret {
|
||||
async fn execute(
|
||||
&self,
|
||||
_inventory: &Inventory,
|
||||
topology: &T,
|
||||
_topology: &T,
|
||||
) -> Result<Outcome, InterpretError> {
|
||||
let ns = self
|
||||
.score
|
||||
@@ -142,62 +150,98 @@ impl<T: Topology + HelmCommand> Interpret<T> for HelmChartInterpret {
|
||||
.as_ref()
|
||||
.unwrap_or_else(|| todo!("Get namespace from active kubernetes cluster"));
|
||||
|
||||
self.add_repo(topology)?;
|
||||
|
||||
let mut args = if self.score.install_only {
|
||||
vec!["install"]
|
||||
} else {
|
||||
vec!["upgrade", "--install"]
|
||||
let tf: TempFile;
|
||||
let yaml_path: Option<&Path> = match self.score.values_yaml.as_ref() {
|
||||
Some(yaml_str) => {
|
||||
tf = temp_file::with_contents(yaml_str.as_bytes());
|
||||
debug!(
|
||||
"values yaml string for chart {} :\n {yaml_str}",
|
||||
self.score.chart_name
|
||||
);
|
||||
Some(tf.path())
|
||||
}
|
||||
None => None,
|
||||
};
|
||||
|
||||
args.extend(vec![
|
||||
self.add_repo()?;
|
||||
|
||||
let helm_executor = DefaultHelmExecutor::new_with_opts(
|
||||
&NonBlankString::from_str("helm").unwrap(),
|
||||
None,
|
||||
900,
|
||||
false,
|
||||
false,
|
||||
);
|
||||
|
||||
let mut helm_options = Vec::new();
|
||||
if self.score.create_namespace {
|
||||
helm_options.push(NonBlankString::from_str("--create-namespace").unwrap());
|
||||
}
|
||||
|
||||
if self.score.install_only {
|
||||
let chart_list = match helm_executor.list(Some(ns)) {
|
||||
Ok(charts) => charts,
|
||||
Err(e) => {
|
||||
return Err(InterpretError::new(format!(
|
||||
"Failed to list scores in namespace {:?} because of error : {}",
|
||||
self.score.namespace, e
|
||||
)));
|
||||
}
|
||||
};
|
||||
|
||||
if chart_list
|
||||
.iter()
|
||||
.any(|item| item.name == self.score.release_name.to_string())
|
||||
{
|
||||
info!(
|
||||
"Release '{}' already exists in namespace '{}'. Skipping installation as install_only is true.",
|
||||
self.score.release_name, ns
|
||||
);
|
||||
|
||||
return Ok(Outcome::success(format!(
|
||||
"Helm Chart '{}' already installed to namespace {ns} and install_only=true",
|
||||
self.score.release_name
|
||||
)));
|
||||
} else {
|
||||
info!(
|
||||
"Release '{}' not found in namespace '{}'. Proceeding with installation.",
|
||||
self.score.release_name, ns
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
let res = helm_executor.install_or_upgrade(
|
||||
ns,
|
||||
&self.score.release_name,
|
||||
&self.score.chart_name,
|
||||
"--namespace",
|
||||
&ns,
|
||||
]);
|
||||
self.score.chart_version.as_ref(),
|
||||
self.score.values_overrides.as_ref(),
|
||||
yaml_path,
|
||||
Some(&helm_options),
|
||||
);
|
||||
|
||||
if self.score.create_namespace {
|
||||
args.push("--create-namespace");
|
||||
}
|
||||
let status = match res {
|
||||
Ok(status) => status,
|
||||
Err(err) => return Err(InterpretError::new(err.to_string())),
|
||||
};
|
||||
|
||||
if let Some(version) = &self.score.chart_version {
|
||||
args.push("--version");
|
||||
args.push(&version);
|
||||
}
|
||||
|
||||
let tf: TempFile;
|
||||
if let Some(yaml_str) = &self.score.values_yaml {
|
||||
tf = temp_file::with_contents(yaml_str.as_bytes());
|
||||
args.push("--values");
|
||||
args.push(tf.path().to_str().unwrap());
|
||||
}
|
||||
|
||||
let overrides_strings: Vec<String>;
|
||||
if let Some(overrides) = &self.score.values_overrides {
|
||||
overrides_strings = overrides
|
||||
.iter()
|
||||
.map(|(key, value)| format!("{key}={value}"))
|
||||
.collect();
|
||||
for o in overrides_strings.iter() {
|
||||
args.push("--set");
|
||||
args.push(&o);
|
||||
}
|
||||
}
|
||||
|
||||
let output = run_helm_command(topology, &args)?;
|
||||
|
||||
if output.status.success() {
|
||||
Ok(Outcome::success(format!(
|
||||
match status {
|
||||
helm_wrapper_rs::HelmDeployStatus::Deployed => Ok(Outcome::success(format!(
|
||||
"Helm Chart {} deployed",
|
||||
self.score.release_name
|
||||
)))
|
||||
} else {
|
||||
Err(InterpretError::new(format!(
|
||||
"Helm Chart {} installation failed: {}",
|
||||
self.score.release_name,
|
||||
String::from_utf8_lossy(&output.stderr)
|
||||
)))
|
||||
))),
|
||||
helm_wrapper_rs::HelmDeployStatus::PendingInstall => Ok(Outcome::running(format!(
|
||||
"Helm Chart {} pending install...",
|
||||
self.score.release_name
|
||||
))),
|
||||
helm_wrapper_rs::HelmDeployStatus::PendingUpgrade => Ok(Outcome::running(format!(
|
||||
"Helm Chart {} pending upgrade...",
|
||||
self.score.release_name
|
||||
))),
|
||||
helm_wrapper_rs::HelmDeployStatus::Failed => Err(InterpretError::new(format!(
|
||||
"Helm Chart {} installation failed",
|
||||
self.score.release_name
|
||||
))),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,6 +1,7 @@
use std::path::PathBuf;

use async_trait::async_trait;
use harmony_tools::K3d;
use log::debug;
use serde::Serialize;

@@ -10,7 +11,7 @@ use crate::{
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::Topology,
topology::{Docker, Topology},
};
use harmony_types::id::Id;

@@ -29,7 +30,7 @@ impl Default for K3DInstallationScore {
}
}

impl<T: Topology> Score<T> for K3DInstallationScore {
impl<T: Topology + Docker> Score<T> for K3DInstallationScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(K3dInstallationInterpret {
score: self.clone(),
@@ -47,19 +48,25 @@ pub struct K3dInstallationInterpret {
}

#[async_trait]
impl<T: Topology> Interpret<T> for K3dInstallationInterpret {
impl<T: Topology + Docker> Interpret<T> for K3dInstallationInterpret {
async fn execute(
&self,
_inventory: &Inventory,
_topology: &T,
topology: &T,
) -> Result<Outcome, InterpretError> {
let k3d = k3d_rs::K3d::new(
let k3d = K3d::new(
self.score.installation_path.clone(),
Some(self.score.cluster_name.clone()),
);

Docker::ensure_installed(topology)
.await
.map_err(|e| InterpretError::new(format!("Docker requirement for k3d failed: {e}")))?;

match k3d.ensure_installed().await {
Ok(_client) => {
// Ensure Docker is also ready as k3d depends on it

let msg = format!("k3d cluster '{}' installed ", self.score.cluster_name);
debug!("{msg}");
Ok(Outcome::success(msg))
@@ -4,6 +4,7 @@ pub mod brocade;
pub mod cert_manager;
pub mod dhcp;
pub mod dns;
pub mod docker;
pub mod dummy;
pub mod helm;
pub mod http;

@@ -5,7 +5,7 @@ use crate::topology::{FailoverTopology, TlsRoute, TlsRouter};

#[async_trait]
impl<T: TlsRouter> TlsRouter for FailoverTopology<T> {
async fn get_internal_domain(&self) -> Result<Option<String>, String> {
async fn get_wildcard_domain(&self) -> Result<Option<String>, String> {
todo!()
}
@@ -1,214 +0,0 @@
|
||||
use k8s_openapi::apimachinery::pkg::apis::meta::v1::{ListMeta, ObjectMeta};
|
||||
use k8s_openapi::{ClusterResourceScope, Resource};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct Ingress {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub api_version: Option<String>,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub kind: Option<String>,
|
||||
pub metadata: ObjectMeta,
|
||||
|
||||
pub spec: IngressSpec,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub status: Option<IngressStatus>,
|
||||
}
|
||||
|
||||
impl Resource for Ingress {
|
||||
const API_VERSION: &'static str = "config.openshift.io/v1";
|
||||
const GROUP: &'static str = "config.openshift.io";
|
||||
const VERSION: &'static str = "v1";
|
||||
const KIND: &'static str = "Ingress";
|
||||
const URL_PATH_SEGMENT: &'static str = "ingresses";
|
||||
type Scope = ClusterResourceScope;
|
||||
}
|
||||
|
||||
impl k8s_openapi::Metadata for Ingress {
|
||||
type Ty = ObjectMeta;
|
||||
|
||||
fn metadata(&self) -> &Self::Ty {
|
||||
&self.metadata
|
||||
}
|
||||
|
||||
fn metadata_mut(&mut self) -> &mut Self::Ty {
|
||||
&mut self.metadata
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for Ingress {
|
||||
fn default() -> Self {
|
||||
Ingress {
|
||||
api_version: Some("config.openshift.io/v1".to_string()),
|
||||
kind: Some("Ingress".to_string()),
|
||||
metadata: ObjectMeta::default(),
|
||||
spec: IngressSpec::default(),
|
||||
status: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct IngressList {
|
||||
pub metadata: ListMeta,
|
||||
pub items: Vec<Ingress>,
|
||||
}
|
||||
|
||||
impl Default for IngressList {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
metadata: ListMeta::default(),
|
||||
items: Vec::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Resource for IngressList {
|
||||
const API_VERSION: &'static str = "config.openshift.io/v1";
|
||||
const GROUP: &'static str = "config.openshift.io";
|
||||
const VERSION: &'static str = "v1";
|
||||
const KIND: &'static str = "IngressList";
|
||||
const URL_PATH_SEGMENT: &'static str = "ingresses";
|
||||
type Scope = ClusterResourceScope;
|
||||
}
|
||||
|
||||
impl k8s_openapi::Metadata for IngressList {
|
||||
type Ty = ListMeta;
|
||||
|
||||
fn metadata(&self) -> &Self::Ty {
|
||||
&self.metadata
|
||||
}
|
||||
|
||||
fn metadata_mut(&mut self) -> &mut Self::Ty {
|
||||
&mut self.metadata
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug, Default)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct IngressSpec {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub apps_domain: Option<String>,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub component_routes: Option<Vec<ComponentRouteSpec>>,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub domain: Option<String>,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub load_balancer: Option<LoadBalancer>,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub required_hsts_policies: Option<Vec<RequiredHSTSPolicy>>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct ComponentRouteSpec {
|
||||
pub hostname: String,
|
||||
pub name: String,
|
||||
pub namespace: String,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub serving_cert_key_pair_secret: Option<SecretNameReference>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct SecretNameReference {
|
||||
pub name: String,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug, Default)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct LoadBalancer {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub platform: Option<IngressPlatform>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug, Default)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct IngressPlatform {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub aws: Option<AWSPlatformLoadBalancer>,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub r#type: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct AWSPlatformLoadBalancer {
|
||||
pub r#type: String,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RequiredHSTSPolicy {
|
||||
pub domain_patterns: Vec<String>,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub include_sub_domains_policy: Option<String>,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub max_age: Option<MaxAgePolicy>,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub namespace_selector: Option<k8s_openapi::apimachinery::pkg::apis::meta::v1::LabelSelector>,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub preload_policy: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug, Default)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct MaxAgePolicy {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub largest_max_age: Option<i32>,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub smallest_max_age: Option<i32>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug, Default)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct IngressStatus {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub component_routes: Option<Vec<ComponentRouteStatus>>,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub default_placement: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct ComponentRouteStatus {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub conditions: Option<Vec<k8s_openapi::apimachinery::pkg::apis::meta::v1::Condition>>,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub consuming_users: Option<Vec<String>>,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub current_hostnames: Option<Vec<String>>,
|
||||
|
||||
pub default_hostname: String,
|
||||
pub name: String,
|
||||
pub namespace: String,
|
||||
pub related_objects: Vec<ObjectReference>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct ObjectReference {
|
||||
pub group: String,
|
||||
pub name: String,
|
||||
pub namespace: String,
|
||||
pub resource: String,
|
||||
}
|
||||
|
||||
@@ -1,3 +1,2 @@
pub mod nmstate;
pub mod route;
pub mod ingresses_config;

@@ -1,4 +1,5 @@
use k8s_openapi::apimachinery::pkg::apis::meta::v1::{ListMeta, ObjectMeta, Time};
use k8s_openapi::apimachinery::pkg::util::intstr::IntOrString;
use k8s_openapi::{NamespaceResourceScope, Resource};
use serde::{Deserialize, Serialize};

@@ -7,14 +7,11 @@ use harmony::{
};
use log::{error, info, log_enabled};
use std::io::Write;
use std::sync::{Mutex, OnceLock};
use std::sync::Mutex;

pub fn init() {
static INITIALIZED: OnceLock<()> = OnceLock::new();
INITIALIZED.get_or_init(|| {
configure_logger();
handle_events();
});
}

fn configure_logger() {

@@ -1,5 +1,6 @@
[package]
name = "k3d-rs"
name = "harmony_tools"
description = "Install tools such as k3d, docker and more"
edition = "2021"
version.workspace = true
readme.workspace = true
@@ -16,6 +17,7 @@ url.workspace = true
sha2 = "0.10.8"
futures-util = "0.3.31"
kube.workspace = true
inquire.workspace = true

[dev-dependencies]
env_logger = { workspace = true }
harmony_tools/src/docker.rs (new file, 326 lines)
@@ -0,0 +1,326 @@
|
||||
use crate::downloadable_asset::DownloadableAsset;
|
||||
use inquire::Select;
|
||||
use log::{debug, error, info, trace, warn};
|
||||
use std::collections::HashMap;
|
||||
use std::fmt;
|
||||
use std::path::PathBuf;
|
||||
use url::Url;
|
||||
|
||||
pub struct Docker {
|
||||
base_dir: PathBuf,
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub enum DockerVariant {
|
||||
Standard,
|
||||
Rootless,
|
||||
Manual,
|
||||
}
|
||||
|
||||
impl fmt::Display for DockerVariant {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
DockerVariant::Standard => write!(f, "Standard Docker (requires sudo)"),
|
||||
DockerVariant::Rootless => write!(f, "Rootless Docker (no sudo required)"),
|
||||
DockerVariant::Manual => {
|
||||
write!(f, "Exit and install manually (Docker or podman-docker)")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Docker {
|
||||
pub fn new(base_dir: PathBuf) -> Self {
|
||||
Self { base_dir }
|
||||
}
|
||||
|
||||
/// Provides the DOCKER_HOST and DOCKER_SOCK env vars for local usage.
|
||||
///
|
||||
/// If a rootless Docker installation is detected in the user's home directory,
|
||||
/// it returns the appropriate `DOCKER_HOST` pointing to the user's Docker socket.
|
||||
/// Otherwise, it returns an empty HashMap, assuming the standard system-wide
|
||||
/// Docker installation is used.
|
||||
pub fn get_docker_env(&self) -> HashMap<String, String> {
|
||||
let mut env = HashMap::new();
|
||||
|
||||
if let Ok(home) = std::env::var("HOME") {
|
||||
let rootless_sock = PathBuf::from(&home).join(".docker/run/docker.sock");
|
||||
let rootless_bin = PathBuf::from(&home).join("bin/docker");
|
||||
|
||||
if rootless_bin.exists() && rootless_sock.exists() {
|
||||
let docker_host = format!("unix://{}", rootless_sock.display());
|
||||
debug!(
|
||||
"Detected rootless Docker, setting DOCKER_HOST={}",
|
||||
docker_host
|
||||
);
|
||||
env.insert("DOCKER_HOST".to_string(), docker_host);
|
||||
}
|
||||
}
|
||||
|
||||
env
|
||||
}
|
||||
|
||||
/// Gets the path to the docker binary
|
||||
pub fn get_bin_path(&self) -> PathBuf {
|
||||
// Check standard PATH first
|
||||
if let Ok(path) = std::process::Command::new("which")
|
||||
.arg("docker")
|
||||
.output()
|
||||
.map(|o| PathBuf::from(String::from_utf8_lossy(&o.stdout).trim()))
|
||||
{
|
||||
if path.exists() {
|
||||
debug!("Found Docker in PATH: {:?}", path);
|
||||
return path;
|
||||
}
|
||||
}
|
||||
|
||||
// Check common rootless location
|
||||
if let Ok(home) = std::env::var("HOME") {
|
||||
let rootless_path = PathBuf::from(home).join("bin/docker");
|
||||
if rootless_path.exists() {
|
||||
debug!("Found rootless Docker at: {:?}", rootless_path);
|
||||
return rootless_path;
|
||||
}
|
||||
}
|
||||
|
||||
debug!("Docker not found in PATH or rootless location, using 'docker' from PATH");
|
||||
PathBuf::from("docker")
|
||||
}
|
||||
|
||||
/// Checks if Docker is installed and the daemon is responsive.
|
||||
pub fn is_installed(&self) -> bool {
|
||||
trace!("Checking if Docker is installed and responsive");
|
||||
|
||||
self.command()
|
||||
.arg("info")
|
||||
.output()
|
||||
.map(|output| {
|
||||
if output.status.success() {
|
||||
trace!("Docker daemon is responsive");
|
||||
true
|
||||
} else {
|
||||
trace!(
|
||||
"Docker daemon check failed with status: {:?}",
|
||||
output.status
|
||||
);
|
||||
false
|
||||
}
|
||||
})
|
||||
.map_err(|e| {
|
||||
trace!("Failed to execute Docker daemon check: {}", e);
|
||||
e
|
||||
})
|
||||
.unwrap_or(false)
|
||||
}
|
||||
|
||||
/// Prompts the user to choose an installation method
|
||||
fn prompt_for_installation(&self) -> DockerVariant {
|
||||
let options = vec![
|
||||
DockerVariant::Standard,
|
||||
DockerVariant::Rootless,
|
||||
DockerVariant::Manual,
|
||||
];
|
||||
|
||||
Select::new(
|
||||
"Docker binary was not found. How would you like to proceed?",
|
||||
options,
|
||||
)
|
||||
.with_help_message("Standard requires sudo. Rootless runs in user space.")
|
||||
.prompt()
|
||||
.unwrap_or(DockerVariant::Manual)
|
||||
}
|
||||
|
||||
/// Installs docker using the official shell script
|
||||
pub async fn install(&self, variant: DockerVariant) -> Result<(), String> {
|
||||
let (script_url, script_name, use_sudo) = match variant {
|
||||
DockerVariant::Standard => ("https://get.docker.com", "get-docker.sh", true),
|
||||
DockerVariant::Rootless => (
|
||||
"https://get.docker.com/rootless",
|
||||
"get-docker-rootless.sh",
|
||||
false,
|
||||
),
|
||||
DockerVariant::Manual => return Err("Manual installation selected".to_string()),
|
||||
};
|
||||
|
||||
info!("Installing {}...", variant);
|
||||
debug!("Downloading installation script from: {}", script_url);
|
||||
|
||||
// Download the installation script
|
||||
let asset = DownloadableAsset {
|
||||
url: Url::parse(script_url).map_err(|e| {
|
||||
error!("Failed to parse installation script URL: {}", e);
|
||||
format!("Failed to parse installation script URL: {}", e)
|
||||
})?,
|
||||
file_name: script_name.to_string(),
|
||||
checksum: None,
|
||||
};
|
||||
|
||||
let downloaded_script = asset
|
||||
.download_to_path(self.base_dir.join("scripts"))
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!("Failed to download installation script: {}", e);
|
||||
format!("Failed to download installation script: {}", e)
|
||||
})?;
|
||||
|
||||
debug!("Installation script downloaded to: {:?}", downloaded_script);
|
||||
|
||||
// Execute the installation script
|
||||
let mut cmd = std::process::Command::new("sh");
|
||||
if use_sudo {
|
||||
cmd.arg("sudo").arg("sh");
|
||||
}
|
||||
cmd.arg(&downloaded_script);
|
||||
|
||||
debug!("Executing installation command: {:?}", cmd);
|
||||
|
||||
let status = cmd.status().map_err(|e| {
|
||||
error!("Failed to execute docker installation script: {}", e);
|
||||
format!("Failed to execute docker installation script: {}", e)
|
||||
})?;
|
||||
|
||||
if status.success() {
|
||||
info!("{} installed successfully", variant);
|
||||
if variant == DockerVariant::Rootless {
|
||||
info!("Running rootless setup tool to install dependencies and start service...");
|
||||
let mut setup_cmd = std::process::Command::new("sh");
|
||||
|
||||
// Set PATH to include ~/bin where the script was likely installed
|
||||
if let Ok(home) = std::env::var("HOME") {
|
||||
let bin_path = format!("{}/bin", home);
|
||||
if let Ok(current_path) = std::env::var("PATH") {
|
||||
setup_cmd.env("PATH", format!("{}:{}", bin_path, current_path));
|
||||
}
|
||||
setup_cmd.arg(format!("{}/bin/dockerd-rootless-setuptool.sh", home));
|
||||
} else {
|
||||
setup_cmd.arg("dockerd-rootless-setuptool.sh");
|
||||
}
|
||||
|
||||
setup_cmd.arg("install");
|
||||
|
||||
debug!("Executing rootless setup command: {:?}", setup_cmd);
|
||||
let setup_status = setup_cmd.status().map_err(|e| {
|
||||
error!("Failed to execute rootless setup tool: {}", e);
|
||||
format!("Failed to execute rootless setup tool: {}", e)
|
||||
})?;
|
||||
|
||||
if !setup_status.success() {
|
||||
warn!("Rootless setup tool finished with non-zero exit code. You may need to install 'uidmap' or start the service manually.");
|
||||
}
|
||||
|
||||
warn!("Please follow the instructions above to finish rootless setup (environment variables).");
|
||||
}
|
||||
|
||||
// Validate the installation by running hello-world
|
||||
self.validate_installation()?;
|
||||
|
||||
Ok(())
|
||||
} else {
|
||||
error!(
|
||||
"{} installation script failed with exit code: {:?} \n\nOutput:\n{:?}",
|
||||
variant,
|
||||
status.code(),
|
||||
cmd.output(),
|
||||
);
|
||||
Err(format!("{} installation script failed", variant))
|
||||
}
|
||||
}
|
||||
|
||||
/// Validates the Docker installation by running a test container.
|
||||
///
|
||||
/// This method runs `docker run --rm hello-world` to verify that Docker
|
||||
/// is properly installed and functional.
|
||||
fn validate_installation(&self) -> Result<(), String> {
|
||||
info!("Validating Docker installation by running hello-world container...");
|
||||
|
||||
let output = self
|
||||
.command()
|
||||
.args(["run", "--rm", "hello-world"])
|
||||
.output()
|
||||
.map_err(|e| {
|
||||
error!("Failed to execute hello-world validation: {}", e);
|
||||
format!("Failed to execute hello-world validation: {}", e)
|
||||
})?;
|
||||
|
||||
if output.status.success() {
|
||||
let stdout = String::from_utf8_lossy(&output.stdout);
|
||||
if stdout.contains("Hello from Docker!") {
|
||||
info!("Docker installation validated successfully");
|
||||
trace!("Validation output: {}", stdout);
|
||||
Ok(())
|
||||
} else {
|
||||
warn!("Hello-world container ran but expected output not found");
|
||||
debug!("Output was: {}", stdout);
|
||||
Err("Docker validation failed: unexpected output from hello-world".to_string())
|
||||
}
|
||||
} else {
|
||||
let stderr = String::from_utf8_lossy(&output.stderr);
|
||||
error!(
|
||||
"Hello-world validation failed with exit code: {:?}",
|
||||
output.status.code()
|
||||
);
|
||||
debug!("Validation stderr: {}", stderr);
|
||||
if !stderr.is_empty() {
|
||||
Err(format!("Docker validation failed: {}", stderr.trim()))
|
||||
} else {
|
||||
Err(
|
||||
"Docker validation failed: hello-world container did not run successfully"
|
||||
.to_string(),
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Ensures docker is installed, prompting if necessary
|
||||
pub async fn ensure_installed(&self) -> Result<(), String> {
|
||||
if self.is_installed() {
|
||||
debug!("Docker is already installed at: {:?}", self.get_bin_path());
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
debug!("Docker is not installed, prompting for installation method");
|
||||
match self.prompt_for_installation() {
|
||||
DockerVariant::Manual => {
|
||||
info!("User chose manual installation");
|
||||
Err("Docker installation cancelled by user. Please install docker or podman-docker manually.".to_string())
|
||||
}
|
||||
variant => self.install(variant).await,
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a pre-configured Command for running Docker commands.
|
||||
///
|
||||
/// The returned Command is set up with:
|
||||
/// - The correct Docker binary path (handles rootless installations)
|
||||
/// - Appropriate environment variables (e.g., DOCKER_HOST for rootless)
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```no_run
|
||||
/// # use harmony_tools::Docker;
|
||||
/// # use std::path::PathBuf;
|
||||
/// # let docker = Docker::new(PathBuf::from("."));
|
||||
/// let mut cmd = docker.command();
|
||||
/// cmd.args(["ps", "-a"]);
|
||||
/// // Now cmd is ready to be executed
|
||||
/// ```
|
||||
pub fn command(&self) -> std::process::Command {
|
||||
let bin_path = self.get_bin_path();
|
||||
trace!("Creating Docker command with binary: {:?}", bin_path);
|
||||
|
||||
let mut cmd = std::process::Command::new(&bin_path);
|
||||
|
||||
// Add Docker-specific environment variables
|
||||
let env = self.get_docker_env();
|
||||
if !env.is_empty() {
|
||||
trace!("Setting Docker environment variables: {:?}", env);
|
||||
for (key, value) in env {
|
||||
cmd.env(key, value);
|
||||
}
|
||||
} else {
|
||||
trace!("No Docker-specific environment variables to set");
|
||||
}
|
||||
|
||||
cmd
|
||||
}
|
||||
}
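// Illustrative usage sketch (not part of the file above), combining only the
// methods defined here; the base directory is a hypothetical example:
//
//     let docker = Docker::new(std::path::PathBuf::from("/tmp/harmony/docker"));
//     docker.ensure_installed().await?;
//     let output = docker.command().args(["ps", "-a"]).output();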
|
||||
@@ -39,11 +39,20 @@ const CHECKSUM_FAILED_MSG: &str = "Downloaded file failed checksum verification"
|
||||
pub(crate) struct DownloadableAsset {
|
||||
pub(crate) url: Url,
|
||||
pub(crate) file_name: String,
|
||||
pub(crate) checksum: String,
|
||||
pub(crate) checksum: Option<String>,
|
||||
}
|
||||
|
||||
impl DownloadableAsset {
|
||||
fn verify_checksum(&self, file: PathBuf) -> bool {
|
||||
// Skip verification if no checksum is provided
|
||||
let expected_checksum = match &self.checksum {
|
||||
Some(checksum) => checksum,
|
||||
None => {
|
||||
debug!("No checksum provided, skipping verification");
|
||||
return file.exists();
|
||||
}
|
||||
};
|
||||
|
||||
if !file.exists() {
|
||||
debug!("File does not exist: {:?}", file);
|
||||
return false;
|
||||
@@ -76,10 +85,10 @@ impl DownloadableAsset {
|
||||
let result = hasher.finalize();
|
||||
let calculated_hash = format!("{:x}", result);
|
||||
|
||||
debug!("Expected checksum: {}", self.checksum);
|
||||
debug!("Expected checksum: {}", expected_checksum);
|
||||
debug!("Calculated checksum: {}", calculated_hash);
|
||||
|
||||
calculated_hash == self.checksum
|
||||
calculated_hash == *expected_checksum
|
||||
}
|
||||
|
||||
/// Downloads the asset to the specified directory, verifying its checksum.
|
||||
@@ -151,7 +160,8 @@ impl DownloadableAsset {
|
||||
file.flush().await.expect("Failed to flush file");
|
||||
drop(file);
|
||||
|
||||
if !self.verify_checksum(target_file_path.clone()) {
|
||||
// Only verify checksum if one was provided
|
||||
if self.checksum.is_some() && !self.verify_checksum(target_file_path.clone()) {
|
||||
return Err(CHECKSUM_FAILED_MSG.to_string());
|
||||
}
|
||||
|
||||
@@ -202,7 +212,7 @@ mod tests {
|
||||
let asset = DownloadableAsset {
|
||||
url: Url::parse(&server.url("/test.txt").to_string()).unwrap(),
|
||||
file_name: "test.txt".to_string(),
|
||||
checksum: TEST_CONTENT_HASH.to_string(),
|
||||
checksum: Some(TEST_CONTENT_HASH.to_string()),
|
||||
};
|
||||
|
||||
let result = asset
|
||||
@@ -226,7 +236,7 @@ mod tests {
|
||||
let asset = DownloadableAsset {
|
||||
url: Url::parse(&server.url("/test.txt").to_string()).unwrap(),
|
||||
file_name: "test.txt".to_string(),
|
||||
checksum: TEST_CONTENT_HASH.to_string(),
|
||||
checksum: Some(TEST_CONTENT_HASH.to_string()),
|
||||
};
|
||||
|
||||
let target_file_path = folder.join(&asset.file_name);
|
||||
@@ -248,7 +258,7 @@ mod tests {
|
||||
let asset = DownloadableAsset {
|
||||
url: Url::parse(&server.url("/test.txt").to_string()).unwrap(),
|
||||
file_name: "test.txt".to_string(),
|
||||
checksum: TEST_CONTENT_HASH.to_string(),
|
||||
checksum: Some(TEST_CONTENT_HASH.to_string()),
|
||||
};
|
||||
|
||||
let result = asset.download_to_path(folder.join("error")).await;
|
||||
@@ -269,7 +279,7 @@ mod tests {
|
||||
let asset = DownloadableAsset {
|
||||
url: Url::parse(&server.url("/test.txt").to_string()).unwrap(),
|
||||
file_name: "test.txt".to_string(),
|
||||
checksum: TEST_CONTENT_HASH.to_string(),
|
||||
checksum: Some(TEST_CONTENT_HASH.to_string()),
|
||||
};
|
||||
|
||||
let join_handle =
|
||||
@@ -293,11 +303,58 @@ mod tests {
|
||||
let asset = DownloadableAsset {
|
||||
url: Url::parse(&server.url("/specific/path.txt").to_string()).unwrap(),
|
||||
file_name: "path.txt".to_string(),
|
||||
checksum: TEST_CONTENT_HASH.to_string(),
|
||||
checksum: Some(TEST_CONTENT_HASH.to_string()),
|
||||
};
|
||||
|
||||
let result = asset.download_to_path(folder).await.unwrap();
|
||||
let downloaded_content = std::fs::read_to_string(result).unwrap();
|
||||
assert_eq!(downloaded_content, TEST_CONTENT);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_download_without_checksum() {
|
||||
let (folder, server) = setup_test();
|
||||
|
||||
server.expect(
|
||||
Expectation::matching(matchers::any())
|
||||
.respond_with(responders::status_code(200).body(TEST_CONTENT)),
|
||||
);
|
||||
|
||||
let asset = DownloadableAsset {
|
||||
url: Url::parse(&server.url("/test.txt").to_string()).unwrap(),
|
||||
file_name: "test.txt".to_string(),
|
||||
checksum: None,
|
||||
};
|
||||
|
||||
let result = asset
|
||||
.download_to_path(folder.join("no_checksum"))
|
||||
.await
|
||||
.unwrap();
|
||||
let downloaded_content = std::fs::read_to_string(result).unwrap();
|
||||
assert_eq!(downloaded_content, TEST_CONTENT);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_download_without_checksum_already_exists() {
|
||||
let (folder, server) = setup_test();
|
||||
|
||||
server.expect(
|
||||
Expectation::matching(matchers::any())
|
||||
.times(0)
|
||||
.respond_with(responders::status_code(200).body(TEST_CONTENT)),
|
||||
);
|
||||
|
||||
let asset = DownloadableAsset {
|
||||
url: Url::parse(&server.url("/test.txt").to_string()).unwrap(),
|
||||
file_name: "test.txt".to_string(),
|
||||
checksum: None,
|
||||
};
|
||||
|
||||
let target_file_path = folder.join(&asset.file_name);
|
||||
std::fs::write(&target_file_path, TEST_CONTENT).unwrap();
|
||||
|
||||
let result = asset.download_to_path(folder).await.unwrap();
|
||||
let content = std::fs::read_to_string(result).unwrap();
|
||||
assert_eq!(content, TEST_CONTENT);
|
||||
}
|
||||
}
|
||||
@@ -1,10 +1,9 @@
mod downloadable_asset;
use downloadable_asset::*;

use kube::Client;
use log::{debug, info};
use std::{ffi::OsStr, path::PathBuf};

use crate::downloadable_asset::DownloadableAsset;

const K3D_BIN_FILE_NAME: &str = "k3d";

pub struct K3d {
@@ -78,6 +77,7 @@ impl K3d {

debug!("Found binary at {} with checksum {}", binary_url, checksum);

let checksum = Some(checksum);
DownloadableAsset {
url: binary_url,
file_name: K3D_BIN_FILE_NAME.to_string(),
@@ -399,7 +399,7 @@ mod test {
use regex::Regex;
use std::path::PathBuf;

use crate::{K3d, K3D_BIN_FILE_NAME};
use crate::{k3d::K3D_BIN_FILE_NAME, K3d};

#[tokio::test]
async fn k3d_latest_release_should_get_latest() {
harmony_tools/src/lib.rs (new file, 6 lines)
@@ -0,0 +1,6 @@
mod docker;
mod downloadable_asset;
mod k3d;
pub use docker::*;
use downloadable_asset::*;
pub use k3d::*;