Compare commits
4 Commits
feat/docke...feat/nats

| Author | SHA1 | Date |
|---|---|---|
| | 270b6b87df | |
| | 6933280575 | |
| | 77583a1ad1 | |
| | f7404bed36 | |
@@ -10,7 +10,7 @@ members = [
     "opnsense-config",
     "opnsense-config-xml",
     "harmony_cli",
-    "harmony_tools",
+    "k3d",
     "harmony_composer",
     "harmony_inventory_agent",
     "harmony_secret_derive",
@@ -3,15 +3,58 @@ use std::str::FromStr;
 use harmony::{
     inventory::Inventory,
     modules::helm::chart::{HelmChartScore, HelmRepository, NonBlankString},
-    topology::K8sAnywhereTopology,
+    topology::{HelmCommand, K8sAnywhereConfig, K8sAnywhereTopology, TlsRouter, Topology},
 };
 use harmony_macros::hurl;
 use log::info;
 
 #[tokio::main]
 async fn main() {
-    // env_logger::init();
-    let values_yaml = Some(
+    let site1_topo = K8sAnywhereTopology::with_config(K8sAnywhereConfig::remote_k8s_from_env_var(
+        "HARMONY_NATS_SITE_1",
+    ));
+    let site2_topo = K8sAnywhereTopology::with_config(K8sAnywhereConfig::remote_k8s_from_env_var(
+        "HARMONY_NATS_SITE_2",
+    ));
+
+    let site1_domain = site1_topo.get_internal_domain().await.unwrap().unwrap();
+    let site2_domain = site2_topo.get_internal_domain().await.unwrap().unwrap();
+
+    let site1_gateway = format!("nats-gateway.{}", site1_domain);
+    let site2_gateway = format!("nats-gateway.{}", site2_domain);
+
+    tokio::join!(
+        deploy_nats(
+            site1_topo,
+            "site-1",
+            vec![("site-2".to_string(), site2_gateway)]
+        ),
+        deploy_nats(
+            site2_topo,
+            "site-2",
+            vec![("site-1".to_string(), site1_gateway)]
+        ),
+    );
+}
+
+async fn deploy_nats<T: Topology + HelmCommand + TlsRouter + 'static>(
+    topology: T,
+    cluster_name: &str,
+    remote_gateways: Vec<(String, String)>,
+) {
+    topology.ensure_ready().await.unwrap();
+
+    let mut gateway_gateways = String::new();
+    for (name, url) in remote_gateways {
+        gateway_gateways.push_str(&format!(
+            r#"
+        - name: {name}
+          urls:
+            - nats://{url}:7222"#
+        ));
+    }
+
+    let values_yaml = Some(format!(
         r#"config:
   cluster:
     enabled: true
@@ -25,16 +68,31 @@ async fn main() {
   leafnodes:
     enabled: false
     # port: 7422
+  websocket:
+    enabled: true
+    ingress:
+      enabled: true
+      className: openshift-default
+      pathType: Prefix
+      hosts:
+        - nats-ws.{}
   gateway:
-    enabled: false
-    # name: my-gateway
-    # port: 7522
+    enabled: true
+    name: {}
+    port: 7222
+    gateways: {}
+  service:
+    ports:
+      gateway:
+        enabled: true
 natsBox:
   container:
     image:
-      tag: nonroot"#
-        .to_string(),
-    );
+      tag: nonroot"#,
+        topology.get_internal_domain().await.unwrap().unwrap(),
+        cluster_name,
+        gateway_gateways,
+    ));
     let namespace = "nats";
     let nats = HelmChartScore {
         namespace: Some(NonBlankString::from_str(namespace).unwrap()),
@@ -52,14 +110,9 @@ natsBox:
         )),
     };
 
-    harmony_cli::run(
-        Inventory::autoload(),
-        K8sAnywhereTopology::from_env(),
-        vec![Box::new(nats)],
-        None,
-    )
-    .await
-    .unwrap();
+    harmony_cli::run(Inventory::autoload(), topology, vec![Box::new(nats)], None)
+        .await
+        .unwrap();
 
     info!(
         "Enjoy! You can test your nats cluster by running : `kubectl exec -n {namespace} -it deployment/nats-box -- nats pub test hi`"
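For reference, a minimal standalone sketch (the remote hostname is made up) of the YAML fragment that the gateway loop above generates for `config.gateway.gateways`:

```rust
fn render_gateways(remote_gateways: &[(String, String)]) -> String {
    // Mirrors the loop in deploy_nats above: one YAML list entry per remote site,
    // appended after `gateways:` in the rendered values file.
    let mut out = String::new();
    for (name, url) in remote_gateways {
        out.push_str(&format!(
            "\n        - name: {name}\n          urls:\n            - nats://{url}:7222"
        ));
    }
    out
}

fn main() {
    // Hypothetical remote: site-2 reachable through its cluster's internal domain.
    let remotes = vec![(
        "site-2".to_string(),
        "nats-gateway.apps.site2.example.com".to_string(),
    )];
    println!("      gateways:{}", render_gateways(&remotes));
}
```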
@@ -9,14 +9,6 @@ license.workspace = true
 testing = []
 
 [dependencies]
-opnsense-config = { path = "../opnsense-config" }
-opnsense-config-xml = { path = "../opnsense-config-xml" }
-harmony_macros = { path = "../harmony_macros" }
-harmony_types = { path = "../harmony_types" }
-harmony_inventory_agent = { path = "../harmony_inventory_agent" }
-harmony_secret_derive = { path = "../harmony_secret_derive" }
-harmony_secret = { path = "../harmony_secret" }
-harmony_tools = { path = "../harmony_tools" }
 hex = "0.4"
 reqwest = { version = "0.11", features = [
     "blocking",
@@ -34,6 +26,10 @@ log.workspace = true
 env_logger.workspace = true
 async-trait.workspace = true
 cidr.workspace = true
+opnsense-config = { path = "../opnsense-config" }
+opnsense-config-xml = { path = "../opnsense-config-xml" }
+harmony_macros = { path = "../harmony_macros" }
+harmony_types = { path = "../harmony_types" }
 uuid.workspace = true
 url.workspace = true
 kube = { workspace = true, features = ["derive"] }
@@ -43,6 +39,7 @@ http.workspace = true
 serde-value.workspace = true
 helm-wrapper-rs = "0.4.0"
 non-blank-string-rs = "1.0.4"
+k3d-rs = { path = "../k3d" }
 directories.workspace = true
 lazy_static.workspace = true
 dockerfile_builder = "0.1.5"
@@ -74,6 +71,9 @@ base64.workspace = true
 thiserror.workspace = true
 once_cell = "1.21.3"
 walkdir = "2.5.0"
+harmony_inventory_agent = { path = "../harmony_inventory_agent" }
+harmony_secret_derive = { path = "../harmony_secret_derive" }
+harmony_secret = { path = "../harmony_secret" }
 askama.workspace = true
 sqlx.workspace = true
 inquire.workspace = true
@@ -1,11 +0,0 @@
-use async_trait::async_trait;
-
-use std::collections::HashMap;
-
-/// Docker Capability
-#[async_trait]
-pub trait Docker {
-    async fn ensure_installed(&self) -> Result<(), String>;
-    fn get_docker_env(&self) -> HashMap<String, String>;
-    fn docker_command(&self) -> std::process::Command;
-}
@@ -1 +1,5 @@
-pub trait HelmCommand {}
+use std::process::Command;
+
+pub trait HelmCommand {
+    fn get_helm_command(&self) -> Command;
+}
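For a topology that always talks to one fixed cluster, an implementation of the new trait might look like the following sketch (the struct and kubeconfig path are hypothetical, not part of this change):

```rust
use std::process::Command;

// Trait as defined above, repeated here so the sketch stands on its own.
pub trait HelmCommand {
    fn get_helm_command(&self) -> Command;
}

// Hypothetical topology pinned to a single kubeconfig.
struct FixedClusterTopology {
    kubeconfig: String,
}

impl HelmCommand for FixedClusterTopology {
    fn get_helm_command(&self) -> Command {
        // Every helm call routed through this topology targets the same cluster,
        // independent of the caller's KUBECONFIG environment variable.
        let mut cmd = Command::new("helm");
        cmd.args(["--kubeconfig", &self.kubeconfig]);
        cmd
    }
}
```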
@@ -16,7 +16,7 @@ use kube::{
         Api, AttachParams, DeleteParams, ListParams, ObjectList, Patch, PatchParams, ResourceExt,
     },
     config::{KubeConfigOptions, Kubeconfig},
-    core::ErrorResponse,
+    core::{DynamicResourceScope, ErrorResponse},
     discovery::{ApiCapabilities, Scope},
     error::DiscoveryError,
     runtime::reflector::Lookup,
@@ -1,13 +1,7 @@
-use std::{
-    collections::{BTreeMap, HashMap},
-    process::Command,
-    sync::Arc,
-    time::Duration,
-};
+use std::{collections::BTreeMap, process::Command, sync::Arc, time::Duration};
 
 use async_trait::async_trait;
 use base64::{Engine, engine::general_purpose};
-use harmony_tools::K3d;
 use harmony_types::rfc1123::Rfc1123Name;
 use k8s_openapi::api::{
     core::v1::Secret,
@@ -19,12 +13,10 @@ use serde::Serialize;
 use tokio::sync::OnceCell;
 
 use crate::{
-    config::HARMONY_DATA_DIR,
     executors::ExecutorError,
     interpret::InterpretStatus,
     inventory::Inventory,
     modules::{
-        docker::DockerInstallationScore,
         k3d::K3DInstallationScore,
         k8s::ingress::{K8sIngressScore, PathType},
         monitoring::{
@@ -43,6 +35,7 @@ use crate::{
             service_monitor::ServiceMonitor,
         },
    },
+    okd::crd::ingresses_config::Ingress as IngressResource,
    okd::route::OKDTlsPassthroughScore,
    prometheus::{
        k8s_prometheus_alerting_score::K8sPrometheusCRDAlertingScore,
@@ -50,7 +43,7 @@ use crate::{
        },
    },
    score::Score,
-    topology::{Docker, TlsRoute, TlsRouter, ingress::Ingress},
+    topology::{TlsRoute, TlsRouter, ingress::Ingress},
 };
 
 use super::super::{
@@ -115,8 +108,32 @@ impl K8sclient for K8sAnywhereTopology {
 
 #[async_trait]
 impl TlsRouter for K8sAnywhereTopology {
-    async fn get_wildcard_domain(&self) -> Result<Option<String>, String> {
-        todo!()
+    async fn get_internal_domain(&self) -> Result<Option<String>, String> {
+        match self.get_k8s_distribution().await.map_err(|e| {
+            format!(
+                "Could not get internal domain, error getting k8s distribution : {}",
+                e.to_string()
+            )
+        })? {
+            KubernetesDistribution::OpenshiftFamily => {
+                let client = self.k8s_client().await?;
+                if let Some(ingress_config) = client
+                    .get_resource::<IngressResource>("cluster", None)
+                    .await
+                    .map_err(|e| {
+                        format!("Error attempting to get ingress config : {}", e.to_string())
+                    })?
+                {
+                    debug!("Found ingress config {:?}", ingress_config.spec);
+                    Ok(ingress_config.spec.domain.clone())
+                } else {
+                    warn!("Could not find a domain configured in this cluster");
+                    Ok(None)
+                }
+            }
+            KubernetesDistribution::K3sFamily => todo!(),
+            KubernetesDistribution::Default => todo!(),
+        }
     }
 
     /// Returns the port that this router exposes externally.
@@ -358,24 +375,6 @@ impl PrometheusMonitoring<RHOBObservability> for K8sAnywhereTopology {
     }
 }
 
-#[async_trait]
-impl Docker for K8sAnywhereTopology {
-    async fn ensure_installed(&self) -> Result<(), String> {
-        DockerInstallationScore::default()
-            .interpret(&Inventory::empty(), self)
-            .await
-            .map_err(|e| format!("Could not ensure docker is installed : {e}"))?;
-        Ok(())
-    }
-    fn get_docker_env(&self) -> HashMap<String, String> {
-        harmony_tools::Docker::new(HARMONY_DATA_DIR.join("docker")).get_docker_env()
-    }
-
-    fn docker_command(&self) -> std::process::Command {
-        harmony_tools::Docker::new(HARMONY_DATA_DIR.join("docker")).command()
-    }
-}
-
 impl Serialize for K8sAnywhereTopology {
     fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
     where
@@ -763,7 +762,7 @@ impl K8sAnywhereTopology {
         // K3DInstallationScore should expose a method to get_client ? Not too sure what would be a
         // good implementation due to the stateful nature of the k3d thing. Which is why I went
        // with this solution for now
-        let k3d = K3d::new(k3d_score.installation_path, Some(k3d_score.cluster_name));
+        let k3d = k3d_rs::K3d::new(k3d_score.installation_path, Some(k3d_score.cluster_name));
         let state = match k3d.get_client().await {
             Ok(client) => K8sState {
                 client: Arc::new(K8sClient::new(client)),
@@ -1113,7 +1112,21 @@ impl MultiTargetTopology for K8sAnywhereTopology {
     }
 }
 
-impl HelmCommand for K8sAnywhereTopology {}
+impl HelmCommand for K8sAnywhereTopology {
+    fn get_helm_command(&self) -> Command {
+        let mut cmd = Command::new("helm");
+        if let Some(k) = &self.config.kubeconfig {
+            cmd.args(["--kubeconfig", k]);
+        }
+
+        if let Some(c) = &self.config.k8s_context {
+            cmd.args(["--kube-context", c]);
+        }
+
+        info!("Using helm command {cmd:?}");
+        cmd
+    }
+}
 
 #[async_trait]
 impl TenantManager for K8sAnywhereTopology {
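A rough standalone illustration of the flag handling above (the kubeconfig path is made up): every helm invocation issued through the topology is scoped to that cluster rather than to whatever `KUBECONFIG` happens to be exported.

```rust
use std::process::Command;

// Hypothetical stand-ins for K8sAnywhereConfig's kubeconfig / k8s_context fields.
fn helm_for(kubeconfig: Option<&str>, context: Option<&str>) -> Command {
    let mut cmd = Command::new("helm");
    if let Some(k) = kubeconfig {
        cmd.args(["--kubeconfig", k]);
    }
    if let Some(c) = context {
        cmd.args(["--kube-context", c]);
    }
    cmd
}

fn main() {
    // Prints roughly: "helm" "--kubeconfig" "/tmp/site1.kubeconfig"
    println!("{:?}", helm_for(Some("/tmp/site1.kubeconfig"), None));
}
```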
@@ -1134,7 +1147,7 @@ impl TenantManager for K8sAnywhereTopology {
 #[async_trait]
 impl Ingress for K8sAnywhereTopology {
     async fn get_domain(&self, service: &str) -> Result<String, PreparationError> {
-        use log::{debug, trace, warn};
+        use log::{trace, warn};
 
         let client = self.k8s_client().await?;
 
@@ -2,7 +2,7 @@ use async_trait::async_trait;
 use derive_new::new;
 use serde::{Deserialize, Serialize};
 
-use super::{HelmCommand, PreparationError, PreparationOutcome, Topology};
+use super::{PreparationError, PreparationOutcome, Topology};
 
 #[derive(new, Clone, Debug, Serialize, Deserialize)]
 pub struct LocalhostTopology;
@@ -19,6 +19,3 @@ impl Topology for LocalhostTopology {
         })
     }
 }
-
-// TODO: Delete this, temp for test
-impl HelmCommand for LocalhostTopology {}
@@ -1,10 +1,8 @@
-mod docker;
 mod failover;
 mod ha_cluster;
 pub mod ingress;
 pub mod node_exporter;
 pub mod opnsense;
-pub use docker::*;
 pub use failover::*;
 use harmony_types::net::IpAddress;
 mod host_binding;
@@ -112,12 +112,13 @@ pub trait TlsRouter: Send + Sync {
    /// HAProxy frontend→backend "postgres-upstream".
     async fn install_route(&self, config: TlsRoute) -> Result<(), String>;
 
-    /// Gets the base domain that can be used to deploy applications that will be automatically
-    /// routed to this cluster.
+    /// Gets the base domain of this cluster. On openshift family clusters, this is the domain
+    /// used by default for all components, including the default ingress controller that
+    /// transforms ingress to routes.
     ///
-    /// For example, if we have *.apps.nationtech.io pointing to a public load balancer, then this
-    /// function would install route apps.nationtech.io
-    async fn get_wildcard_domain(&self) -> Result<Option<String>, String>;
+    /// For example, get_internal_domain on a cluster that has `console-openshift-console.apps.mycluster.something`
+    /// will return `apps.mycluster.something`
+    async fn get_internal_domain(&self) -> Result<Option<String>, String>;
 
     /// Returns the port that this router exposes externally.
     async fn get_router_port(&self) -> u16;
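A small sketch of the documented contract (the cluster name is made up):

```rust
fn main() {
    // Hypothetical return value of get_internal_domain() on an OpenShift-family cluster
    // whose console is served at console-openshift-console.apps.mycluster.something:
    let internal_domain = "apps.mycluster.something";
    // Callers derive service hosts from it, as the NATS example above does:
    let gateway_host = format!("nats-gateway.{internal_domain}");
    assert_eq!(gateway_host, "nats-gateway.apps.mycluster.something");
}
```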
@@ -1,79 +0,0 @@
-use std::path::PathBuf;
-
-use async_trait::async_trait;
-use log::debug;
-use serde::Serialize;
-
-use crate::{
-    config::HARMONY_DATA_DIR,
-    data::Version,
-    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
-    inventory::Inventory,
-    score::Score,
-    topology::{Docker, Topology},
-};
-use harmony_types::id::Id;
-
-#[derive(Debug, Clone, Serialize)]
-pub struct DockerInstallationScore {
-    pub installation_path: PathBuf,
-}
-
-impl Default for DockerInstallationScore {
-    fn default() -> Self {
-        Self {
-            installation_path: HARMONY_DATA_DIR.join("docker"),
-        }
-    }
-}
-
-impl<T: Topology + Docker> Score<T> for DockerInstallationScore {
-    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
-        Box::new(DockerInstallationInterpret {
-            score: self.clone(),
-        })
-    }
-
-    fn name(&self) -> String {
-        "DockerInstallationScore".into()
-    }
-}
-
-#[derive(Debug)]
-pub struct DockerInstallationInterpret {
-    score: DockerInstallationScore,
-}
-
-#[async_trait]
-impl<T: Topology + Docker> Interpret<T> for DockerInstallationInterpret {
-    async fn execute(
-        &self,
-        _inventory: &Inventory,
-        _topology: &T,
-    ) -> Result<Outcome, InterpretError> {
-        let docker = harmony_tools::Docker::new(self.score.installation_path.clone());
-
-        match docker.ensure_installed().await {
-            Ok(_) => {
-                let msg = "Docker is installed and ready".to_string();
-                debug!("{msg}");
-                Ok(Outcome::success(msg))
-            }
-            Err(msg) => Err(InterpretError::new(format!(
-                "failed to ensure docker is installed : {msg}"
-            ))),
-        }
-    }
-    fn get_name(&self) -> InterpretName {
-        InterpretName::Custom("DockerInstallation")
-    }
-    fn get_version(&self) -> Version {
-        todo!()
-    }
-    fn get_status(&self) -> InterpretStatus {
-        todo!()
-    }
-    fn get_children(&self) -> Vec<Id> {
-        todo!()
-    }
-}
@@ -6,15 +6,11 @@ use crate::topology::{HelmCommand, Topology};
 use async_trait::async_trait;
 use harmony_types::id::Id;
 use harmony_types::net::Url;
-use helm_wrapper_rs;
-use helm_wrapper_rs::blocking::{DefaultHelmExecutor, HelmExecutor};
 use log::{debug, info, warn};
 pub use non_blank_string_rs::NonBlankString;
 use serde::Serialize;
 use std::collections::HashMap;
-use std::path::Path;
-use std::process::{Command, Output, Stdio};
-use std::str::FromStr;
+use std::process::{Output, Stdio};
 use temp_file::TempFile;
 
 #[derive(Debug, Clone, Serialize)]
@@ -65,7 +61,7 @@ pub struct HelmChartInterpret {
     pub score: HelmChartScore,
 }
 impl HelmChartInterpret {
-    fn add_repo(&self) -> Result<(), InterpretError> {
+    fn add_repo<T: HelmCommand>(&self, topology: &T) -> Result<(), InterpretError> {
         let repo = match &self.score.repository {
             Some(repo) => repo,
             None => {
@@ -84,7 +80,7 @@ impl HelmChartInterpret {
             add_args.push("--force-update");
         }
 
-        let add_output = run_helm_command(&add_args)?;
+        let add_output = run_helm_command(topology, &add_args)?;
         let full_output = format!(
             "{}\n{}",
             String::from_utf8_lossy(&add_output.stdout),
@@ -100,23 +96,19 @@ impl HelmChartInterpret {
     }
 }
 
-fn run_helm_command(args: &[&str]) -> Result<Output, InterpretError> {
-    let command_str = format!("helm {}", args.join(" "));
-    debug!(
-        "Got KUBECONFIG: `{}`",
-        std::env::var("KUBECONFIG").unwrap_or("".to_string())
-    );
-    debug!("Running Helm command: `{}`", command_str);
+fn run_helm_command<T: HelmCommand>(topology: &T, args: &[&str]) -> Result<Output, InterpretError> {
+    let mut helm_cmd = topology.get_helm_command();
+    helm_cmd.args(args);
 
-    let output = Command::new("helm")
-        .args(args)
+    debug!("Running Helm command: `{:?}`", helm_cmd);
+
+    let output = helm_cmd
         .stdout(Stdio::piped())
         .stderr(Stdio::piped())
         .output()
         .map_err(|e| {
             InterpretError::new(format!(
-                "Failed to execute helm command '{}': {}. Is helm installed and in PATH?",
-                command_str, e
+                "Failed to execute helm command '{helm_cmd:?}': {e}. Is helm installed and in PATH?",
             ))
         })?;
 
@@ -124,13 +116,13 @@ fn run_helm_command(args: &[&str]) -> Result<Output, InterpretError> {
         let stdout = String::from_utf8_lossy(&output.stdout);
         let stderr = String::from_utf8_lossy(&output.stderr);
         warn!(
-            "Helm command `{}` failed with status: {}\nStdout:\n{}\nStderr:\n{}",
-            command_str, output.status, stdout, stderr
+            "Helm command `{helm_cmd:?}` failed with status: {}\nStdout:\n{stdout}\nStderr:\n{stderr}",
+            output.status
         );
     } else {
         debug!(
-            "Helm command `{}` finished successfully. Status: {}",
-            command_str, output.status
+            "Helm command `{helm_cmd:?}` finished successfully. Status: {}",
+            output.status
         );
     }
 
@@ -142,7 +134,7 @@ impl<T: Topology + HelmCommand> Interpret<T> for HelmChartInterpret {
     async fn execute(
         &self,
         _inventory: &Inventory,
-        _topology: &T,
+        topology: &T,
     ) -> Result<Outcome, InterpretError> {
         let ns = self
             .score
@@ -150,98 +142,62 @@ impl<T: Topology + HelmCommand> Interpret<T> for HelmChartInterpret {
             .as_ref()
             .unwrap_or_else(|| todo!("Get namespace from active kubernetes cluster"));
 
-        let tf: TempFile;
-        let yaml_path: Option<&Path> = match self.score.values_yaml.as_ref() {
-            Some(yaml_str) => {
-                tf = temp_file::with_contents(yaml_str.as_bytes());
-                debug!(
-                    "values yaml string for chart {} :\n {yaml_str}",
-                    self.score.chart_name
-                );
-                Some(tf.path())
-            }
-            None => None,
-        };
-
-        self.add_repo()?;
-
-        let helm_executor = DefaultHelmExecutor::new_with_opts(
-            &NonBlankString::from_str("helm").unwrap(),
-            None,
-            900,
-            false,
-            false,
-        );
-
-        let mut helm_options = Vec::new();
-        if self.score.create_namespace {
-            helm_options.push(NonBlankString::from_str("--create-namespace").unwrap());
-        }
-
-        if self.score.install_only {
-            let chart_list = match helm_executor.list(Some(ns)) {
-                Ok(charts) => charts,
-                Err(e) => {
-                    return Err(InterpretError::new(format!(
-                        "Failed to list scores in namespace {:?} because of error : {}",
-                        self.score.namespace, e
-                    )));
-                }
-            };
-
-            if chart_list
-                .iter()
-                .any(|item| item.name == self.score.release_name.to_string())
-            {
-                info!(
-                    "Release '{}' already exists in namespace '{}'. Skipping installation as install_only is true.",
-                    self.score.release_name, ns
-                );
-
-                return Ok(Outcome::success(format!(
-                    "Helm Chart '{}' already installed to namespace {ns} and install_only=true",
-                    self.score.release_name
-                )));
-            } else {
-                info!(
-                    "Release '{}' not found in namespace '{}'. Proceeding with installation.",
-                    self.score.release_name, ns
-                );
-            }
-        }
-
-        let res = helm_executor.install_or_upgrade(
-            ns,
+        self.add_repo(topology)?;
+
+        let mut args = if self.score.install_only {
+            vec!["install"]
+        } else {
+            vec!["upgrade", "--install"]
+        };
+
+        args.extend(vec![
             &self.score.release_name,
             &self.score.chart_name,
-            self.score.chart_version.as_ref(),
-            self.score.values_overrides.as_ref(),
-            yaml_path,
-            Some(&helm_options),
-        );
-
-        let status = match res {
-            Ok(status) => status,
-            Err(err) => return Err(InterpretError::new(err.to_string())),
-        };
-
-        match status {
-            helm_wrapper_rs::HelmDeployStatus::Deployed => Ok(Outcome::success(format!(
+            "--namespace",
+            &ns,
+        ]);
+
+        if self.score.create_namespace {
+            args.push("--create-namespace");
+        }
+
+        if let Some(version) = &self.score.chart_version {
+            args.push("--version");
+            args.push(&version);
+        }
+
+        let tf: TempFile;
+        if let Some(yaml_str) = &self.score.values_yaml {
+            tf = temp_file::with_contents(yaml_str.as_bytes());
+            args.push("--values");
+            args.push(tf.path().to_str().unwrap());
+        }
+
+        let overrides_strings: Vec<String>;
+        if let Some(overrides) = &self.score.values_overrides {
+            overrides_strings = overrides
+                .iter()
+                .map(|(key, value)| format!("{key}={value}"))
+                .collect();
+            for o in overrides_strings.iter() {
+                args.push("--set");
+                args.push(&o);
+            }
+        }
+
+        let output = run_helm_command(topology, &args)?;
+
+        if output.status.success() {
+            Ok(Outcome::success(format!(
                 "Helm Chart {} deployed",
                 self.score.release_name
-            ))),
-            helm_wrapper_rs::HelmDeployStatus::PendingInstall => Ok(Outcome::running(format!(
-                "Helm Chart {} pending install...",
-                self.score.release_name
-            ))),
-            helm_wrapper_rs::HelmDeployStatus::PendingUpgrade => Ok(Outcome::running(format!(
-                "Helm Chart {} pending upgrade...",
-                self.score.release_name
-            ))),
-            helm_wrapper_rs::HelmDeployStatus::Failed => Err(InterpretError::new(format!(
-                "Helm Chart {} installation failed",
-                self.score.release_name
-            ))),
+            )))
+        } else {
+            Err(InterpretError::new(format!(
+                "Helm Chart {} installation failed: {}",
+                self.score.release_name,
+                String::from_utf8_lossy(&output.stderr)
+            )))
         }
     }
 }
 
@@ -1,7 +1,6 @@
 use std::path::PathBuf;
 
 use async_trait::async_trait;
-use harmony_tools::K3d;
 use log::debug;
 use serde::Serialize;
 
@@ -11,7 +10,7 @@ use crate::{
     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
     inventory::Inventory,
     score::Score,
-    topology::{Docker, Topology},
+    topology::Topology,
 };
 use harmony_types::id::Id;
 
@@ -30,7 +29,7 @@ impl Default for K3DInstallationScore {
     }
 }
 
-impl<T: Topology + Docker> Score<T> for K3DInstallationScore {
+impl<T: Topology> Score<T> for K3DInstallationScore {
     fn create_interpret(&self) -> Box<dyn Interpret<T>> {
         Box::new(K3dInstallationInterpret {
             score: self.clone(),
@@ -48,25 +47,19 @@ pub struct K3dInstallationInterpret {
 }
 
 #[async_trait]
-impl<T: Topology + Docker> Interpret<T> for K3dInstallationInterpret {
+impl<T: Topology> Interpret<T> for K3dInstallationInterpret {
     async fn execute(
         &self,
         _inventory: &Inventory,
-        topology: &T,
+        _topology: &T,
     ) -> Result<Outcome, InterpretError> {
-        let k3d = K3d::new(
+        let k3d = k3d_rs::K3d::new(
             self.score.installation_path.clone(),
             Some(self.score.cluster_name.clone()),
         );
 
-        Docker::ensure_installed(topology)
-            .await
-            .map_err(|e| InterpretError::new(format!("Docker requirement for k3d failed: {e}")))?;
-
         match k3d.ensure_installed().await {
             Ok(_client) => {
-                // Ensure Docker is also ready as k3d depends on it
-
                 let msg = format!("k3d cluster '{}' installed ", self.score.cluster_name);
                 debug!("{msg}");
                 Ok(Outcome::success(msg))
@@ -4,7 +4,6 @@ pub mod brocade;
 pub mod cert_manager;
 pub mod dhcp;
 pub mod dns;
-pub mod docker;
 pub mod dummy;
 pub mod helm;
 pub mod http;
@@ -5,7 +5,7 @@ use crate::topology::{FailoverTopology, TlsRoute, TlsRouter};
 
 #[async_trait]
 impl<T: TlsRouter> TlsRouter for FailoverTopology<T> {
-    async fn get_wildcard_domain(&self) -> Result<Option<String>, String> {
+    async fn get_internal_domain(&self) -> Result<Option<String>, String> {
         todo!()
     }
 
harmony/src/modules/okd/crd/ingresses_config.rs (new file, 214 lines)
@@ -0,0 +1,214 @@
+use k8s_openapi::apimachinery::pkg::apis::meta::v1::{ListMeta, ObjectMeta};
+use k8s_openapi::{ClusterResourceScope, Resource};
+use serde::{Deserialize, Serialize};
+
+#[derive(Deserialize, Serialize, Clone, Debug)]
+#[serde(rename_all = "camelCase")]
+pub struct Ingress {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub api_version: Option<String>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub kind: Option<String>,
+    pub metadata: ObjectMeta,
+
+    pub spec: IngressSpec,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub status: Option<IngressStatus>,
+}
+
+impl Resource for Ingress {
+    const API_VERSION: &'static str = "config.openshift.io/v1";
+    const GROUP: &'static str = "config.openshift.io";
+    const VERSION: &'static str = "v1";
+    const KIND: &'static str = "Ingress";
+    const URL_PATH_SEGMENT: &'static str = "ingresses";
+    type Scope = ClusterResourceScope;
+}
+
+impl k8s_openapi::Metadata for Ingress {
+    type Ty = ObjectMeta;
+
+    fn metadata(&self) -> &Self::Ty {
+        &self.metadata
+    }
+
+    fn metadata_mut(&mut self) -> &mut Self::Ty {
+        &mut self.metadata
+    }
+}
+
+impl Default for Ingress {
+    fn default() -> Self {
+        Ingress {
+            api_version: Some("config.openshift.io/v1".to_string()),
+            kind: Some("Ingress".to_string()),
+            metadata: ObjectMeta::default(),
+            spec: IngressSpec::default(),
+            status: None,
+        }
+    }
+}
+
+#[derive(Deserialize, Serialize, Clone, Debug)]
+#[serde(rename_all = "camelCase")]
+pub struct IngressList {
+    pub metadata: ListMeta,
+    pub items: Vec<Ingress>,
+}
+
+impl Default for IngressList {
+    fn default() -> Self {
+        Self {
+            metadata: ListMeta::default(),
+            items: Vec::new(),
+        }
+    }
+}
+
+impl Resource for IngressList {
+    const API_VERSION: &'static str = "config.openshift.io/v1";
+    const GROUP: &'static str = "config.openshift.io";
+    const VERSION: &'static str = "v1";
+    const KIND: &'static str = "IngressList";
+    const URL_PATH_SEGMENT: &'static str = "ingresses";
+    type Scope = ClusterResourceScope;
+}
+
+impl k8s_openapi::Metadata for IngressList {
+    type Ty = ListMeta;
+
+    fn metadata(&self) -> &Self::Ty {
+        &self.metadata
+    }
+
+    fn metadata_mut(&mut self) -> &mut Self::Ty {
+        &mut self.metadata
+    }
+}
+
+#[derive(Deserialize, Serialize, Clone, Debug, Default)]
+#[serde(rename_all = "camelCase")]
+pub struct IngressSpec {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub apps_domain: Option<String>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub component_routes: Option<Vec<ComponentRouteSpec>>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub domain: Option<String>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub load_balancer: Option<LoadBalancer>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub required_hsts_policies: Option<Vec<RequiredHSTSPolicy>>,
+}
+
+#[derive(Deserialize, Serialize, Clone, Debug)]
+#[serde(rename_all = "camelCase")]
+pub struct ComponentRouteSpec {
+    pub hostname: String,
+    pub name: String,
+    pub namespace: String,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub serving_cert_key_pair_secret: Option<SecretNameReference>,
+}
+
+#[derive(Deserialize, Serialize, Clone, Debug)]
+#[serde(rename_all = "camelCase")]
+pub struct SecretNameReference {
+    pub name: String,
+}
+
+#[derive(Deserialize, Serialize, Clone, Debug, Default)]
+#[serde(rename_all = "camelCase")]
+pub struct LoadBalancer {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub platform: Option<IngressPlatform>,
+}
+
+#[derive(Deserialize, Serialize, Clone, Debug, Default)]
+#[serde(rename_all = "camelCase")]
+pub struct IngressPlatform {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub aws: Option<AWSPlatformLoadBalancer>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub r#type: Option<String>,
+}
+
+#[derive(Deserialize, Serialize, Clone, Debug)]
+#[serde(rename_all = "camelCase")]
+pub struct AWSPlatformLoadBalancer {
+    pub r#type: String,
+}
+
+#[derive(Deserialize, Serialize, Clone, Debug)]
+#[serde(rename_all = "camelCase")]
+pub struct RequiredHSTSPolicy {
+    pub domain_patterns: Vec<String>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub include_sub_domains_policy: Option<String>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub max_age: Option<MaxAgePolicy>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub namespace_selector: Option<k8s_openapi::apimachinery::pkg::apis::meta::v1::LabelSelector>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub preload_policy: Option<String>,
+}
+
+#[derive(Deserialize, Serialize, Clone, Debug, Default)]
+#[serde(rename_all = "camelCase")]
+pub struct MaxAgePolicy {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub largest_max_age: Option<i32>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub smallest_max_age: Option<i32>,
+}
+
+#[derive(Deserialize, Serialize, Clone, Debug, Default)]
+#[serde(rename_all = "camelCase")]
+pub struct IngressStatus {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub component_routes: Option<Vec<ComponentRouteStatus>>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub default_placement: Option<String>,
+}
+
+#[derive(Deserialize, Serialize, Clone, Debug)]
+#[serde(rename_all = "camelCase")]
+pub struct ComponentRouteStatus {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub conditions: Option<Vec<k8s_openapi::apimachinery::pkg::apis::meta::v1::Condition>>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub consuming_users: Option<Vec<String>>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub current_hostnames: Option<Vec<String>>,
+
+    pub default_hostname: String,
+    pub name: String,
+    pub namespace: String,
+    pub related_objects: Vec<ObjectReference>,
+}
+
+#[derive(Deserialize, Serialize, Clone, Debug)]
+#[serde(rename_all = "camelCase")]
+pub struct ObjectReference {
+    pub group: String,
+    pub name: String,
+    pub namespace: String,
+    pub resource: String,
+}
@@ -1,2 +1,3 @@
 pub mod nmstate;
 pub mod route;
+pub mod ingresses_config;
@@ -1,5 +1,4 @@
 use k8s_openapi::apimachinery::pkg::apis::meta::v1::{ListMeta, ObjectMeta, Time};
-use k8s_openapi::apimachinery::pkg::util::intstr::IntOrString;
 use k8s_openapi::{NamespaceResourceScope, Resource};
 use serde::{Deserialize, Serialize};
 
@@ -7,11 +7,14 @@ use harmony::{
 };
 use log::{error, info, log_enabled};
 use std::io::Write;
-use std::sync::Mutex;
+use std::sync::{Mutex, OnceLock};
 
 pub fn init() {
-    configure_logger();
-    handle_events();
+    static INITIALIZED: OnceLock<()> = OnceLock::new();
+    INITIALIZED.get_or_init(|| {
+        configure_logger();
+        handle_events();
+    });
 }
 
 fn configure_logger() {
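A short illustration of what the `OnceLock` guard buys (the test is illustrative, not from the repo): `init()` can now be called from several entry points or tests without re-initializing the logger.

```rust
#[test]
fn init_can_be_called_repeatedly() {
    // Only the first call runs configure_logger()/handle_events();
    // the second is a no-op because the OnceLock is already initialized.
    init();
    init();
}
```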
@@ -1,326 +0,0 @@
-use crate::downloadable_asset::DownloadableAsset;
-use inquire::Select;
-use log::{debug, error, info, trace, warn};
-use std::collections::HashMap;
-use std::fmt;
-use std::path::PathBuf;
-use url::Url;
-
-pub struct Docker {
-    base_dir: PathBuf,
-}
-
-#[derive(Debug, PartialEq)]
-pub enum DockerVariant {
-    Standard,
-    Rootless,
-    Manual,
-}
-
-impl fmt::Display for DockerVariant {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        match self {
-            DockerVariant::Standard => write!(f, "Standard Docker (requires sudo)"),
-            DockerVariant::Rootless => write!(f, "Rootless Docker (no sudo required)"),
-            DockerVariant::Manual => {
-                write!(f, "Exit and install manually (Docker or podman-docker)")
-            }
-        }
-    }
-}
-
-impl Docker {
-    pub fn new(base_dir: PathBuf) -> Self {
-        Self { base_dir }
-    }
-
-    /// Provides the DOCKER_HOST and DOCKER_SOCK env vars for local usage.
-    ///
-    /// If a rootless Docker installation is detected in the user's home directory,
-    /// it returns the appropriate `DOCKER_HOST` pointing to the user's Docker socket.
-    /// Otherwise, it returns an empty HashMap, assuming the standard system-wide
-    /// Docker installation is used.
-    pub fn get_docker_env(&self) -> HashMap<String, String> {
-        let mut env = HashMap::new();
-
-        if let Ok(home) = std::env::var("HOME") {
-            let rootless_sock = PathBuf::from(&home).join(".docker/run/docker.sock");
-            let rootless_bin = PathBuf::from(&home).join("bin/docker");
-
-            if rootless_bin.exists() && rootless_sock.exists() {
-                let docker_host = format!("unix://{}", rootless_sock.display());
-                debug!(
-                    "Detected rootless Docker, setting DOCKER_HOST={}",
-                    docker_host
-                );
-                env.insert("DOCKER_HOST".to_string(), docker_host);
-            }
-        }
-
-        env
-    }
-
-    /// Gets the path to the docker binary
-    pub fn get_bin_path(&self) -> PathBuf {
-        // Check standard PATH first
-        if let Ok(path) = std::process::Command::new("which")
-            .arg("docker")
-            .output()
-            .map(|o| PathBuf::from(String::from_utf8_lossy(&o.stdout).trim()))
-        {
-            if path.exists() {
-                debug!("Found Docker in PATH: {:?}", path);
-                return path;
-            }
-        }
-
-        // Check common rootless location
-        if let Ok(home) = std::env::var("HOME") {
-            let rootless_path = PathBuf::from(home).join("bin/docker");
-            if rootless_path.exists() {
-                debug!("Found rootless Docker at: {:?}", rootless_path);
-                return rootless_path;
-            }
-        }
-
-        debug!("Docker not found in PATH or rootless location, using 'docker' from PATH");
-        PathBuf::from("docker")
-    }
-
-    /// Checks if Docker is installed and the daemon is responsive.
-    pub fn is_installed(&self) -> bool {
-        trace!("Checking if Docker is installed and responsive");
-
-        self.command()
-            .arg("info")
-            .output()
-            .map(|output| {
-                if output.status.success() {
-                    trace!("Docker daemon is responsive");
-                    true
-                } else {
-                    trace!(
-                        "Docker daemon check failed with status: {:?}",
-                        output.status
-                    );
-                    false
-                }
-            })
-            .map_err(|e| {
-                trace!("Failed to execute Docker daemon check: {}", e);
-                e
-            })
-            .unwrap_or(false)
-    }
-
-    /// Prompts the user to choose an installation method
-    fn prompt_for_installation(&self) -> DockerVariant {
-        let options = vec![
-            DockerVariant::Standard,
-            DockerVariant::Rootless,
-            DockerVariant::Manual,
-        ];
-
-        Select::new(
-            "Docker binary was not found. How would you like to proceed?",
-            options,
-        )
-        .with_help_message("Standard requires sudo. Rootless runs in user space.")
-        .prompt()
-        .unwrap_or(DockerVariant::Manual)
-    }
-
-    /// Installs docker using the official shell script
-    pub async fn install(&self, variant: DockerVariant) -> Result<(), String> {
-        let (script_url, script_name, use_sudo) = match variant {
-            DockerVariant::Standard => ("https://get.docker.com", "get-docker.sh", true),
-            DockerVariant::Rootless => (
-                "https://get.docker.com/rootless",
-                "get-docker-rootless.sh",
-                false,
-            ),
-            DockerVariant::Manual => return Err("Manual installation selected".to_string()),
-        };
-
-        info!("Installing {}...", variant);
-        debug!("Downloading installation script from: {}", script_url);
-
-        // Download the installation script
-        let asset = DownloadableAsset {
-            url: Url::parse(script_url).map_err(|e| {
-                error!("Failed to parse installation script URL: {}", e);
-                format!("Failed to parse installation script URL: {}", e)
-            })?,
-            file_name: script_name.to_string(),
-            checksum: None,
-        };
-
-        let downloaded_script = asset
-            .download_to_path(self.base_dir.join("scripts"))
-            .await
-            .map_err(|e| {
-                error!("Failed to download installation script: {}", e);
-                format!("Failed to download installation script: {}", e)
-            })?;
-
-        debug!("Installation script downloaded to: {:?}", downloaded_script);
-
-        // Execute the installation script
-        let mut cmd = std::process::Command::new("sh");
-        if use_sudo {
-            cmd.arg("sudo").arg("sh");
-        }
-        cmd.arg(&downloaded_script);
-
-        debug!("Executing installation command: {:?}", cmd);
-
-        let status = cmd.status().map_err(|e| {
-            error!("Failed to execute docker installation script: {}", e);
-            format!("Failed to execute docker installation script: {}", e)
-        })?;
-
-        if status.success() {
-            info!("{} installed successfully", variant);
-            if variant == DockerVariant::Rootless {
-                info!("Running rootless setup tool to install dependencies and start service...");
-                let mut setup_cmd = std::process::Command::new("sh");
-
-                // Set PATH to include ~/bin where the script was likely installed
-                if let Ok(home) = std::env::var("HOME") {
-                    let bin_path = format!("{}/bin", home);
-                    if let Ok(current_path) = std::env::var("PATH") {
-                        setup_cmd.env("PATH", format!("{}:{}", bin_path, current_path));
-                    }
-                    setup_cmd.arg(format!("{}/bin/dockerd-rootless-setuptool.sh", home));
-                } else {
-                    setup_cmd.arg("dockerd-rootless-setuptool.sh");
-                }
-
-                setup_cmd.arg("install");
-
-                debug!("Executing rootless setup command: {:?}", setup_cmd);
-                let setup_status = setup_cmd.status().map_err(|e| {
-                    error!("Failed to execute rootless setup tool: {}", e);
-                    format!("Failed to execute rootless setup tool: {}", e)
-                })?;
-
-                if !setup_status.success() {
-                    warn!("Rootless setup tool finished with non-zero exit code. You may need to install 'uidmap' or start the service manually.");
-                }
-
-                warn!("Please follow the instructions above to finish rootless setup (environment variables).");
-            }
-
-            // Validate the installation by running hello-world
-            self.validate_installation()?;
-
-            Ok(())
-        } else {
-            error!(
-                "{} installation script failed with exit code: {:?} \n\nOutput:\n{:?}",
-                variant,
-                status.code(),
-                cmd.output(),
-            );
-            Err(format!("{} installation script failed", variant))
-        }
-    }
-
-    /// Validates the Docker installation by running a test container.
-    ///
-    /// This method runs `docker run --rm hello-world` to verify that Docker
-    /// is properly installed and functional.
-    fn validate_installation(&self) -> Result<(), String> {
-        info!("Validating Docker installation by running hello-world container...");
-
-        let output = self
-            .command()
-            .args(["run", "--rm", "hello-world"])
-            .output()
-            .map_err(|e| {
-                error!("Failed to execute hello-world validation: {}", e);
-                format!("Failed to execute hello-world validation: {}", e)
-            })?;
-
-        if output.status.success() {
-            let stdout = String::from_utf8_lossy(&output.stdout);
-            if stdout.contains("Hello from Docker!") {
-                info!("Docker installation validated successfully");
-                trace!("Validation output: {}", stdout);
-                Ok(())
-            } else {
-                warn!("Hello-world container ran but expected output not found");
-                debug!("Output was: {}", stdout);
-                Err("Docker validation failed: unexpected output from hello-world".to_string())
-            }
-        } else {
-            let stderr = String::from_utf8_lossy(&output.stderr);
-            error!(
-                "Hello-world validation failed with exit code: {:?}",
-                output.status.code()
-            );
-            debug!("Validation stderr: {}", stderr);
-            if !stderr.is_empty() {
-                Err(format!("Docker validation failed: {}", stderr.trim()))
-            } else {
-                Err(
-                    "Docker validation failed: hello-world container did not run successfully"
-                        .to_string(),
-                )
-            }
-        }
-    }
-
-    /// Ensures docker is installed, prompting if necessary
-    pub async fn ensure_installed(&self) -> Result<(), String> {
-        if self.is_installed() {
-            debug!("Docker is already installed at: {:?}", self.get_bin_path());
-            return Ok(());
-        }
-
-        debug!("Docker is not installed, prompting for installation method");
-        match self.prompt_for_installation() {
-            DockerVariant::Manual => {
-                info!("User chose manual installation");
-                Err("Docker installation cancelled by user. Please install docker or podman-docker manually.".to_string())
-            }
-            variant => self.install(variant).await,
-        }
-    }
-
-    /// Creates a pre-configured Command for running Docker commands.
-    ///
-    /// The returned Command is set up with:
-    /// - The correct Docker binary path (handles rootless installations)
-    /// - Appropriate environment variables (e.g., DOCKER_HOST for rootless)
-    ///
-    /// # Example
-    ///
-    /// ```no_run
-    /// # use harmony_tools::Docker;
-    /// # use std::path::PathBuf;
-    /// # let docker = Docker::new(PathBuf::from("."));
-    /// let mut cmd = docker.command();
-    /// cmd.args(["ps", "-a"]);
-    /// // Now cmd is ready to be executed
-    /// ```
-    pub fn command(&self) -> std::process::Command {
-        let bin_path = self.get_bin_path();
-        trace!("Creating Docker command with binary: {:?}", bin_path);
-
-        let mut cmd = std::process::Command::new(&bin_path);
-
-        // Add Docker-specific environment variables
-        let env = self.get_docker_env();
-        if !env.is_empty() {
-            trace!("Setting Docker environment variables: {:?}", env);
-            for (key, value) in env {
-                cmd.env(key, value);
-            }
-        } else {
-            trace!("No Docker-specific environment variables to set");
-        }
-
-        cmd
-    }
-}
@@ -1,6 +0,0 @@
-mod docker;
-mod downloadable_asset;
-mod k3d;
-pub use docker::*;
-use downloadable_asset::*;
-pub use k3d::*;
@@ -1,6 +1,5 @@
 [package]
-name = "harmony_tools"
-description = "Install tools such as k3d, docker and more"
+name = "k3d-rs"
 edition = "2021"
 version.workspace = true
 readme.workspace = true
@@ -17,7 +16,6 @@ url.workspace = true
 sha2 = "0.10.8"
 futures-util = "0.3.31"
 kube.workspace = true
-inquire.workspace = true
 
 [dev-dependencies]
 env_logger = { workspace = true }
@@ -39,20 +39,11 @@ const CHECKSUM_FAILED_MSG: &str = "Downloaded file failed checksum verification"
 pub(crate) struct DownloadableAsset {
     pub(crate) url: Url,
     pub(crate) file_name: String,
-    pub(crate) checksum: Option<String>,
+    pub(crate) checksum: String,
 }
 
 impl DownloadableAsset {
     fn verify_checksum(&self, file: PathBuf) -> bool {
-        // Skip verification if no checksum is provided
-        let expected_checksum = match &self.checksum {
-            Some(checksum) => checksum,
-            None => {
-                debug!("No checksum provided, skipping verification");
-                return file.exists();
-            }
-        };
-
         if !file.exists() {
             debug!("File does not exist: {:?}", file);
             return false;
@@ -85,10 +76,10 @@ impl DownloadableAsset {
         let result = hasher.finalize();
         let calculated_hash = format!("{:x}", result);
 
-        debug!("Expected checksum: {}", expected_checksum);
+        debug!("Expected checksum: {}", self.checksum);
         debug!("Calculated checksum: {}", calculated_hash);
 
-        calculated_hash == *expected_checksum
+        calculated_hash == self.checksum
     }
 
     /// Downloads the asset to the specified directory, verifying its checksum.
@@ -160,8 +151,7 @@ impl DownloadableAsset {
         file.flush().await.expect("Failed to flush file");
         drop(file);
 
-        // Only verify checksum if one was provided
-        if self.checksum.is_some() && !self.verify_checksum(target_file_path.clone()) {
+        if !self.verify_checksum(target_file_path.clone()) {
             return Err(CHECKSUM_FAILED_MSG.to_string());
         }
 
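With checksum demoted from Option<String> to a required String, every DownloadableAsset now carries the expected SHA-256 hex digest up front and verify_checksum always runs after a download. A minimal sketch of producing a compatible digest with the same sha2 crate the verifier uses; the helper name and input bytes are hypothetical:

use sha2::{Digest, Sha256};

// Hypothetical helper: hex-encode a SHA-256 digest the way verify_checksum expects it.
fn sha256_hex(bytes: &[u8]) -> String {
    let mut hasher = Sha256::new();
    hasher.update(bytes);
    // Same "{:x}" formatting as the comparison above, so the strings match exactly.
    format!("{:x}", hasher.finalize())
}

// e.g. checksum: sha256_hex(b"expected file contents")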
@@ -212,7 +202,7 @@ mod tests {
         let asset = DownloadableAsset {
             url: Url::parse(&server.url("/test.txt").to_string()).unwrap(),
             file_name: "test.txt".to_string(),
-            checksum: Some(TEST_CONTENT_HASH.to_string()),
+            checksum: TEST_CONTENT_HASH.to_string(),
         };
 
         let result = asset
@@ -236,7 +226,7 @@ mod tests {
         let asset = DownloadableAsset {
             url: Url::parse(&server.url("/test.txt").to_string()).unwrap(),
             file_name: "test.txt".to_string(),
-            checksum: Some(TEST_CONTENT_HASH.to_string()),
+            checksum: TEST_CONTENT_HASH.to_string(),
         };
 
         let target_file_path = folder.join(&asset.file_name);
@@ -258,7 +248,7 @@ mod tests {
         let asset = DownloadableAsset {
             url: Url::parse(&server.url("/test.txt").to_string()).unwrap(),
             file_name: "test.txt".to_string(),
-            checksum: Some(TEST_CONTENT_HASH.to_string()),
+            checksum: TEST_CONTENT_HASH.to_string(),
         };
 
         let result = asset.download_to_path(folder.join("error")).await;
@@ -279,7 +269,7 @@ mod tests {
         let asset = DownloadableAsset {
             url: Url::parse(&server.url("/test.txt").to_string()).unwrap(),
             file_name: "test.txt".to_string(),
-            checksum: Some(TEST_CONTENT_HASH.to_string()),
+            checksum: TEST_CONTENT_HASH.to_string(),
        };
 
         let join_handle =
@@ -303,58 +293,11 @@ mod tests {
         let asset = DownloadableAsset {
             url: Url::parse(&server.url("/specific/path.txt").to_string()).unwrap(),
             file_name: "path.txt".to_string(),
-            checksum: Some(TEST_CONTENT_HASH.to_string()),
+            checksum: TEST_CONTENT_HASH.to_string(),
         };
 
         let result = asset.download_to_path(folder).await.unwrap();
         let downloaded_content = std::fs::read_to_string(result).unwrap();
         assert_eq!(downloaded_content, TEST_CONTENT);
     }
-
-    #[tokio::test]
-    async fn test_download_without_checksum() {
-        let (folder, server) = setup_test();
-
-        server.expect(
-            Expectation::matching(matchers::any())
-                .respond_with(responders::status_code(200).body(TEST_CONTENT)),
-        );
-
-        let asset = DownloadableAsset {
-            url: Url::parse(&server.url("/test.txt").to_string()).unwrap(),
-            file_name: "test.txt".to_string(),
-            checksum: None,
-        };
-
-        let result = asset
-            .download_to_path(folder.join("no_checksum"))
-            .await
-            .unwrap();
-        let downloaded_content = std::fs::read_to_string(result).unwrap();
-        assert_eq!(downloaded_content, TEST_CONTENT);
-    }
-
-    #[tokio::test]
-    async fn test_download_without_checksum_already_exists() {
-        let (folder, server) = setup_test();
-
-        server.expect(
-            Expectation::matching(matchers::any())
-                .times(0)
-                .respond_with(responders::status_code(200).body(TEST_CONTENT)),
-        );
-
-        let asset = DownloadableAsset {
-            url: Url::parse(&server.url("/test.txt").to_string()).unwrap(),
-            file_name: "test.txt".to_string(),
-            checksum: None,
-        };
-
-        let target_file_path = folder.join(&asset.file_name);
-        std::fs::write(&target_file_path, TEST_CONTENT).unwrap();
-
-        let result = asset.download_to_path(folder).await.unwrap();
-        let content = std::fs::read_to_string(result).unwrap();
-        assert_eq!(content, TEST_CONTENT);
-    }
 }
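With the no-checksum tests removed, the remaining interesting failure mode is a digest mismatch: download_to_path should reject any body whose hash differs from the now-mandatory checksum. A sketch of how that path could be exercised with the setup_test and httptest helpers visible above; the test name and the all-zero digest are placeholders, not part of this diff:

#[tokio::test]
async fn test_download_with_wrong_checksum_fails() {
    let (folder, server) = setup_test();

    server.expect(
        Expectation::matching(matchers::any())
            .respond_with(responders::status_code(200).body(TEST_CONTENT)),
    );

    let asset = DownloadableAsset {
        url: Url::parse(&server.url("/test.txt").to_string()).unwrap(),
        file_name: "test.txt".to_string(),
        // Deliberately wrong digest so verify_checksum rejects the download.
        checksum: "0".repeat(64),
    };

    let result = asset.download_to_path(folder).await;
    assert!(result.is_err());
}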
@@ -1,9 +1,10 @@
+mod downloadable_asset;
+use downloadable_asset::*;
+
 use kube::Client;
 use log::{debug, info};
 use std::{ffi::OsStr, path::PathBuf};
 
-use crate::downloadable_asset::DownloadableAsset;
-
 const K3D_BIN_FILE_NAME: &str = "k3d";
 
 pub struct K3d {
@@ -77,7 +78,6 @@ impl K3d {
 
         debug!("Found binary at {} with checksum {}", binary_url, checksum);
 
-        let checksum = Some(checksum);
         DownloadableAsset {
             url: binary_url,
             file_name: K3D_BIN_FILE_NAME.to_string(),
@@ -399,7 +399,7 @@ mod test {
     use regex::Regex;
     use std::path::PathBuf;
 
-    use crate::{k3d::K3D_BIN_FILE_NAME, K3d};
+    use crate::{K3d, K3D_BIN_FILE_NAME};
 
     #[tokio::test]
     async fn k3d_latest_release_should_get_latest() {