Compare commits
3 Commits
feat/ceph-
...
better-ind
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
387ae9f494 | ||
|
|
336e1cfefe | ||
|
|
403e199062 |
@@ -9,7 +9,7 @@ jobs:
|
|||||||
check:
|
check:
|
||||||
runs-on: docker
|
runs-on: docker
|
||||||
container:
|
container:
|
||||||
image: hub.nationtech.io/harmony/harmony_composer:latest
|
image: hub.nationtech.io/harmony/harmony_composer:latest@sha256:eb0406fcb95c63df9b7c4b19bc50ad7914dd8232ce98e9c9abef628e07c69386
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v4
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ on:
|
|||||||
jobs:
|
jobs:
|
||||||
package_harmony_composer:
|
package_harmony_composer:
|
||||||
container:
|
container:
|
||||||
image: hub.nationtech.io/harmony/harmony_composer:latest
|
image: hub.nationtech.io/harmony/harmony_composer:latest@sha256:eb0406fcb95c63df9b7c4b19bc50ad7914dd8232ce98e9c9abef628e07c69386
|
||||||
runs-on: dind
|
runs-on: dind
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
@@ -45,14 +45,14 @@ jobs:
|
|||||||
-H "Authorization: token ${{ secrets.GITEATOKEN }}" \
|
-H "Authorization: token ${{ secrets.GITEATOKEN }}" \
|
||||||
"https://git.nationtech.io/api/v1/repos/nationtech/harmony/releases/tags/snapshot-latest" \
|
"https://git.nationtech.io/api/v1/repos/nationtech/harmony/releases/tags/snapshot-latest" \
|
||||||
| jq -r '.id // empty')
|
| jq -r '.id // empty')
|
||||||
|
|
||||||
if [ -n "$RELEASE_ID" ]; then
|
if [ -n "$RELEASE_ID" ]; then
|
||||||
# Delete existing release
|
# Delete existing release
|
||||||
curl -X DELETE \
|
curl -X DELETE \
|
||||||
-H "Authorization: token ${{ secrets.GITEATOKEN }}" \
|
-H "Authorization: token ${{ secrets.GITEATOKEN }}" \
|
||||||
"https://git.nationtech.io/api/v1/repos/nationtech/harmony/releases/$RELEASE_ID"
|
"https://git.nationtech.io/api/v1/repos/nationtech/harmony/releases/$RELEASE_ID"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Create new release
|
# Create new release
|
||||||
RESPONSE=$(curl -X POST \
|
RESPONSE=$(curl -X POST \
|
||||||
-H "Authorization: token ${{ secrets.GITEATOKEN }}" \
|
-H "Authorization: token ${{ secrets.GITEATOKEN }}" \
|
||||||
@@ -65,7 +65,7 @@ jobs:
|
|||||||
"prerelease": true
|
"prerelease": true
|
||||||
}' \
|
}' \
|
||||||
"https://git.nationtech.io/api/v1/repos/nationtech/harmony/releases")
|
"https://git.nationtech.io/api/v1/repos/nationtech/harmony/releases")
|
||||||
|
|
||||||
echo "RELEASE_ID=$(echo $RESPONSE | jq -r '.id')" >> $GITHUB_ENV
|
echo "RELEASE_ID=$(echo $RESPONSE | jq -r '.id')" >> $GITHUB_ENV
|
||||||
|
|
||||||
- name: Upload Linux binary
|
- name: Upload Linux binary
|
||||||
|
|||||||
648
Cargo.lock
generated
648
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
11
Cargo.toml
11
Cargo.toml
@@ -12,9 +12,6 @@ members = [
|
|||||||
"harmony_cli",
|
"harmony_cli",
|
||||||
"k3d",
|
"k3d",
|
||||||
"harmony_composer",
|
"harmony_composer",
|
||||||
"harmony_inventory_agent",
|
|
||||||
"harmony_secret_derive",
|
|
||||||
"harmony_secret",
|
|
||||||
]
|
]
|
||||||
|
|
||||||
[workspace.package]
|
[workspace.package]
|
||||||
@@ -23,7 +20,7 @@ readme = "README.md"
|
|||||||
license = "GNU AGPL v3"
|
license = "GNU AGPL v3"
|
||||||
|
|
||||||
[workspace.dependencies]
|
[workspace.dependencies]
|
||||||
log = { version = "0.4", features = ["kv"] }
|
log = "0.4"
|
||||||
env_logger = "0.11"
|
env_logger = "0.11"
|
||||||
derive-new = "0.7"
|
derive-new = "0.7"
|
||||||
async-trait = "0.1"
|
async-trait = "0.1"
|
||||||
@@ -56,12 +53,6 @@ chrono = "0.4"
|
|||||||
similar = "2"
|
similar = "2"
|
||||||
uuid = { version = "1.11", features = ["v4", "fast-rng", "macro-diagnostics"] }
|
uuid = { version = "1.11", features = ["v4", "fast-rng", "macro-diagnostics"] }
|
||||||
pretty_assertions = "1.4.1"
|
pretty_assertions = "1.4.1"
|
||||||
tempfile = "3.20.0"
|
|
||||||
bollard = "0.19.1"
|
bollard = "0.19.1"
|
||||||
base64 = "0.22.1"
|
base64 = "0.22.1"
|
||||||
tar = "0.4.44"
|
tar = "0.4.44"
|
||||||
lazy_static = "1.5.0"
|
|
||||||
directories = "6.0.0"
|
|
||||||
thiserror = "2.0.14"
|
|
||||||
serde = { version = "1.0.209", features = ["derive", "rc"] }
|
|
||||||
serde_json = "1.0.127"
|
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
FROM docker.io/rust:1.89.0 AS build
|
FROM docker.io/rust:1.87.0 AS build
|
||||||
|
|
||||||
WORKDIR /app
|
WORKDIR /app
|
||||||
|
|
||||||
@@ -6,7 +6,7 @@ COPY . .
|
|||||||
|
|
||||||
RUN cargo build --release --bin harmony_composer
|
RUN cargo build --release --bin harmony_composer
|
||||||
|
|
||||||
FROM docker.io/rust:1.89.0
|
FROM docker.io/rust:1.87.0
|
||||||
|
|
||||||
WORKDIR /app
|
WORKDIR /app
|
||||||
|
|
||||||
|
|||||||
Binary file not shown.
@@ -8,6 +8,7 @@ use harmony::{
|
|||||||
hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
|
hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
|
||||||
infra::opnsense::OPNSenseManagementInterface,
|
infra::opnsense::OPNSenseManagementInterface,
|
||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
|
maestro::Maestro,
|
||||||
modules::{
|
modules::{
|
||||||
http::StaticFilesHttpScore,
|
http::StaticFilesHttpScore,
|
||||||
ipxe::IpxeScore,
|
ipxe::IpxeScore,
|
||||||
@@ -129,21 +130,16 @@ async fn main() {
|
|||||||
"./data/watchguard/pxe-http-files".to_string(),
|
"./data/watchguard/pxe-http-files".to_string(),
|
||||||
));
|
));
|
||||||
let ipxe_score = IpxeScore::new();
|
let ipxe_score = IpxeScore::new();
|
||||||
|
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
|
||||||
harmony_tui::run(
|
maestro.register_all(vec![
|
||||||
inventory,
|
Box::new(dns_score),
|
||||||
topology,
|
Box::new(bootstrap_dhcp_score),
|
||||||
vec![
|
Box::new(bootstrap_load_balancer_score),
|
||||||
Box::new(dns_score),
|
Box::new(load_balancer_score),
|
||||||
Box::new(bootstrap_dhcp_score),
|
Box::new(tftp_score),
|
||||||
Box::new(bootstrap_load_balancer_score),
|
Box::new(http_score),
|
||||||
Box::new(load_balancer_score),
|
Box::new(ipxe_score),
|
||||||
Box::new(tftp_score),
|
Box::new(dhcp_score),
|
||||||
Box::new(http_score),
|
]);
|
||||||
Box::new(ipxe_score),
|
harmony_tui::init(maestro).await.unwrap();
|
||||||
Box::new(dhcp_score),
|
|
||||||
],
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
.unwrap();
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -8,6 +8,7 @@ use harmony::{
|
|||||||
hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
|
hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
|
||||||
infra::opnsense::OPNSenseManagementInterface,
|
infra::opnsense::OPNSenseManagementInterface,
|
||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
|
maestro::Maestro,
|
||||||
modules::{
|
modules::{
|
||||||
dummy::{ErrorScore, PanicScore, SuccessScore},
|
dummy::{ErrorScore, PanicScore, SuccessScore},
|
||||||
http::StaticFilesHttpScore,
|
http::StaticFilesHttpScore,
|
||||||
@@ -83,25 +84,20 @@ async fn main() {
|
|||||||
let http_score = StaticFilesHttpScore::new(Url::LocalFolder(
|
let http_score = StaticFilesHttpScore::new(Url::LocalFolder(
|
||||||
"./data/watchguard/pxe-http-files".to_string(),
|
"./data/watchguard/pxe-http-files".to_string(),
|
||||||
));
|
));
|
||||||
|
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
|
||||||
harmony_tui::run(
|
maestro.register_all(vec![
|
||||||
inventory,
|
Box::new(dns_score),
|
||||||
topology,
|
Box::new(dhcp_score),
|
||||||
vec![
|
Box::new(load_balancer_score),
|
||||||
Box::new(dns_score),
|
Box::new(tftp_score),
|
||||||
Box::new(dhcp_score),
|
Box::new(http_score),
|
||||||
Box::new(load_balancer_score),
|
Box::new(OPNsenseShellCommandScore {
|
||||||
Box::new(tftp_score),
|
opnsense: opnsense.get_opnsense_config(),
|
||||||
Box::new(http_score),
|
command: "touch /tmp/helloharmonytouching".to_string(),
|
||||||
Box::new(OPNsenseShellCommandScore {
|
}),
|
||||||
opnsense: opnsense.get_opnsense_config(),
|
Box::new(SuccessScore {}),
|
||||||
command: "touch /tmp/helloharmonytouching".to_string(),
|
Box::new(ErrorScore {}),
|
||||||
}),
|
Box::new(PanicScore {}),
|
||||||
Box::new(SuccessScore {}),
|
]);
|
||||||
Box::new(ErrorScore {}),
|
harmony_tui::init(maestro).await.unwrap();
|
||||||
Box::new(PanicScore {}),
|
|
||||||
],
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
.unwrap();
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2,6 +2,7 @@ use std::net::{SocketAddr, SocketAddrV4};
|
|||||||
|
|
||||||
use harmony::{
|
use harmony::{
|
||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
|
maestro::Maestro,
|
||||||
modules::{
|
modules::{
|
||||||
dns::DnsScore,
|
dns::DnsScore,
|
||||||
dummy::{ErrorScore, PanicScore, SuccessScore},
|
dummy::{ErrorScore, PanicScore, SuccessScore},
|
||||||
@@ -15,19 +16,18 @@ use harmony_macros::ipv4;
|
|||||||
|
|
||||||
#[tokio::main]
|
#[tokio::main]
|
||||||
async fn main() {
|
async fn main() {
|
||||||
harmony_tui::run(
|
let inventory = Inventory::autoload();
|
||||||
Inventory::autoload(),
|
let topology = DummyInfra {};
|
||||||
DummyInfra {},
|
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
|
||||||
vec![
|
|
||||||
Box::new(SuccessScore {}),
|
maestro.register_all(vec![
|
||||||
Box::new(ErrorScore {}),
|
Box::new(SuccessScore {}),
|
||||||
Box::new(PanicScore {}),
|
Box::new(ErrorScore {}),
|
||||||
Box::new(DnsScore::new(vec![], None)),
|
Box::new(PanicScore {}),
|
||||||
Box::new(build_large_score()),
|
Box::new(DnsScore::new(vec![], None)),
|
||||||
],
|
Box::new(build_large_score()),
|
||||||
)
|
]);
|
||||||
.await
|
harmony_tui::init(maestro).await.unwrap();
|
||||||
.unwrap();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn build_large_score() -> LoadBalancerScore {
|
fn build_large_score() -> LoadBalancerScore {
|
||||||
|
|||||||
@@ -5,9 +5,6 @@ version.workspace = true
|
|||||||
readme.workspace = true
|
readme.workspace = true
|
||||||
license.workspace = true
|
license.workspace = true
|
||||||
|
|
||||||
[features]
|
|
||||||
testing = []
|
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
rand = "0.9"
|
rand = "0.9"
|
||||||
hex = "0.4"
|
hex = "0.4"
|
||||||
@@ -16,8 +13,8 @@ reqwest = { version = "0.11", features = ["blocking", "json"] }
|
|||||||
russh = "0.45.0"
|
russh = "0.45.0"
|
||||||
rust-ipmi = "0.1.1"
|
rust-ipmi = "0.1.1"
|
||||||
semver = "1.0.23"
|
semver = "1.0.23"
|
||||||
serde.workspace = true
|
serde = { version = "1.0.209", features = ["derive", "rc"] }
|
||||||
serde_json.workspace = true
|
serde_json = "1.0.127"
|
||||||
tokio.workspace = true
|
tokio.workspace = true
|
||||||
derive-new.workspace = true
|
derive-new.workspace = true
|
||||||
log.workspace = true
|
log.workspace = true
|
||||||
@@ -38,8 +35,8 @@ serde-value.workspace = true
|
|||||||
helm-wrapper-rs = "0.4.0"
|
helm-wrapper-rs = "0.4.0"
|
||||||
non-blank-string-rs = "1.0.4"
|
non-blank-string-rs = "1.0.4"
|
||||||
k3d-rs = { path = "../k3d" }
|
k3d-rs = { path = "../k3d" }
|
||||||
directories.workspace = true
|
directories = "6.0.0"
|
||||||
lazy_static.workspace = true
|
lazy_static = "1.5.0"
|
||||||
dockerfile_builder = "0.1.5"
|
dockerfile_builder = "0.1.5"
|
||||||
temp-file = "0.1.9"
|
temp-file = "0.1.9"
|
||||||
convert_case.workspace = true
|
convert_case.workspace = true
|
||||||
@@ -59,7 +56,7 @@ similar.workspace = true
|
|||||||
futures-util = "0.3.31"
|
futures-util = "0.3.31"
|
||||||
tokio-util = "0.7.15"
|
tokio-util = "0.7.15"
|
||||||
strum = { version = "0.27.1", features = ["derive"] }
|
strum = { version = "0.27.1", features = ["derive"] }
|
||||||
tempfile.workspace = true
|
tempfile = "3.20.0"
|
||||||
serde_with = "3.14.0"
|
serde_with = "3.14.0"
|
||||||
schemars = "0.8.22"
|
schemars = "0.8.22"
|
||||||
kube-derive = "1.1.0"
|
kube-derive = "1.1.0"
|
||||||
@@ -67,7 +64,6 @@ bollard.workspace = true
|
|||||||
tar.workspace = true
|
tar.workspace = true
|
||||||
base64.workspace = true
|
base64.workspace = true
|
||||||
once_cell = "1.21.3"
|
once_cell = "1.21.3"
|
||||||
harmony-secret-derive = { version = "0.1.0", path = "../harmony_secret_derive" }
|
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
pretty_assertions.workspace = true
|
pretty_assertions.workspace = true
|
||||||
|
|||||||
Binary file not shown.
@@ -2,8 +2,6 @@ use log::debug;
|
|||||||
use once_cell::sync::Lazy;
|
use once_cell::sync::Lazy;
|
||||||
use tokio::sync::broadcast;
|
use tokio::sync::broadcast;
|
||||||
|
|
||||||
use crate::modules::application::ApplicationFeatureStatus;
|
|
||||||
|
|
||||||
use super::{
|
use super::{
|
||||||
interpret::{InterpretError, Outcome},
|
interpret::{InterpretError, Outcome},
|
||||||
topology::TopologyStatus,
|
topology::TopologyStatus,
|
||||||
@@ -32,12 +30,6 @@ pub enum HarmonyEvent {
|
|||||||
status: TopologyStatus,
|
status: TopologyStatus,
|
||||||
message: Option<String>,
|
message: Option<String>,
|
||||||
},
|
},
|
||||||
ApplicationFeatureStateChanged {
|
|
||||||
topology: String,
|
|
||||||
application: String,
|
|
||||||
feature: String,
|
|
||||||
status: ApplicationFeatureStatus,
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static HARMONY_EVENT_BUS: Lazy<broadcast::Sender<HarmonyEvent>> = Lazy::new(|| {
|
static HARMONY_EVENT_BUS: Lazy<broadcast::Sender<HarmonyEvent>> = Lazy::new(|| {
|
||||||
@@ -47,14 +39,9 @@ static HARMONY_EVENT_BUS: Lazy<broadcast::Sender<HarmonyEvent>> = Lazy::new(|| {
|
|||||||
});
|
});
|
||||||
|
|
||||||
pub fn instrument(event: HarmonyEvent) -> Result<(), &'static str> {
|
pub fn instrument(event: HarmonyEvent) -> Result<(), &'static str> {
|
||||||
if cfg!(any(test, feature = "testing")) {
|
match HARMONY_EVENT_BUS.send(event) {
|
||||||
let _ = event; // Suppress the "unused variable" warning for `event`
|
Ok(_) => Ok(()),
|
||||||
Ok(())
|
Err(_) => Err("send error: no subscribers"),
|
||||||
} else {
|
|
||||||
match HARMONY_EVENT_BUS.send(event) {
|
|
||||||
Ok(_) => Ok(()),
|
|
||||||
Err(_) => Err("send error: no subscribers"),
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -241,7 +241,7 @@ pub struct DummyInfra;
|
|||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl Topology for DummyInfra {
|
impl Topology for DummyInfra {
|
||||||
fn name(&self) -> &str {
|
fn name(&self) -> &str {
|
||||||
"DummyInfra"
|
todo!()
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
|
async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ use k8s_openapi::{
|
|||||||
};
|
};
|
||||||
use kube::{
|
use kube::{
|
||||||
Client, Config, Error, Resource,
|
Client, Config, Error, Resource,
|
||||||
api::{Api, AttachParams, DeleteParams, ListParams, Patch, PatchParams, ResourceExt},
|
api::{Api, AttachParams, ListParams, Patch, PatchParams, ResourceExt},
|
||||||
config::{KubeConfigOptions, Kubeconfig},
|
config::{KubeConfigOptions, Kubeconfig},
|
||||||
core::ErrorResponse,
|
core::ErrorResponse,
|
||||||
runtime::reflector::Lookup,
|
runtime::reflector::Lookup,
|
||||||
@@ -17,9 +17,7 @@ use kube::{
|
|||||||
};
|
};
|
||||||
use log::{debug, error, trace};
|
use log::{debug, error, trace};
|
||||||
use serde::{Serialize, de::DeserializeOwned};
|
use serde::{Serialize, de::DeserializeOwned};
|
||||||
use serde_json::json;
|
|
||||||
use similar::TextDiff;
|
use similar::TextDiff;
|
||||||
use tokio::io::AsyncReadExt;
|
|
||||||
|
|
||||||
#[derive(new, Clone)]
|
#[derive(new, Clone)]
|
||||||
pub struct K8sClient {
|
pub struct K8sClient {
|
||||||
@@ -53,66 +51,6 @@ impl K8sClient {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn get_deployment(
|
|
||||||
&self,
|
|
||||||
name: &str,
|
|
||||||
namespace: Option<&str>,
|
|
||||||
) -> Result<Option<Deployment>, Error> {
|
|
||||||
let deps: Api<Deployment> = if let Some(ns) = namespace {
|
|
||||||
Api::namespaced(self.client.clone(), ns)
|
|
||||||
} else {
|
|
||||||
Api::default_namespaced(self.client.clone())
|
|
||||||
};
|
|
||||||
Ok(deps.get_opt(name).await?)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn get_pod(&self, name: &str, namespace: Option<&str>) -> Result<Option<Pod>, Error> {
|
|
||||||
let pods: Api<Pod> = if let Some(ns) = namespace {
|
|
||||||
Api::namespaced(self.client.clone(), ns)
|
|
||||||
} else {
|
|
||||||
Api::default_namespaced(self.client.clone())
|
|
||||||
};
|
|
||||||
Ok(pods.get_opt(name).await?)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn scale_deployment(
|
|
||||||
&self,
|
|
||||||
name: &str,
|
|
||||||
namespace: Option<&str>,
|
|
||||||
replicas: u32,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
let deployments: Api<Deployment> = if let Some(ns) = namespace {
|
|
||||||
Api::namespaced(self.client.clone(), ns)
|
|
||||||
} else {
|
|
||||||
Api::default_namespaced(self.client.clone())
|
|
||||||
};
|
|
||||||
|
|
||||||
let patch = json!({
|
|
||||||
"spec": {
|
|
||||||
"replicas": replicas
|
|
||||||
}
|
|
||||||
});
|
|
||||||
let pp = PatchParams::default();
|
|
||||||
let scale = Patch::Apply(&patch);
|
|
||||||
deployments.patch_scale(name, &pp, &scale).await?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn delete_deployment(
|
|
||||||
&self,
|
|
||||||
name: &str,
|
|
||||||
namespace: Option<&str>,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
let deployments: Api<Deployment> = if let Some(ns) = namespace {
|
|
||||||
Api::namespaced(self.client.clone(), ns)
|
|
||||||
} else {
|
|
||||||
Api::default_namespaced(self.client.clone())
|
|
||||||
};
|
|
||||||
let delete_params = DeleteParams::default();
|
|
||||||
deployments.delete(name, &delete_params).await?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn wait_until_deployment_ready(
|
pub async fn wait_until_deployment_ready(
|
||||||
&self,
|
&self,
|
||||||
name: String,
|
name: String,
|
||||||
@@ -138,68 +76,6 @@ impl K8sClient {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Will execute a commond in the first pod found that matches the specified label
|
|
||||||
/// '{label}={name}'
|
|
||||||
pub async fn exec_app_capture_output(
|
|
||||||
&self,
|
|
||||||
name: String,
|
|
||||||
label: String,
|
|
||||||
namespace: Option<&str>,
|
|
||||||
command: Vec<&str>,
|
|
||||||
) -> Result<String, String> {
|
|
||||||
let api: Api<Pod>;
|
|
||||||
|
|
||||||
if let Some(ns) = namespace {
|
|
||||||
api = Api::namespaced(self.client.clone(), ns);
|
|
||||||
} else {
|
|
||||||
api = Api::default_namespaced(self.client.clone());
|
|
||||||
}
|
|
||||||
let pod_list = api
|
|
||||||
.list(&ListParams::default().labels(format!("{label}={name}").as_str()))
|
|
||||||
.await
|
|
||||||
.expect("couldn't get list of pods");
|
|
||||||
|
|
||||||
let res = api
|
|
||||||
.exec(
|
|
||||||
pod_list
|
|
||||||
.items
|
|
||||||
.first()
|
|
||||||
.expect("couldn't get pod")
|
|
||||||
.name()
|
|
||||||
.expect("couldn't get pod name")
|
|
||||||
.into_owned()
|
|
||||||
.as_str(),
|
|
||||||
command,
|
|
||||||
&AttachParams::default().stdout(true).stderr(true),
|
|
||||||
)
|
|
||||||
.await;
|
|
||||||
match res {
|
|
||||||
Err(e) => Err(e.to_string()),
|
|
||||||
Ok(mut process) => {
|
|
||||||
let status = process
|
|
||||||
.take_status()
|
|
||||||
.expect("Couldn't get status")
|
|
||||||
.await
|
|
||||||
.expect("Couldn't unwrap status");
|
|
||||||
|
|
||||||
if let Some(s) = status.status {
|
|
||||||
let mut stdout_buf = String::new();
|
|
||||||
if let Some(mut stdout) = process.stdout().take() {
|
|
||||||
stdout.read_to_string(&mut stdout_buf).await;
|
|
||||||
}
|
|
||||||
debug!("Status: {} - {:?}", s, status.details);
|
|
||||||
if s == "Success" {
|
|
||||||
Ok(stdout_buf)
|
|
||||||
} else {
|
|
||||||
Err(s)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
Err("Couldn't get inner status of pod exec".to_string())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Will execute a command in the first pod found that matches the label `app.kubernetes.io/name={name}`
|
/// Will execute a command in the first pod found that matches the label `app.kubernetes.io/name={name}`
|
||||||
pub async fn exec_app(
|
pub async fn exec_app(
|
||||||
&self,
|
&self,
|
||||||
@@ -244,7 +120,7 @@ impl K8sClient {
|
|||||||
.expect("Couldn't unwrap status");
|
.expect("Couldn't unwrap status");
|
||||||
|
|
||||||
if let Some(s) = status.status {
|
if let Some(s) = status.status {
|
||||||
debug!("Status: {} - {:?}", s, status.details);
|
debug!("Status: {}", s);
|
||||||
if s == "Success" { Ok(()) } else { Err(s) }
|
if s == "Success" { Ok(()) } else { Err(s) }
|
||||||
} else {
|
} else {
|
||||||
Err("Couldn't get inner status of pod exec".to_string())
|
Err("Couldn't get inner status of pod exec".to_string())
|
||||||
|
|||||||
@@ -28,13 +28,7 @@ use super::{
|
|||||||
PreparationOutcome, Topology,
|
PreparationOutcome, Topology,
|
||||||
k8s::K8sClient,
|
k8s::K8sClient,
|
||||||
oberservability::monitoring::AlertReceiver,
|
oberservability::monitoring::AlertReceiver,
|
||||||
tenant::{
|
tenant::{TenantConfig, TenantManager, k8s::K8sTenantManager},
|
||||||
TenantConfig, TenantManager,
|
|
||||||
k8s::K8sTenantManager,
|
|
||||||
network_policy::{
|
|
||||||
K3dNetworkPolicyStrategy, NetworkPolicyStrategy, NoopNetworkPolicyStrategy,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
};
|
};
|
||||||
|
|
||||||
#[derive(Clone, Debug)]
|
#[derive(Clone, Debug)]
|
||||||
@@ -256,21 +250,16 @@ impl K8sAnywhereTopology {
|
|||||||
Ok(Some(state))
|
Ok(Some(state))
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn ensure_k8s_tenant_manager(&self, k8s_state: &K8sState) -> Result<(), String> {
|
async fn ensure_k8s_tenant_manager(&self) -> Result<(), String> {
|
||||||
if self.tenant_manager.get().is_some() {
|
if self.tenant_manager.get().is_some() {
|
||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
|
|
||||||
self.tenant_manager
|
self.tenant_manager
|
||||||
.get_or_try_init(async || -> Result<K8sTenantManager, String> {
|
.get_or_try_init(async || -> Result<K8sTenantManager, String> {
|
||||||
|
// TOOD: checker si K8s ou K3d/s tenant manager (ref. issue https://git.nationtech.io/NationTech/harmony/issues/94)
|
||||||
let k8s_client = self.k8s_client().await?;
|
let k8s_client = self.k8s_client().await?;
|
||||||
let network_policy_strategy: Box<dyn NetworkPolicyStrategy> = match k8s_state.source
|
Ok(K8sTenantManager::new(k8s_client))
|
||||||
{
|
|
||||||
K8sSource::LocalK3d => Box::new(K3dNetworkPolicyStrategy::new()),
|
|
||||||
K8sSource::Kubeconfig => Box::new(NoopNetworkPolicyStrategy::new()),
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(K8sTenantManager::new(k8s_client, network_policy_strategy))
|
|
||||||
})
|
})
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
@@ -401,7 +390,7 @@ impl Topology for K8sAnywhereTopology {
|
|||||||
"no K8s client could be found or installed".to_string(),
|
"no K8s client could be found or installed".to_string(),
|
||||||
))?;
|
))?;
|
||||||
|
|
||||||
self.ensure_k8s_tenant_manager(k8s_state)
|
self.ensure_k8s_tenant_manager()
|
||||||
.await
|
.await
|
||||||
.map_err(PreparationError::new)?;
|
.map_err(PreparationError::new)?;
|
||||||
|
|
||||||
|
|||||||
@@ -20,27 +20,24 @@ use serde::de::DeserializeOwned;
|
|||||||
use serde_json::json;
|
use serde_json::json;
|
||||||
use tokio::sync::OnceCell;
|
use tokio::sync::OnceCell;
|
||||||
|
|
||||||
use super::{TenantConfig, TenantManager, network_policy::NetworkPolicyStrategy};
|
use super::{TenantConfig, TenantManager};
|
||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Clone, Debug)]
|
||||||
pub struct K8sTenantManager {
|
pub struct K8sTenantManager {
|
||||||
k8s_client: Arc<K8sClient>,
|
k8s_client: Arc<K8sClient>,
|
||||||
k8s_tenant_config: Arc<OnceCell<TenantConfig>>,
|
k8s_tenant_config: Arc<OnceCell<TenantConfig>>,
|
||||||
network_policy_strategy: Box<dyn NetworkPolicyStrategy>,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl K8sTenantManager {
|
impl K8sTenantManager {
|
||||||
pub fn new(
|
pub fn new(client: Arc<K8sClient>) -> Self {
|
||||||
client: Arc<K8sClient>,
|
|
||||||
network_policy_strategy: Box<dyn NetworkPolicyStrategy>,
|
|
||||||
) -> Self {
|
|
||||||
Self {
|
Self {
|
||||||
k8s_client: client,
|
k8s_client: client,
|
||||||
k8s_tenant_config: Arc::new(OnceCell::new()),
|
k8s_tenant_config: Arc::new(OnceCell::new()),
|
||||||
network_policy_strategy,
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl K8sTenantManager {
|
||||||
fn get_namespace_name(&self, config: &TenantConfig) -> String {
|
fn get_namespace_name(&self, config: &TenantConfig) -> String {
|
||||||
config.name.clone()
|
config.name.clone()
|
||||||
}
|
}
|
||||||
@@ -221,6 +218,29 @@ impl K8sTenantManager {
|
|||||||
}
|
}
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"to": [
|
||||||
|
{
|
||||||
|
"ipBlock": {
|
||||||
|
"cidr": "10.43.0.1/32",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"to": [
|
||||||
|
{
|
||||||
|
//TODO this ip is from the docker network that k3d is running on
|
||||||
|
//since k3d does not deploy kube-api-server as a pod it needs to ahve the ip
|
||||||
|
//address opened up
|
||||||
|
//need to find a way to automatically detect the ip address from the docker
|
||||||
|
//network
|
||||||
|
"ipBlock": {
|
||||||
|
"cidr": "172.18.0.0/16",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"to": [
|
"to": [
|
||||||
{
|
{
|
||||||
@@ -390,27 +410,12 @@ impl K8sTenantManager {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Clone for K8sTenantManager {
|
|
||||||
fn clone(&self) -> Self {
|
|
||||||
Self {
|
|
||||||
k8s_client: self.k8s_client.clone(),
|
|
||||||
k8s_tenant_config: self.k8s_tenant_config.clone(),
|
|
||||||
network_policy_strategy: self.network_policy_strategy.clone_box(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl TenantManager for K8sTenantManager {
|
impl TenantManager for K8sTenantManager {
|
||||||
async fn provision_tenant(&self, config: &TenantConfig) -> Result<(), ExecutorError> {
|
async fn provision_tenant(&self, config: &TenantConfig) -> Result<(), ExecutorError> {
|
||||||
let namespace = self.build_namespace(config)?;
|
let namespace = self.build_namespace(config)?;
|
||||||
let resource_quota = self.build_resource_quota(config)?;
|
let resource_quota = self.build_resource_quota(config)?;
|
||||||
|
|
||||||
let network_policy = self.build_network_policy(config)?;
|
let network_policy = self.build_network_policy(config)?;
|
||||||
let network_policy = self
|
|
||||||
.network_policy_strategy
|
|
||||||
.adjust_policy(network_policy, config);
|
|
||||||
|
|
||||||
let resource_limit_range = self.build_limit_range(config)?;
|
let resource_limit_range = self.build_limit_range(config)?;
|
||||||
|
|
||||||
self.ensure_constraints(&namespace)?;
|
self.ensure_constraints(&namespace)?;
|
||||||
|
|||||||
@@ -1,11 +1,11 @@
|
|||||||
pub mod k8s;
|
pub mod k8s;
|
||||||
mod manager;
|
mod manager;
|
||||||
pub mod network_policy;
|
use std::str::FromStr;
|
||||||
|
|
||||||
use crate::data::Id;
|
|
||||||
pub use manager::*;
|
pub use manager::*;
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
use std::str::FromStr;
|
|
||||||
|
use crate::data::Id;
|
||||||
|
|
||||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] // Assuming serde for Scores
|
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] // Assuming serde for Scores
|
||||||
pub struct TenantConfig {
|
pub struct TenantConfig {
|
||||||
|
|||||||
@@ -1,120 +0,0 @@
|
|||||||
use k8s_openapi::api::networking::v1::{
|
|
||||||
IPBlock, NetworkPolicy, NetworkPolicyEgressRule, NetworkPolicyPeer, NetworkPolicySpec,
|
|
||||||
};
|
|
||||||
|
|
||||||
use super::TenantConfig;
|
|
||||||
|
|
||||||
pub trait NetworkPolicyStrategy: Send + Sync + std::fmt::Debug {
|
|
||||||
fn clone_box(&self) -> Box<dyn NetworkPolicyStrategy>;
|
|
||||||
|
|
||||||
fn adjust_policy(&self, policy: NetworkPolicy, config: &TenantConfig) -> NetworkPolicy;
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Debug)]
|
|
||||||
pub struct NoopNetworkPolicyStrategy {}
|
|
||||||
|
|
||||||
impl NoopNetworkPolicyStrategy {
|
|
||||||
pub fn new() -> Self {
|
|
||||||
Self {}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for NoopNetworkPolicyStrategy {
|
|
||||||
fn default() -> Self {
|
|
||||||
Self::new()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl NetworkPolicyStrategy for NoopNetworkPolicyStrategy {
|
|
||||||
fn clone_box(&self) -> Box<dyn NetworkPolicyStrategy> {
|
|
||||||
Box::new(self.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn adjust_policy(&self, policy: NetworkPolicy, _config: &TenantConfig) -> NetworkPolicy {
|
|
||||||
policy
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Debug)]
|
|
||||||
pub struct K3dNetworkPolicyStrategy {}
|
|
||||||
|
|
||||||
impl K3dNetworkPolicyStrategy {
|
|
||||||
pub fn new() -> Self {
|
|
||||||
Self {}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for K3dNetworkPolicyStrategy {
|
|
||||||
fn default() -> Self {
|
|
||||||
Self::new()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl NetworkPolicyStrategy for K3dNetworkPolicyStrategy {
|
|
||||||
fn clone_box(&self) -> Box<dyn NetworkPolicyStrategy> {
|
|
||||||
Box::new(self.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn adjust_policy(&self, policy: NetworkPolicy, _config: &TenantConfig) -> NetworkPolicy {
|
|
||||||
let mut egress = policy
|
|
||||||
.spec
|
|
||||||
.clone()
|
|
||||||
.unwrap_or_default()
|
|
||||||
.egress
|
|
||||||
.clone()
|
|
||||||
.unwrap_or_default();
|
|
||||||
egress.push(NetworkPolicyEgressRule {
|
|
||||||
to: Some(vec![NetworkPolicyPeer {
|
|
||||||
ip_block: Some(IPBlock {
|
|
||||||
cidr: "172.18.0.0/16".into(), // TODO: query the IP range https://git.nationtech.io/NationTech/harmony/issues/108
|
|
||||||
..Default::default()
|
|
||||||
}),
|
|
||||||
..Default::default()
|
|
||||||
}]),
|
|
||||||
..Default::default()
|
|
||||||
});
|
|
||||||
|
|
||||||
NetworkPolicy {
|
|
||||||
spec: Some(NetworkPolicySpec {
|
|
||||||
egress: Some(egress),
|
|
||||||
..policy.spec.unwrap_or_default()
|
|
||||||
}),
|
|
||||||
..policy
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod tests {
|
|
||||||
use k8s_openapi::api::networking::v1::{
|
|
||||||
IPBlock, NetworkPolicy, NetworkPolicyEgressRule, NetworkPolicyPeer, NetworkPolicySpec,
|
|
||||||
};
|
|
||||||
|
|
||||||
use super::{K3dNetworkPolicyStrategy, NetworkPolicyStrategy};
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
pub fn should_add_ip_block_for_k3d_harmony_server() {
|
|
||||||
let strategy = K3dNetworkPolicyStrategy::new();
|
|
||||||
|
|
||||||
let policy =
|
|
||||||
strategy.adjust_policy(NetworkPolicy::default(), &super::TenantConfig::default());
|
|
||||||
|
|
||||||
let expected_policy = NetworkPolicy {
|
|
||||||
spec: Some(NetworkPolicySpec {
|
|
||||||
egress: Some(vec![NetworkPolicyEgressRule {
|
|
||||||
to: Some(vec![NetworkPolicyPeer {
|
|
||||||
ip_block: Some(IPBlock {
|
|
||||||
cidr: "172.18.0.0/16".into(),
|
|
||||||
..Default::default()
|
|
||||||
}),
|
|
||||||
..Default::default()
|
|
||||||
}]),
|
|
||||||
..Default::default()
|
|
||||||
}]),
|
|
||||||
..Default::default()
|
|
||||||
}),
|
|
||||||
..Default::default()
|
|
||||||
};
|
|
||||||
assert_eq!(expected_policy, policy);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,7 +1,7 @@
|
|||||||
use std::{io::Write, process::Command, sync::Arc};
|
use std::{io::Write, process::Command, sync::Arc};
|
||||||
|
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use log::info;
|
use log::{debug, error};
|
||||||
use serde_yaml::Value;
|
use serde_yaml::Value;
|
||||||
use tempfile::NamedTempFile;
|
use tempfile::NamedTempFile;
|
||||||
|
|
||||||
@@ -56,11 +56,14 @@ impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
|
|||||||
chart_url: String,
|
chart_url: String,
|
||||||
image_name: String,
|
image_name: String,
|
||||||
) -> Result<(), String> {
|
) -> Result<(), String> {
|
||||||
// TODO: This works only with local k3d installations, which is fine only for current demo purposes. We assume usage of K8sAnywhereTopology"
|
error!(
|
||||||
// https://git.nationtech.io/NationTech/harmony/issues/106
|
"FIXME This works only with local k3d installations, which is fine only for current demo purposes. We assume usage of K8sAnywhereTopology"
|
||||||
|
);
|
||||||
|
|
||||||
|
error!("TODO hardcoded k3d bin path is wrong");
|
||||||
let k3d_bin_path = (*HARMONY_DATA_DIR).join("k3d").join("k3d");
|
let k3d_bin_path = (*HARMONY_DATA_DIR).join("k3d").join("k3d");
|
||||||
// --- 1. Import the container image into the k3d cluster ---
|
// --- 1. Import the container image into the k3d cluster ---
|
||||||
info!(
|
debug!(
|
||||||
"Importing image '{}' into k3d cluster 'harmony'",
|
"Importing image '{}' into k3d cluster 'harmony'",
|
||||||
image_name
|
image_name
|
||||||
);
|
);
|
||||||
@@ -77,7 +80,7 @@ impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// --- 2. Get the kubeconfig for the k3d cluster and write it to a temp file ---
|
// --- 2. Get the kubeconfig for the k3d cluster and write it to a temp file ---
|
||||||
info!("Retrieving kubeconfig for k3d cluster 'harmony'");
|
debug!("Retrieving kubeconfig for k3d cluster 'harmony'");
|
||||||
let kubeconfig_output = Command::new(&k3d_bin_path)
|
let kubeconfig_output = Command::new(&k3d_bin_path)
|
||||||
.args(["kubeconfig", "get", "harmony"])
|
.args(["kubeconfig", "get", "harmony"])
|
||||||
.output()
|
.output()
|
||||||
@@ -98,7 +101,7 @@ impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
|
|||||||
let kubeconfig_path = temp_kubeconfig.path().to_str().unwrap();
|
let kubeconfig_path = temp_kubeconfig.path().to_str().unwrap();
|
||||||
|
|
||||||
// --- 3. Install or upgrade the Helm chart in the cluster ---
|
// --- 3. Install or upgrade the Helm chart in the cluster ---
|
||||||
info!(
|
debug!(
|
||||||
"Deploying Helm chart '{}' to namespace '{}'",
|
"Deploying Helm chart '{}' to namespace '{}'",
|
||||||
chart_url, app_name
|
chart_url, app_name
|
||||||
);
|
);
|
||||||
@@ -128,7 +131,7 @@ impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
|
|||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
info!("Successfully deployed '{}' to local k3d cluster.", app_name);
|
debug!("Successfully deployed '{}' to local k3d cluster.", app_name);
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -148,12 +151,14 @@ impl<
|
|||||||
// Or ask for it when unknown
|
// Or ask for it when unknown
|
||||||
|
|
||||||
let helm_chart = self.application.build_push_helm_package(&image).await?;
|
let helm_chart = self.application.build_push_helm_package(&image).await?;
|
||||||
|
debug!("Pushed new helm chart {helm_chart}");
|
||||||
|
|
||||||
// TODO: Make building image configurable/skippable if image already exists (prompt)")
|
error!("TODO Make building image configurable/skippable if image already exists (prompt)");
|
||||||
// https://git.nationtech.io/NationTech/harmony/issues/104
|
|
||||||
let image = self.application.build_push_oci_image().await?;
|
let image = self.application.build_push_oci_image().await?;
|
||||||
|
debug!("Pushed new docker image {image}");
|
||||||
|
|
||||||
// TODO: this is a temporary hack for demo purposes, the deployment target should be driven
|
debug!("Installing ContinuousDelivery feature");
|
||||||
|
// TODO this is a temporary hack for demo purposes, the deployment target should be driven
|
||||||
// by the topology only and we should not have to know how to perform tasks like this for
|
// by the topology only and we should not have to know how to perform tasks like this for
|
||||||
// which the topology should be responsible.
|
// which the topology should be responsible.
|
||||||
//
|
//
|
||||||
@@ -166,20 +171,17 @@ impl<
|
|||||||
// access it. This forces every Topology to understand the concept of targets though... So
|
// access it. This forces every Topology to understand the concept of targets though... So
|
||||||
// instead I'll create a new Capability which is MultiTargetTopology and we'll see how it
|
// instead I'll create a new Capability which is MultiTargetTopology and we'll see how it
|
||||||
// goes. It still does not feel right though.
|
// goes. It still does not feel right though.
|
||||||
//
|
|
||||||
// https://git.nationtech.io/NationTech/harmony/issues/106
|
|
||||||
match topology.current_target() {
|
match topology.current_target() {
|
||||||
DeploymentTarget::LocalDev => {
|
DeploymentTarget::LocalDev => {
|
||||||
info!("Deploying {} locally...", self.application.name());
|
|
||||||
self.deploy_to_local_k3d(self.application.name(), helm_chart, image)
|
self.deploy_to_local_k3d(self.application.name(), helm_chart, image)
|
||||||
.await?;
|
.await?;
|
||||||
}
|
}
|
||||||
target => {
|
target => {
|
||||||
info!("Deploying {} to target {target:?}", self.application.name());
|
debug!("Deploying to target {target:?}");
|
||||||
let score = ArgoHelmScore {
|
let score = ArgoHelmScore {
|
||||||
namespace: "harmony-example-rust-webapp".to_string(),
|
namespace: "harmonydemo-staging".to_string(),
|
||||||
openshift: true,
|
openshift: false,
|
||||||
domain: "argo.harmonydemo.apps.ncd0.harmony.mcd".to_string(),
|
domain: "argo.harmonydemo.apps.st.mcd".to_string(),
|
||||||
argo_apps: vec![ArgoApplication::from(CDApplicationConfig {
|
argo_apps: vec![ArgoApplication::from(CDApplicationConfig {
|
||||||
// helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0
|
// helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0
|
||||||
version: Version::from("0.1.0").unwrap(),
|
version: Version::from("0.1.0").unwrap(),
|
||||||
@@ -187,7 +189,7 @@ impl<
|
|||||||
helm_chart_name: "harmony-example-rust-webapp-chart".to_string(),
|
helm_chart_name: "harmony-example-rust-webapp-chart".to_string(),
|
||||||
values_overrides: None,
|
values_overrides: None,
|
||||||
name: "harmony-demo-rust-webapp".to_string(),
|
name: "harmony-demo-rust-webapp".to_string(),
|
||||||
namespace: "harmony-example-rust-webapp".to_string(),
|
namespace: "harmonydemo-staging".to_string(),
|
||||||
})],
|
})],
|
||||||
};
|
};
|
||||||
score
|
score
|
||||||
|
|||||||
@@ -1,4 +1,5 @@
|
|||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
|
use log::error;
|
||||||
use non_blank_string_rs::NonBlankString;
|
use non_blank_string_rs::NonBlankString;
|
||||||
use serde::Serialize;
|
use serde::Serialize;
|
||||||
use std::str::FromStr;
|
use std::str::FromStr;
|
||||||
@@ -49,6 +50,7 @@ impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for ArgoInterpret {
|
|||||||
inventory: &Inventory,
|
inventory: &Inventory,
|
||||||
topology: &T,
|
topology: &T,
|
||||||
) -> Result<Outcome, InterpretError> {
|
) -> Result<Outcome, InterpretError> {
|
||||||
|
error!("Uncomment below, only disabled for debugging");
|
||||||
self.score.interpret(inventory, topology).await?;
|
self.score.interpret(inventory, topology).await?;
|
||||||
|
|
||||||
let k8s_client = topology.k8s_client().await?;
|
let k8s_client = topology.k8s_client().await?;
|
||||||
@@ -56,14 +58,9 @@ impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for ArgoInterpret {
|
|||||||
.apply_yaml_many(&self.argo_apps.iter().map(|a| a.to_yaml()).collect(), None)
|
.apply_yaml_many(&self.argo_apps.iter().map(|a| a.to_yaml()).collect(), None)
|
||||||
.await
|
.await
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
Ok(Outcome::success(format!(
|
Ok(Outcome::success(format!(
|
||||||
"ArgoCD installed with {} {}",
|
"ArgoCD installed with {} applications",
|
||||||
self.argo_apps.len(),
|
self.argo_apps.len()
|
||||||
match self.argo_apps.len() {
|
|
||||||
1 => "application",
|
|
||||||
_ => "applications",
|
|
||||||
}
|
|
||||||
)))
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -4,7 +4,6 @@ use crate::modules::application::{Application, ApplicationFeature};
|
|||||||
use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
|
use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
|
||||||
use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus;
|
use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus;
|
||||||
|
|
||||||
use crate::topology::MultiTargetTopology;
|
|
||||||
use crate::{
|
use crate::{
|
||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
modules::monitoring::{
|
modules::monitoring::{
|
||||||
@@ -34,7 +33,6 @@ impl<
|
|||||||
+ 'static
|
+ 'static
|
||||||
+ TenantManager
|
+ TenantManager
|
||||||
+ K8sclient
|
+ K8sclient
|
||||||
+ MultiTargetTopology
|
|
||||||
+ std::fmt::Debug
|
+ std::fmt::Debug
|
||||||
+ PrometheusApplicationMonitoring<CRDPrometheus>,
|
+ PrometheusApplicationMonitoring<CRDPrometheus>,
|
||||||
> ApplicationFeature<T> for Monitoring
|
> ApplicationFeature<T> for Monitoring
|
||||||
@@ -57,11 +55,11 @@ impl<
|
|||||||
};
|
};
|
||||||
let ntfy = NtfyScore {
|
let ntfy = NtfyScore {
|
||||||
namespace: namespace.clone(),
|
namespace: namespace.clone(),
|
||||||
host: "ntfy.harmonydemo.apps.ncd0.harmony.mcd".to_string(),
|
host: "localhost".to_string(),
|
||||||
};
|
};
|
||||||
ntfy.interpret(&Inventory::empty(), topology)
|
ntfy.interpret(&Inventory::empty(), topology)
|
||||||
.await
|
.await
|
||||||
.map_err(|e| e.to_string())?;
|
.expect("couldn't create interpret for ntfy");
|
||||||
|
|
||||||
let ntfy_default_auth_username = "harmony";
|
let ntfy_default_auth_username = "harmony";
|
||||||
let ntfy_default_auth_password = "harmony";
|
let ntfy_default_auth_password = "harmony";
|
||||||
@@ -98,7 +96,7 @@ impl<
|
|||||||
alerting_score
|
alerting_score
|
||||||
.interpret(&Inventory::empty(), topology)
|
.interpret(&Inventory::empty(), topology)
|
||||||
.await
|
.await
|
||||||
.map_err(|e| e.to_string())?;
|
.unwrap();
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
fn name(&self) -> String {
|
fn name(&self) -> String {
|
||||||
|
|||||||
@@ -14,19 +14,11 @@ use serde::Serialize;
|
|||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
data::{Id, Version},
|
data::{Id, Version},
|
||||||
instrumentation::{self, HarmonyEvent},
|
|
||||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
topology::Topology,
|
topology::Topology,
|
||||||
};
|
};
|
||||||
|
|
||||||
#[derive(Clone, Debug)]
|
|
||||||
pub enum ApplicationFeatureStatus {
|
|
||||||
Installing,
|
|
||||||
Installed,
|
|
||||||
Failed { details: String },
|
|
||||||
}
|
|
||||||
|
|
||||||
pub trait Application: std::fmt::Debug + Send + Sync {
|
pub trait Application: std::fmt::Debug + Send + Sync {
|
||||||
fn name(&self) -> String;
|
fn name(&self) -> String;
|
||||||
}
|
}
|
||||||
@@ -55,34 +47,13 @@ impl<A: Application, T: Topology + std::fmt::Debug> Interpret<T> for Application
|
|||||||
.join(", ")
|
.join(", ")
|
||||||
);
|
);
|
||||||
for feature in self.features.iter() {
|
for feature in self.features.iter() {
|
||||||
instrumentation::instrument(HarmonyEvent::ApplicationFeatureStateChanged {
|
debug!(
|
||||||
topology: topology.name().into(),
|
"Installing feature {} for application {app_name}",
|
||||||
application: self.application.name(),
|
feature.name()
|
||||||
feature: feature.name(),
|
);
|
||||||
status: ApplicationFeatureStatus::Installing,
|
|
||||||
})
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
let _ = match feature.ensure_installed(topology).await {
|
let _ = match feature.ensure_installed(topology).await {
|
||||||
Ok(()) => {
|
Ok(()) => (),
|
||||||
instrumentation::instrument(HarmonyEvent::ApplicationFeatureStateChanged {
|
|
||||||
topology: topology.name().into(),
|
|
||||||
application: self.application.name(),
|
|
||||||
feature: feature.name(),
|
|
||||||
status: ApplicationFeatureStatus::Installed,
|
|
||||||
})
|
|
||||||
.unwrap();
|
|
||||||
}
|
|
||||||
Err(msg) => {
|
Err(msg) => {
|
||||||
instrumentation::instrument(HarmonyEvent::ApplicationFeatureStateChanged {
|
|
||||||
topology: topology.name().into(),
|
|
||||||
application: self.application.name(),
|
|
||||||
feature: feature.name(),
|
|
||||||
status: ApplicationFeatureStatus::Failed {
|
|
||||||
details: msg.clone(),
|
|
||||||
},
|
|
||||||
})
|
|
||||||
.unwrap();
|
|
||||||
return Err(InterpretError::new(format!(
|
return Err(InterpretError::new(format!(
|
||||||
"Application Interpret failed to install feature : {msg}"
|
"Application Interpret failed to install feature : {msg}"
|
||||||
)));
|
)));
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ use dockerfile_builder::Dockerfile;
|
|||||||
use dockerfile_builder::instruction::{CMD, COPY, ENV, EXPOSE, FROM, RUN, USER, WORKDIR};
|
use dockerfile_builder::instruction::{CMD, COPY, ENV, EXPOSE, FROM, RUN, USER, WORKDIR};
|
||||||
use dockerfile_builder::instruction_builder::CopyBuilder;
|
use dockerfile_builder::instruction_builder::CopyBuilder;
|
||||||
use futures_util::StreamExt;
|
use futures_util::StreamExt;
|
||||||
use log::{debug, info, log_enabled};
|
use log::{debug, error, log_enabled};
|
||||||
use serde::Serialize;
|
use serde::Serialize;
|
||||||
use tar::Archive;
|
use tar::Archive;
|
||||||
|
|
||||||
@@ -73,19 +73,19 @@ impl Application for RustWebapp {
|
|||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl HelmPackage for RustWebapp {
|
impl HelmPackage for RustWebapp {
|
||||||
async fn build_push_helm_package(&self, image_url: &str) -> Result<String, String> {
|
async fn build_push_helm_package(&self, image_url: &str) -> Result<String, String> {
|
||||||
info!("Starting Helm chart build and push for '{}'", self.name);
|
debug!("Starting Helm chart build and push for '{}'", self.name);
|
||||||
|
|
||||||
// 1. Create the Helm chart files on disk.
|
// 1. Create the Helm chart files on disk.
|
||||||
let chart_dir = self
|
let chart_dir = self
|
||||||
.create_helm_chart_files(image_url)
|
.create_helm_chart_files(image_url)
|
||||||
.map_err(|e| format!("Failed to create Helm chart files: {}", e))?;
|
.map_err(|e| format!("Failed to create Helm chart files: {}", e))?;
|
||||||
info!("Successfully created Helm chart files in {:?}", chart_dir);
|
debug!("Successfully created Helm chart files in {:?}", chart_dir);
|
||||||
|
|
||||||
// 2. Package the chart into a .tgz archive.
|
// 2. Package the chart into a .tgz archive.
|
||||||
let packaged_chart_path = self
|
let packaged_chart_path = self
|
||||||
.package_helm_chart(&chart_dir)
|
.package_helm_chart(&chart_dir)
|
||||||
.map_err(|e| format!("Failed to package Helm chart: {}", e))?;
|
.map_err(|e| format!("Failed to package Helm chart: {}", e))?;
|
||||||
info!(
|
debug!(
|
||||||
"Successfully packaged Helm chart: {}",
|
"Successfully packaged Helm chart: {}",
|
||||||
packaged_chart_path.to_string_lossy()
|
packaged_chart_path.to_string_lossy()
|
||||||
);
|
);
|
||||||
@@ -94,7 +94,7 @@ impl HelmPackage for RustWebapp {
|
|||||||
let oci_chart_url = self
|
let oci_chart_url = self
|
||||||
.push_helm_chart(&packaged_chart_path)
|
.push_helm_chart(&packaged_chart_path)
|
||||||
.map_err(|e| format!("Failed to push Helm chart: {}", e))?;
|
.map_err(|e| format!("Failed to push Helm chart: {}", e))?;
|
||||||
info!("Successfully pushed Helm chart to: {}", oci_chart_url);
|
debug!("Successfully pushed Helm chart to: {}", oci_chart_url);
|
||||||
|
|
||||||
Ok(oci_chart_url)
|
Ok(oci_chart_url)
|
||||||
}
|
}
|
||||||
@@ -107,20 +107,20 @@ impl OCICompliant for RustWebapp {
|
|||||||
async fn build_push_oci_image(&self) -> Result<String, String> {
|
async fn build_push_oci_image(&self) -> Result<String, String> {
|
||||||
// This function orchestrates the build and push process.
|
// This function orchestrates the build and push process.
|
||||||
// It's async to match the trait definition, though the underlying docker commands are blocking.
|
// It's async to match the trait definition, though the underlying docker commands are blocking.
|
||||||
info!("Starting OCI image build and push for '{}'", self.name);
|
debug!("Starting OCI image build and push for '{}'", self.name);
|
||||||
|
|
||||||
// 1. Build the image by calling the synchronous helper function.
|
// 1. Build the image by calling the synchronous helper function.
|
||||||
let image_tag = self.image_name();
|
let image_tag = self.image_name();
|
||||||
self.build_docker_image(&image_tag)
|
self.build_docker_image(&image_tag)
|
||||||
.await
|
.await
|
||||||
.map_err(|e| format!("Failed to build Docker image: {}", e))?;
|
.map_err(|e| format!("Failed to build Docker image: {}", e))?;
|
||||||
info!("Successfully built Docker image: {}", image_tag);
|
debug!("Successfully built Docker image: {}", image_tag);
|
||||||
|
|
||||||
// 2. Push the image to the registry.
|
// 2. Push the image to the registry.
|
||||||
self.push_docker_image(&image_tag)
|
self.push_docker_image(&image_tag)
|
||||||
.await
|
.await
|
||||||
.map_err(|e| format!("Failed to push Docker image: {}", e))?;
|
.map_err(|e| format!("Failed to push Docker image: {}", e))?;
|
||||||
info!("Successfully pushed Docker image to: {}", image_tag);
|
debug!("Successfully pushed Docker image to: {}", image_tag);
|
||||||
|
|
||||||
Ok(image_tag)
|
Ok(image_tag)
|
||||||
}
|
}
|
||||||
@@ -195,7 +195,7 @@ impl RustWebapp {
|
|||||||
);
|
);
|
||||||
|
|
||||||
while let Some(msg) = image_build_stream.next().await {
|
while let Some(msg) = image_build_stream.next().await {
|
||||||
debug!("Message: {msg:?}");
|
println!("Message: {msg:?}");
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(image_name.to_string())
|
Ok(image_name.to_string())
|
||||||
@@ -219,7 +219,7 @@ impl RustWebapp {
|
|||||||
);
|
);
|
||||||
|
|
||||||
while let Some(msg) = push_image_stream.next().await {
|
while let Some(msg) = push_image_stream.next().await {
|
||||||
debug!("Message: {msg:?}");
|
println!("Message: {msg:?}");
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(image_tag.to_string())
|
Ok(image_tag.to_string())
|
||||||
@@ -288,8 +288,9 @@ impl RustWebapp {
|
|||||||
.unwrap(),
|
.unwrap(),
|
||||||
);
|
);
|
||||||
// Copy the compiled binary from the builder stage.
|
// Copy the compiled binary from the builder stage.
|
||||||
// TODO: Should not be using score name here, instead should use name from Cargo.toml
|
error!(
|
||||||
// https://git.nationtech.io/NationTech/harmony/issues/105
|
"FIXME Should not be using score name here, instead should use name from Cargo.toml"
|
||||||
|
);
|
||||||
let binary_path_in_builder = format!("/app/target/release/{}", self.name);
|
let binary_path_in_builder = format!("/app/target/release/{}", self.name);
|
||||||
let binary_path_in_final = format!("/home/appuser/{}", self.name);
|
let binary_path_in_final = format!("/home/appuser/{}", self.name);
|
||||||
dockerfile.push(
|
dockerfile.push(
|
||||||
@@ -327,8 +328,9 @@ impl RustWebapp {
|
|||||||
));
|
));
|
||||||
|
|
||||||
// Copy only the compiled binary from the builder stage.
|
// Copy only the compiled binary from the builder stage.
|
||||||
// TODO: Should not be using score name here, instead should use name from Cargo.toml
|
error!(
|
||||||
// https://git.nationtech.io/NationTech/harmony/issues/105
|
"FIXME Should not be using score name here, instead should use name from Cargo.toml"
|
||||||
|
);
|
||||||
let binary_path_in_builder = format!("/app/target/release/{}", self.name);
|
let binary_path_in_builder = format!("/app/target/release/{}", self.name);
|
||||||
let binary_path_in_final = format!("/usr/local/bin/{}", self.name);
|
let binary_path_in_final = format!("/usr/local/bin/{}", self.name);
|
||||||
dockerfile.push(
|
dockerfile.push(
|
||||||
|
|||||||
@@ -29,7 +29,7 @@ impl Default for K3DInstallationScore {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<T: Topology> Score<T> for K3DInstallationScore {
|
impl<T: Topology> Score<T> for K3DInstallationScore {
|
||||||
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
|
||||||
Box::new(K3dInstallationInterpret {
|
Box::new(K3dInstallationInterpret {
|
||||||
score: self.clone(),
|
score: self.clone(),
|
||||||
})
|
})
|
||||||
|
|||||||
@@ -14,6 +14,5 @@ pub mod monitoring;
|
|||||||
pub mod okd;
|
pub mod okd;
|
||||||
pub mod opnsense;
|
pub mod opnsense;
|
||||||
pub mod prometheus;
|
pub mod prometheus;
|
||||||
pub mod storage;
|
|
||||||
pub mod tenant;
|
pub mod tenant;
|
||||||
pub mod tftp;
|
pub mod tftp;
|
||||||
|
|||||||
@@ -1,25 +1,9 @@
|
|||||||
use non_blank_string_rs::NonBlankString;
|
use non_blank_string_rs::NonBlankString;
|
||||||
use std::str::FromStr;
|
use std::str::FromStr;
|
||||||
|
|
||||||
use crate::{modules::helm::chart::HelmChartScore, topology::DeploymentTarget};
|
use crate::modules::helm::chart::{HelmChartScore, HelmRepository};
|
||||||
|
|
||||||
pub fn ntfy_helm_chart_score(
|
|
||||||
namespace: String,
|
|
||||||
host: String,
|
|
||||||
target: DeploymentTarget,
|
|
||||||
) -> HelmChartScore {
|
|
||||||
// TODO not actually the correct logic, this should be fixed by using an ingresss which is the
|
|
||||||
// correct k8s standard.
|
|
||||||
//
|
|
||||||
// Another option is to delegate to the topology the ingress technology it wants to use Route,
|
|
||||||
// Ingress or other
|
|
||||||
let route_enabled = match target {
|
|
||||||
DeploymentTarget::LocalDev => false,
|
|
||||||
DeploymentTarget::Staging => true,
|
|
||||||
DeploymentTarget::Production => true,
|
|
||||||
};
|
|
||||||
let ingress_enabled = !route_enabled;
|
|
||||||
|
|
||||||
|
pub fn ntfy_helm_chart_score(namespace: String, host: String) -> HelmChartScore {
|
||||||
let values = format!(
|
let values = format!(
|
||||||
r#"
|
r#"
|
||||||
replicaCount: 1
|
replicaCount: 1
|
||||||
@@ -41,14 +25,23 @@ serviceAccount:
|
|||||||
|
|
||||||
service:
|
service:
|
||||||
type: ClusterIP
|
type: ClusterIP
|
||||||
port: 8080
|
port: 80
|
||||||
|
|
||||||
ingress:
|
ingress:
|
||||||
enabled: {ingress_enabled}
|
enabled: true
|
||||||
|
# annotations:
|
||||||
|
# kubernetes.io/ingress.class: nginx
|
||||||
|
# kubernetes.io/tls-acme: "true"
|
||||||
|
hosts:
|
||||||
|
- host: {host}
|
||||||
|
paths:
|
||||||
|
- path: /
|
||||||
|
pathType: ImplementationSpecific
|
||||||
|
tls: []
|
||||||
|
# - secretName: chart-example-tls
|
||||||
|
# hosts:
|
||||||
|
# - chart-example.local
|
||||||
|
|
||||||
route:
|
|
||||||
enabled: {route_enabled}
|
|
||||||
host: {host}
|
|
||||||
|
|
||||||
autoscaling:
|
autoscaling:
|
||||||
enabled: false
|
enabled: false
|
||||||
@@ -56,7 +49,7 @@ autoscaling:
|
|||||||
config:
|
config:
|
||||||
enabled: true
|
enabled: true
|
||||||
data:
|
data:
|
||||||
base-url: "https://{host}"
|
# base-url: "https://ntfy.something.com"
|
||||||
auth-file: "/var/cache/ntfy/user.db"
|
auth-file: "/var/cache/ntfy/user.db"
|
||||||
auth-default-access: "deny-all"
|
auth-default-access: "deny-all"
|
||||||
cache-file: "/var/cache/ntfy/cache.db"
|
cache-file: "/var/cache/ntfy/cache.db"
|
||||||
@@ -66,7 +59,6 @@ config:
|
|||||||
enable-signup: false
|
enable-signup: false
|
||||||
enable-login: "true"
|
enable-login: "true"
|
||||||
enable-metrics: "true"
|
enable-metrics: "true"
|
||||||
listen-http: ":8080"
|
|
||||||
|
|
||||||
persistence:
|
persistence:
|
||||||
enabled: true
|
enabled: true
|
||||||
@@ -77,12 +69,16 @@ persistence:
|
|||||||
HelmChartScore {
|
HelmChartScore {
|
||||||
namespace: Some(NonBlankString::from_str(&namespace).unwrap()),
|
namespace: Some(NonBlankString::from_str(&namespace).unwrap()),
|
||||||
release_name: NonBlankString::from_str("ntfy").unwrap(),
|
release_name: NonBlankString::from_str("ntfy").unwrap(),
|
||||||
chart_name: NonBlankString::from_str("oci://hub.nationtech.io/harmony/ntfy").unwrap(),
|
chart_name: NonBlankString::from_str("sarab97/ntfy").unwrap(),
|
||||||
chart_version: Some(NonBlankString::from_str("0.1.7-nationtech.1").unwrap()),
|
chart_version: Some(NonBlankString::from_str("0.1.7").unwrap()),
|
||||||
values_overrides: None,
|
values_overrides: None,
|
||||||
values_yaml: Some(values.to_string()),
|
values_yaml: Some(values.to_string()),
|
||||||
create_namespace: true,
|
create_namespace: true,
|
||||||
install_only: false,
|
install_only: false,
|
||||||
repository: None,
|
repository: Some(HelmRepository::new(
|
||||||
|
"sarab97".to_string(),
|
||||||
|
url::Url::parse("https://charts.sarabsingh.com").unwrap(),
|
||||||
|
true,
|
||||||
|
)),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use log::info;
|
use log::debug;
|
||||||
use serde::Serialize;
|
use serde::Serialize;
|
||||||
use strum::{Display, EnumString};
|
use strum::{Display, EnumString};
|
||||||
|
|
||||||
@@ -11,7 +11,7 @@ use crate::{
|
|||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
modules::monitoring::ntfy::helm::ntfy_helm_chart::ntfy_helm_chart_score,
|
modules::monitoring::ntfy::helm::ntfy_helm_chart::ntfy_helm_chart_score,
|
||||||
score::Score,
|
score::Score,
|
||||||
topology::{HelmCommand, K8sclient, MultiTargetTopology, Topology, k8s::K8sClient},
|
topology::{HelmCommand, K8sclient, Topology, k8s::K8sClient},
|
||||||
};
|
};
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize)]
|
#[derive(Debug, Clone, Serialize)]
|
||||||
@@ -20,7 +20,7 @@ pub struct NtfyScore {
|
|||||||
pub host: String,
|
pub host: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T: Topology + HelmCommand + K8sclient + MultiTargetTopology> Score<T> for NtfyScore {
|
impl<T: Topology + HelmCommand + K8sclient> Score<T> for NtfyScore {
|
||||||
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
||||||
Box::new(NtfyInterpret {
|
Box::new(NtfyInterpret {
|
||||||
score: self.clone(),
|
score: self.clone(),
|
||||||
@@ -77,7 +77,7 @@ impl NtfyInterpret {
|
|||||||
vec![
|
vec![
|
||||||
"sh",
|
"sh",
|
||||||
"-c",
|
"-c",
|
||||||
format!("NTFY_PASSWORD={password} ntfy user add --role={role} --ignore-exists {username}")
|
format!("NTFY_PASSWORD={password} ntfy user add --role={role} {username}")
|
||||||
.as_str(),
|
.as_str(),
|
||||||
],
|
],
|
||||||
)
|
)
|
||||||
@@ -89,27 +89,22 @@ impl NtfyInterpret {
|
|||||||
|
|
||||||
/// We need a ntfy interpret to wrap the HelmChartScore in order to run the score, and then bootstrap the config inside ntfy
|
/// We need a ntfy interpret to wrap the HelmChartScore in order to run the score, and then bootstrap the config inside ntfy
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl<T: Topology + HelmCommand + K8sclient + MultiTargetTopology> Interpret<T> for NtfyInterpret {
|
impl<T: Topology + HelmCommand + K8sclient> Interpret<T> for NtfyInterpret {
|
||||||
async fn execute(
|
async fn execute(
|
||||||
&self,
|
&self,
|
||||||
inventory: &Inventory,
|
inventory: &Inventory,
|
||||||
topology: &T,
|
topology: &T,
|
||||||
) -> Result<Outcome, InterpretError> {
|
) -> Result<Outcome, InterpretError> {
|
||||||
ntfy_helm_chart_score(
|
ntfy_helm_chart_score(self.score.namespace.clone(), self.score.host.clone())
|
||||||
self.score.namespace.clone(),
|
.interpret(inventory, topology)
|
||||||
self.score.host.clone(),
|
.await?;
|
||||||
topology.current_target(),
|
|
||||||
)
|
|
||||||
.interpret(inventory, topology)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
info!("installed ntfy helm chart");
|
debug!("installed ntfy helm chart");
|
||||||
let client = topology
|
let client = topology
|
||||||
.k8s_client()
|
.k8s_client()
|
||||||
.await
|
.await
|
||||||
.expect("couldn't get k8s client");
|
.expect("couldn't get k8s client");
|
||||||
|
|
||||||
info!("deploying ntfy...");
|
|
||||||
client
|
client
|
||||||
.wait_until_deployment_ready(
|
.wait_until_deployment_ready(
|
||||||
"ntfy".to_string(),
|
"ntfy".to_string(),
|
||||||
@@ -117,12 +112,12 @@ impl<T: Topology + HelmCommand + K8sclient + MultiTargetTopology> Interpret<T> f
|
|||||||
None,
|
None,
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
info!("ntfy deployed");
|
debug!("created k8s client");
|
||||||
|
|
||||||
info!("adding user harmony");
|
|
||||||
self.add_user(client, "harmony", "harmony", Some(NtfyRole::Admin))
|
self.add_user(client, "harmony", "harmony", Some(NtfyRole::Admin))
|
||||||
.await?;
|
.await?;
|
||||||
info!("user added");
|
|
||||||
|
debug!("exec into pod done");
|
||||||
|
|
||||||
Ok(Outcome::success("Ntfy installed".to_string()))
|
Ok(Outcome::success("Ntfy installed".to_string()))
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -166,8 +166,7 @@ impl K8sPrometheusCRDAlertingInterpret {
|
|||||||
|
|
||||||
let install_output = Command::new("helm")
|
let install_output = Command::new("helm")
|
||||||
.args([
|
.args([
|
||||||
"upgrade",
|
"install",
|
||||||
"--install",
|
|
||||||
&chart_name,
|
&chart_name,
|
||||||
tgz_path.to_str().unwrap(),
|
tgz_path.to_str().unwrap(),
|
||||||
"--namespace",
|
"--namespace",
|
||||||
|
|||||||
@@ -1,419 +0,0 @@
|
|||||||
use std::{
|
|
||||||
process::Command,
|
|
||||||
sync::Arc,
|
|
||||||
time::{Duration, Instant},
|
|
||||||
};
|
|
||||||
|
|
||||||
use async_trait::async_trait;
|
|
||||||
use log::{info, warn};
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use tokio::time::sleep;
|
|
||||||
|
|
||||||
use crate::{
|
|
||||||
data::{Id, Version},
|
|
||||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
|
||||||
inventory::Inventory,
|
|
||||||
score::Score,
|
|
||||||
topology::{K8sclient, Topology, k8s::K8sClient},
|
|
||||||
};
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize)]
|
|
||||||
pub struct CephRemoveOsd {
|
|
||||||
osd_deployment_name: String,
|
|
||||||
rook_ceph_namespace: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T: Topology + K8sclient> Score<T> for CephRemoveOsd {
|
|
||||||
fn name(&self) -> String {
|
|
||||||
format!("CephRemoveOsdScore")
|
|
||||||
}
|
|
||||||
|
|
||||||
#[doc(hidden)]
|
|
||||||
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
|
||||||
Box::new(CephRemoveOsdInterpret {
|
|
||||||
score: self.clone(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
|
||||||
pub struct CephRemoveOsdInterpret {
|
|
||||||
score: CephRemoveOsd,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[async_trait]
|
|
||||||
impl<T: Topology + K8sclient> Interpret<T> for CephRemoveOsdInterpret {
|
|
||||||
async fn execute(
|
|
||||||
&self,
|
|
||||||
_inventory: &Inventory,
|
|
||||||
topology: &T,
|
|
||||||
) -> Result<Outcome, InterpretError> {
|
|
||||||
let client = topology.k8s_client().await.unwrap();
|
|
||||||
self.verify_ceph_toolbox_exists(client.clone()).await?;
|
|
||||||
self.scale_deployment(client.clone()).await?;
|
|
||||||
self.verify_deployment_scaled(client.clone()).await?;
|
|
||||||
self.delete_deployment(client.clone()).await?;
|
|
||||||
self.verify_deployment_deleted(client.clone()).await?;
|
|
||||||
let osd_id_full = self.get_ceph_osd_id().unwrap();
|
|
||||||
self.purge_ceph_osd(client.clone(), &osd_id_full).await?;
|
|
||||||
self.verify_ceph_osd_removal(client.clone(), &osd_id_full)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
Ok(Outcome::success(format!(
|
|
||||||
"Successfully removed OSD {} from rook-ceph cluster by deleting deployment {}",
|
|
||||||
osd_id_full, self.score.osd_deployment_name
|
|
||||||
)))
|
|
||||||
}
|
|
||||||
fn get_name(&self) -> InterpretName {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_version(&self) -> Version {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_status(&self) -> InterpretStatus {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_children(&self) -> Vec<Id> {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl CephRemoveOsdInterpret {
|
|
||||||
pub fn get_ceph_osd_id(&self) -> Result<String, InterpretError> {
|
|
||||||
let osd_id_numeric = self
|
|
||||||
.score
|
|
||||||
.osd_deployment_name
|
|
||||||
.split('-')
|
|
||||||
.nth(3)
|
|
||||||
.ok_or_else(|| {
|
|
||||||
InterpretError::new(format!(
|
|
||||||
"Could not parse OSD id from deployment name {}",
|
|
||||||
self.score.osd_deployment_name
|
|
||||||
))
|
|
||||||
})?;
|
|
||||||
let osd_id_full = format!("osd.{}", osd_id_numeric);
|
|
||||||
|
|
||||||
info!(
|
|
||||||
"Targeting Ceph OSD: {} (parsed from deployment {})",
|
|
||||||
osd_id_full, self.score.osd_deployment_name
|
|
||||||
);
|
|
||||||
|
|
||||||
Ok(osd_id_full)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn verify_ceph_toolbox_exists(
|
|
||||||
&self,
|
|
||||||
client: Arc<K8sClient>,
|
|
||||||
) -> Result<Outcome, InterpretError> {
|
|
||||||
let toolbox_dep = "rook-ceph-tools".to_string();
|
|
||||||
|
|
||||||
match client
|
|
||||||
.get_deployment(&toolbox_dep, Some(&self.score.rook_ceph_namespace))
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
Ok(Some(deployment)) => {
|
|
||||||
if let Some(status) = deployment.status {
|
|
||||||
let ready_count = status.ready_replicas.unwrap_or(0);
|
|
||||||
if ready_count >= 1 {
|
|
||||||
return Ok(Outcome::success(format!(
|
|
||||||
"'{}' is ready with {} replica(s).",
|
|
||||||
&toolbox_dep, ready_count
|
|
||||||
)));
|
|
||||||
} else {
|
|
||||||
return Err(InterpretError::new(
|
|
||||||
"ceph-tool-box not ready in cluster".to_string(),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
Err(InterpretError::new(format!(
|
|
||||||
"failed to get deployment status {}",
|
|
||||||
&toolbox_dep
|
|
||||||
)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Ok(None) => Err(InterpretError::new(format!(
|
|
||||||
"Deployment '{}' not found in namespace '{}'.",
|
|
||||||
&toolbox_dep, self.score.rook_ceph_namespace
|
|
||||||
))),
|
|
||||||
Err(e) => Err(InterpretError::new(format!(
|
|
||||||
"Failed to query for deployment '{}': {}",
|
|
||||||
&toolbox_dep, e
|
|
||||||
))),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn scale_deployment(
|
|
||||||
&self,
|
|
||||||
client: Arc<K8sClient>,
|
|
||||||
) -> Result<Outcome, InterpretError> {
|
|
||||||
info!(
|
|
||||||
"Scaling down OSD deployment: {}",
|
|
||||||
self.score.osd_deployment_name
|
|
||||||
);
|
|
||||||
client
|
|
||||||
.scale_deployment(
|
|
||||||
&self.score.osd_deployment_name,
|
|
||||||
Some(&self.score.rook_ceph_namespace),
|
|
||||||
0,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
Ok(Outcome::success(format!(
|
|
||||||
"Scaled down deployment {}",
|
|
||||||
self.score.osd_deployment_name
|
|
||||||
)))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn verify_deployment_scaled(
|
|
||||||
&self,
|
|
||||||
client: Arc<K8sClient>,
|
|
||||||
) -> Result<Outcome, InterpretError> {
|
|
||||||
let (timeout, interval, start) = self.build_timer();
|
|
||||||
|
|
||||||
info!("Waiting for OSD deployment to scale down to 0 replicas");
|
|
||||||
loop {
|
|
||||||
let dep = client
|
|
||||||
.get_deployment(
|
|
||||||
&self.score.osd_deployment_name,
|
|
||||||
Some(&self.score.rook_ceph_namespace),
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
if let Some(deployment) = dep {
|
|
||||||
if let Some(status) = deployment.status {
|
|
||||||
if status.replicas.unwrap_or(1) == 0 && status.ready_replicas.unwrap_or(1) == 0
|
|
||||||
{
|
|
||||||
return Ok(Outcome::success(
|
|
||||||
"Deployment successfully scaled down.".to_string(),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if start.elapsed() > timeout {
|
|
||||||
return Err(InterpretError::new(format!(
|
|
||||||
"Timed out waiting for deployment {} to scale down",
|
|
||||||
self.score.osd_deployment_name
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
sleep(interval).await;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn build_timer(&self) -> (Duration, Duration, Instant) {
|
|
||||||
let timeout = Duration::from_secs(120);
|
|
||||||
let interval = Duration::from_secs(5);
|
|
||||||
let start = Instant::now();
|
|
||||||
(timeout, interval, start)
|
|
||||||
}
|
|
||||||
pub async fn delete_deployment(
|
|
||||||
&self,
|
|
||||||
client: Arc<K8sClient>,
|
|
||||||
) -> Result<Outcome, InterpretError> {
|
|
||||||
info!(
|
|
||||||
"Deleting OSD deployment: {}",
|
|
||||||
self.score.osd_deployment_name
|
|
||||||
);
|
|
||||||
client
|
|
||||||
.delete_deployment(
|
|
||||||
&self.score.osd_deployment_name,
|
|
||||||
Some(&self.score.rook_ceph_namespace),
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
Ok(Outcome::success(format!(
|
|
||||||
"deployment {} deleted",
|
|
||||||
self.score.osd_deployment_name
|
|
||||||
)))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn verify_deployment_deleted(
|
|
||||||
&self,
|
|
||||||
client: Arc<K8sClient>,
|
|
||||||
) -> Result<Outcome, InterpretError> {
|
|
||||||
let (timeout, interval, start) = self.build_timer();
|
|
||||||
|
|
||||||
info!("Waiting for OSD deployment to scale down to 0 replicas");
|
|
||||||
loop {
|
|
||||||
let dep = client
|
|
||||||
.get_deployment(
|
|
||||||
&self.score.osd_deployment_name,
|
|
||||||
Some(&self.score.rook_ceph_namespace),
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
if dep.is_none() {
|
|
||||||
info!(
|
|
||||||
"Deployment {} successfully deleted.",
|
|
||||||
self.score.osd_deployment_name
|
|
||||||
);
|
|
||||||
return Ok(Outcome::success(format!(
|
|
||||||
"Deployment {} deleted.",
|
|
||||||
self.score.osd_deployment_name
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
|
|
||||||
if start.elapsed() > timeout {
|
|
||||||
return Err(InterpretError::new(format!(
|
|
||||||
"Timed out waiting for deployment {} to be deleted",
|
|
||||||
self.score.osd_deployment_name
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
sleep(interval).await;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_osd_tree(&self, json: serde_json::Value) -> Result<CephOsdTree, InterpretError> {
|
|
||||||
let nodes = json.get("nodes").ok_or_else(|| {
|
|
||||||
InterpretError::new("Missing 'nodes' field in ceph osd tree JSON".to_string())
|
|
||||||
})?;
|
|
||||||
let tree: CephOsdTree = CephOsdTree {
|
|
||||||
nodes: serde_json::from_value(nodes.clone()).map_err(|e| {
|
|
||||||
InterpretError::new(format!("Failed to parse ceph osd tree JSON: {}", e))
|
|
||||||
})?,
|
|
||||||
};
|
|
||||||
Ok(tree)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn purge_ceph_osd(
|
|
||||||
&self,
|
|
||||||
client: Arc<K8sClient>,
|
|
||||||
osd_id_full: &str,
|
|
||||||
) -> Result<Outcome, InterpretError> {
|
|
||||||
info!(
|
|
||||||
"Purging OSD {} from Ceph cluster and removing its auth key",
|
|
||||||
osd_id_full
|
|
||||||
);
|
|
||||||
client
|
|
||||||
.exec_app_capture_output(
|
|
||||||
"rook-ceph-tools".to_string(),
|
|
||||||
"app".to_string(),
|
|
||||||
Some(&self.score.rook_ceph_namespace),
|
|
||||||
vec![
|
|
||||||
format!("ceph osd purge {osd_id_full} --yes-i-really-mean-it").as_str(),
|
|
||||||
format!("ceph auth del osd.{osd_id_full}").as_str(),
|
|
||||||
],
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
Ok(Outcome::success(format!(
|
|
||||||
"osd id {} removed from osd tree",
|
|
||||||
osd_id_full
|
|
||||||
)))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn verify_ceph_osd_removal(
|
|
||||||
&self,
|
|
||||||
client: Arc<K8sClient>,
|
|
||||||
osd_id_full: &str,
|
|
||||||
) -> Result<Outcome, InterpretError> {
|
|
||||||
let (timeout, interval, start) = self.build_timer();
|
|
||||||
info!(
|
|
||||||
"Verifying OSD {} has been removed from the Ceph tree...",
|
|
||||||
osd_id_full
|
|
||||||
);
|
|
||||||
loop {
|
|
||||||
let output = client
|
|
||||||
.exec_app_capture_output(
|
|
||||||
"rook-ceph-tools".to_string(),
|
|
||||||
"app".to_string(),
|
|
||||||
Some(&self.score.rook_ceph_namespace),
|
|
||||||
vec!["ceph osd tree -f json"],
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
let tree =
|
|
||||||
self.get_osd_tree(serde_json::from_str(&output).expect("could not extract json"));
|
|
||||||
|
|
||||||
let osd_found = tree
|
|
||||||
.unwrap()
|
|
||||||
.nodes
|
|
||||||
.iter()
|
|
||||||
.any(|node| node.name == osd_id_full);
|
|
||||||
|
|
||||||
if !osd_found {
|
|
||||||
return Ok(Outcome::success(format!(
|
|
||||||
"Successfully verified that OSD {} is removed from the Ceph cluster.",
|
|
||||||
osd_id_full,
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
|
|
||||||
if start.elapsed() > timeout {
|
|
||||||
return Err(InterpretError::new(format!(
|
|
||||||
"Timed out waiting for OSD {} to be removed from Ceph tree",
|
|
||||||
osd_id_full
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
|
|
||||||
warn!(
|
|
||||||
"OSD {} still found in Ceph tree, retrying in {:?}...",
|
|
||||||
osd_id_full, interval
|
|
||||||
);
|
|
||||||
sleep(interval).await;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
#[derive(Debug, Deserialize, PartialEq)]
|
|
||||||
pub struct CephOsdTree {
|
|
||||||
pub nodes: Vec<CephNode>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Deserialize, PartialEq)]
|
|
||||||
pub struct CephNode {
|
|
||||||
pub id: i32,
|
|
||||||
pub name: String,
|
|
||||||
#[serde(rename = "type")]
|
|
||||||
pub node_type: String,
|
|
||||||
pub type_id: Option<i32>,
|
|
||||||
pub children: Option<Vec<i32>>,
|
|
||||||
pub exists: Option<i32>,
|
|
||||||
pub status: Option<String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod tests {
|
|
||||||
use serde_json::json;
|
|
||||||
|
|
||||||
use super::*;
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_get_osd_tree() {
|
|
||||||
let json_data = json!({
|
|
||||||
"nodes": [
|
|
||||||
{"id": 1, "name": "osd.1", "type": "osd", "primary_affinity":"1"},
|
|
||||||
{"id": 2, "name": "osd.2", "type": "osd", "crush_weight": 1.22344}
|
|
||||||
]
|
|
||||||
});
|
|
||||||
let interpret = CephRemoveOsdInterpret {
|
|
||||||
score: CephRemoveOsd {
|
|
||||||
osd_deployment_name: "osd-1".to_string(),
|
|
||||||
rook_ceph_namespace: "dummy_ns".to_string(),
|
|
||||||
},
|
|
||||||
};
|
|
||||||
let json = interpret.get_osd_tree(json_data).unwrap();
|
|
||||||
|
|
||||||
let expected = CephOsdTree {
|
|
||||||
nodes: vec![
|
|
||||||
CephNode {
|
|
||||||
id: 1,
|
|
||||||
name: "osd.1".to_string(),
|
|
||||||
node_type: "osd".to_string(),
|
|
||||||
type_id: None,
|
|
||||||
children: None,
|
|
||||||
exists: None,
|
|
||||||
status: None,
|
|
||||||
},
|
|
||||||
CephNode {
|
|
||||||
id: 2,
|
|
||||||
name: "osd.2".to_string(),
|
|
||||||
node_type: "osd".to_string(),
|
|
||||||
type_id: None,
|
|
||||||
children: None,
|
|
||||||
exists: None,
|
|
||||||
status: None,
|
|
||||||
},
|
|
||||||
],
|
|
||||||
};
|
|
||||||
|
|
||||||
assert_eq!(json, expected);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,4 +0,0 @@
|
|||||||
pub mod ceph_remove_osd_score;
|
|
||||||
pub mod rook_ceph_helm_chart_score;
|
|
||||||
pub mod rook_ceph_cluster_helm_chart_score;
|
|
||||||
pub mod rook_ceph_install_score;
|
|
||||||
@@ -1,44 +0,0 @@
|
|||||||
|
|
||||||
use std::str::FromStr;
|
|
||||||
|
|
||||||
use non_blank_string_rs::NonBlankString;
|
|
||||||
|
|
||||||
use crate::modules::helm::chart::HelmChartScore;
|
|
||||||
|
|
||||||
pub fn rook_ceph_cluster_helm_chart(ns: &str) -> HelmChartScore {
|
|
||||||
let values = r#"
|
|
||||||
monitoring:
|
|
||||||
enabled: true
|
|
||||||
createPrometheusRules: true
|
|
||||||
cephClusterSpec:
|
|
||||||
placement:
|
|
||||||
all:
|
|
||||||
nodeAffinity:
|
|
||||||
requiredDuringSchedulingIgnoredDuringExecution:
|
|
||||||
nodeSelectorTerms:
|
|
||||||
- matchExpressions:
|
|
||||||
- key: storage-node
|
|
||||||
operator: In
|
|
||||||
values:
|
|
||||||
- "true"
|
|
||||||
dashboard:
|
|
||||||
ssl: false
|
|
||||||
prometheusEndpoint: http://prometheus-operated:9090
|
|
||||||
prometheusEndpointSSLVerify: false
|
|
||||||
toolbox:
|
|
||||||
enabled: true
|
|
||||||
|
|
||||||
"#
|
|
||||||
.to_string();
|
|
||||||
HelmChartScore {
|
|
||||||
namespace: Some(NonBlankString::from_str(ns).unwrap()),
|
|
||||||
release_name: NonBlankString::from_str("rook-ceph").unwrap(),
|
|
||||||
chart_name: NonBlankString::from_str("https://charts.rook.io/release/rook-release/rook-ceph-cluster").unwrap(),
|
|
||||||
chart_version: todo!(),
|
|
||||||
values_overrides: todo!(),
|
|
||||||
values_yaml: Some(values.to_string()),
|
|
||||||
create_namespace: todo!(),
|
|
||||||
install_only: todo!(),
|
|
||||||
repository: todo!(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,24 +0,0 @@
|
|||||||
use std::str::FromStr;
|
|
||||||
|
|
||||||
use non_blank_string_rs::NonBlankString;
|
|
||||||
|
|
||||||
use crate::modules::helm::chart::HelmChartScore;
|
|
||||||
|
|
||||||
pub fn rook_ceph_helm_chart(ns: &str) -> HelmChartScore {
|
|
||||||
let values = r#"
|
|
||||||
monitoring:
|
|
||||||
enabled: true
|
|
||||||
"#
|
|
||||||
.to_string();
|
|
||||||
HelmChartScore {
|
|
||||||
namespace: Some(NonBlankString::from_str(ns).unwrap()),
|
|
||||||
release_name: NonBlankString::from_str("rook-ceph").unwrap(),
|
|
||||||
chart_name: NonBlankString::from_str("https://charts.rook.io/release/rook-release/rook-ceph").unwrap(),
|
|
||||||
chart_version: todo!(),
|
|
||||||
values_overrides: todo!(),
|
|
||||||
values_yaml: Some(values.to_string()),
|
|
||||||
create_namespace: todo!(),
|
|
||||||
install_only: todo!(),
|
|
||||||
repository: todo!(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,81 +0,0 @@
|
|||||||
use serde::Serialize;
|
|
||||||
|
|
||||||
use crate::{
|
|
||||||
data::{Id, Version},
|
|
||||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
|
||||||
inventory::Inventory,
|
|
||||||
score::Score,
|
|
||||||
topology::{HelmCommand, Topology},
|
|
||||||
};
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize)]
|
|
||||||
pub struct RookCephInstall {
|
|
||||||
namespace: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T: Topology + HelmCommand> Score<T> for RookCephInstall {
|
|
||||||
fn name(&self) -> String {
|
|
||||||
"RookCephInstall".to_string()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
|
||||||
Box::new(RookCephInstallInterpret {
|
|
||||||
score: self.score.clone(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
|
||||||
pub struct RookCephInstallInterpret {
|
|
||||||
score: RookCephInstall,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[async_trait]
|
|
||||||
impl<T: Topology + HelmCommand> Interpret<T> for RookCephInstallInterpret {
|
|
||||||
async fn execute(
|
|
||||||
&self,
|
|
||||||
inventory: &Inventory,
|
|
||||||
topology: &T,
|
|
||||||
) -> Result<InterpretError, Outcome> {
|
|
||||||
self.label_nodes();
|
|
||||||
self.install_rook_helm_chart(self.score.namespace);
|
|
||||||
self.install_rook_cluster_helm_chart(self.score.namespace);
|
|
||||||
//TODO I think we will need to add a capability OCClient to interact with the okd
|
|
||||||
//cli tool
|
|
||||||
self.add_oc_adm_policy(self.score.namespace);
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_name(&self) -> InterpretName {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_version(&self) -> Version {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_status(&self) -> InterpretStatus {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_children(&self) -> Vec<Id> {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl RookCephInstallInterpret {
|
|
||||||
fn label_nodes(&self) -> _ {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn install_rook_helm_chart(&self, namespace: String) -> _ {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn install_rook_cluster_helm_chart(&self, namespace: String) -> _ {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn add_oc_adm_policy(&self, namespace: String) -> _ {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1 +0,0 @@
|
|||||||
pub mod ceph;
|
|
||||||
@@ -5,10 +5,6 @@ version.workspace = true
|
|||||||
readme.workspace = true
|
readme.workspace = true
|
||||||
license.workspace = true
|
license.workspace = true
|
||||||
|
|
||||||
[features]
|
|
||||||
default = ["tui"]
|
|
||||||
tui = ["dep:harmony_tui"]
|
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
assert_cmd = "2.0.17"
|
assert_cmd = "2.0.17"
|
||||||
clap = { version = "4.5.35", features = ["derive"] }
|
clap = { version = "4.5.35", features = ["derive"] }
|
||||||
@@ -22,7 +18,8 @@ indicatif = "0.18.0"
|
|||||||
lazy_static = "1.5.0"
|
lazy_static = "1.5.0"
|
||||||
log.workspace = true
|
log.workspace = true
|
||||||
indicatif-log-bridge = "0.2.3"
|
indicatif-log-bridge = "0.2.3"
|
||||||
chrono.workspace = true
|
|
||||||
|
|
||||||
[dev-dependencies]
|
|
||||||
harmony = { path = "../harmony", features = ["testing"] }
|
[features]
|
||||||
|
default = ["tui"]
|
||||||
|
tui = ["dep:harmony_tui"]
|
||||||
|
|||||||
@@ -1,17 +1,16 @@
|
|||||||
use chrono::Local;
|
|
||||||
use console::style;
|
|
||||||
use harmony::{
|
use harmony::{
|
||||||
instrumentation::{self, HarmonyEvent},
|
instrumentation::{self, HarmonyEvent},
|
||||||
modules::application::ApplicationFeatureStatus,
|
|
||||||
topology::TopologyStatus,
|
topology::TopologyStatus,
|
||||||
};
|
};
|
||||||
use log::{error, info, log_enabled};
|
use indicatif::MultiProgress;
|
||||||
use std::io::Write;
|
use indicatif_log_bridge::LogWrapper;
|
||||||
use std::sync::{Arc, Mutex};
|
use std::sync::{Arc, Mutex};
|
||||||
|
|
||||||
|
use crate::progress::{IndicatifProgressTracker, ProgressTracker};
|
||||||
|
|
||||||
pub fn init() -> tokio::task::JoinHandle<()> {
|
pub fn init() -> tokio::task::JoinHandle<()> {
|
||||||
configure_logger();
|
let base_progress = configure_logger();
|
||||||
let handle = tokio::spawn(handle_events());
|
let handle = tokio::spawn(handle_events(base_progress));
|
||||||
|
|
||||||
loop {
|
loop {
|
||||||
if instrumentation::instrument(HarmonyEvent::HarmonyStarted).is_ok() {
|
if instrumentation::instrument(HarmonyEvent::HarmonyStarted).is_ok() {
|
||||||
@@ -22,76 +21,28 @@ pub fn init() -> tokio::task::JoinHandle<()> {
|
|||||||
handle
|
handle
|
||||||
}
|
}
|
||||||
|
|
||||||
fn configure_logger() {
|
fn configure_logger() -> MultiProgress {
|
||||||
env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info"))
|
let logger =
|
||||||
.format(|buf, record| {
|
env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).build();
|
||||||
let debug_mode = log_enabled!(log::Level::Debug);
|
let level = logger.filter();
|
||||||
let timestamp = Local::now().format("%Y-%m-%d %H:%M:%S");
|
let progress = MultiProgress::new();
|
||||||
|
|
||||||
let level = match record.level() {
|
LogWrapper::new(progress.clone(), logger)
|
||||||
log::Level::Error => style("ERROR").red(),
|
.try_init()
|
||||||
log::Level::Warn => style("WARN").yellow(),
|
.unwrap();
|
||||||
log::Level::Info => style("INFO").green(),
|
log::set_max_level(level);
|
||||||
log::Level::Debug => style("DEBUG").blue(),
|
|
||||||
log::Level::Trace => style("TRACE").magenta(),
|
progress
|
||||||
};
|
|
||||||
if let Some(status) = record.key_values().get(log::kv::Key::from("status")) {
|
|
||||||
let status = status.to_borrowed_str().unwrap();
|
|
||||||
let emoji = match status {
|
|
||||||
"finished" => style(crate::theme::EMOJI_SUCCESS.to_string()).green(),
|
|
||||||
"skipped" => style(crate::theme::EMOJI_SKIP.to_string()).yellow(),
|
|
||||||
"failed" => style(crate::theme::EMOJI_ERROR.to_string()).red(),
|
|
||||||
_ => style("".into()),
|
|
||||||
};
|
|
||||||
if debug_mode {
|
|
||||||
writeln!(
|
|
||||||
buf,
|
|
||||||
"[{} {:<5} {}] {} {}",
|
|
||||||
timestamp,
|
|
||||||
level,
|
|
||||||
record.target(),
|
|
||||||
emoji,
|
|
||||||
record.args()
|
|
||||||
)
|
|
||||||
} else {
|
|
||||||
writeln!(buf, "[{:<5}] {} {}", level, emoji, record.args())
|
|
||||||
}
|
|
||||||
} else if let Some(emoji) = record.key_values().get(log::kv::Key::from("emoji")) {
|
|
||||||
if debug_mode {
|
|
||||||
writeln!(
|
|
||||||
buf,
|
|
||||||
"[{} {:<5} {}] {} {}",
|
|
||||||
timestamp,
|
|
||||||
level,
|
|
||||||
record.target(),
|
|
||||||
emoji,
|
|
||||||
record.args()
|
|
||||||
)
|
|
||||||
} else {
|
|
||||||
writeln!(buf, "[{:<5}] {} {}", level, emoji, record.args())
|
|
||||||
}
|
|
||||||
} else if debug_mode {
|
|
||||||
writeln!(
|
|
||||||
buf,
|
|
||||||
"[{} {:<5} {}] {}",
|
|
||||||
timestamp,
|
|
||||||
level,
|
|
||||||
record.target(),
|
|
||||||
record.args()
|
|
||||||
)
|
|
||||||
} else {
|
|
||||||
writeln!(buf, "[{:<5}] {}", level, record.args())
|
|
||||||
}
|
|
||||||
})
|
|
||||||
.init();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn handle_events() {
|
async fn handle_events(base_progress: MultiProgress) {
|
||||||
|
let progress_tracker = Arc::new(IndicatifProgressTracker::new(base_progress.clone()));
|
||||||
let preparing_topology = Arc::new(Mutex::new(false));
|
let preparing_topology = Arc::new(Mutex::new(false));
|
||||||
let current_score: Arc<Mutex<Option<String>>> = Arc::new(Mutex::new(None));
|
let current_score: Arc<Mutex<Option<String>>> = Arc::new(Mutex::new(None));
|
||||||
|
|
||||||
instrumentation::subscribe("Harmony CLI Logger", {
|
instrumentation::subscribe("Harmony CLI Logger", {
|
||||||
move |event| {
|
move |event| {
|
||||||
|
let progress_tracker = Arc::clone(&progress_tracker);
|
||||||
let preparing_topology = Arc::clone(&preparing_topology);
|
let preparing_topology = Arc::clone(&preparing_topology);
|
||||||
let current_score = Arc::clone(¤t_score);
|
let current_score = Arc::clone(¤t_score);
|
||||||
|
|
||||||
@@ -102,57 +53,89 @@ async fn handle_events() {
|
|||||||
match event {
|
match event {
|
||||||
HarmonyEvent::HarmonyStarted => {}
|
HarmonyEvent::HarmonyStarted => {}
|
||||||
HarmonyEvent::HarmonyFinished => {
|
HarmonyEvent::HarmonyFinished => {
|
||||||
let emoji = crate::theme::EMOJI_HARMONY.to_string();
|
progress_tracker.add_section(
|
||||||
info!(emoji = emoji.as_str(); "Harmony completed");
|
"harmony-summary",
|
||||||
|
&format!("\n{} Harmony completed\n\n", crate::theme::EMOJI_HARMONY),
|
||||||
|
);
|
||||||
|
progress_tracker.add_section("harmony-finished", "\n\n");
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
HarmonyEvent::TopologyStateChanged {
|
HarmonyEvent::TopologyStateChanged {
|
||||||
topology,
|
topology,
|
||||||
status,
|
status,
|
||||||
message,
|
message,
|
||||||
} => match status {
|
} => {
|
||||||
TopologyStatus::Queued => {}
|
let section_key = topology_key(&topology);
|
||||||
TopologyStatus::Preparing => {
|
|
||||||
let emoji = format!("{}", style(crate::theme::EMOJI_TOPOLOGY.to_string()).yellow());
|
match status {
|
||||||
info!(emoji = emoji.as_str(); "Preparing environment: {topology}...");
|
TopologyStatus::Queued => {}
|
||||||
(*preparing_topology) = true;
|
TopologyStatus::Preparing => {
|
||||||
}
|
progress_tracker.add_section(
|
||||||
TopologyStatus::Success => {
|
§ion_key,
|
||||||
(*preparing_topology) = false;
|
&format!(
|
||||||
if let Some(message) = message {
|
"\n{} Preparing environment: {topology}...",
|
||||||
info!(status = "finished"; "{message}");
|
crate::theme::EMOJI_TOPOLOGY
|
||||||
|
),
|
||||||
|
);
|
||||||
|
(*preparing_topology) = true;
|
||||||
|
}
|
||||||
|
TopologyStatus::Success => {
|
||||||
|
(*preparing_topology) = false;
|
||||||
|
progress_tracker.add_task(§ion_key, "topology-success", "");
|
||||||
|
progress_tracker
|
||||||
|
.finish_task("topology-success", &message.unwrap_or("".into()));
|
||||||
|
}
|
||||||
|
TopologyStatus::Noop => {
|
||||||
|
(*preparing_topology) = false;
|
||||||
|
progress_tracker.add_task(§ion_key, "topology-skip", "");
|
||||||
|
progress_tracker
|
||||||
|
.skip_task("topology-skip", &message.unwrap_or("".into()));
|
||||||
|
}
|
||||||
|
TopologyStatus::Error => {
|
||||||
|
progress_tracker.add_task(§ion_key, "topology-error", "");
|
||||||
|
(*preparing_topology) = false;
|
||||||
|
progress_tracker
|
||||||
|
.fail_task("topology-error", &message.unwrap_or("".into()));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
TopologyStatus::Noop => {
|
}
|
||||||
(*preparing_topology) = false;
|
|
||||||
if let Some(message) = message {
|
|
||||||
info!(status = "skipped"; "{message}");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
TopologyStatus::Error => {
|
|
||||||
(*preparing_topology) = false;
|
|
||||||
if let Some(message) = message {
|
|
||||||
error!(status = "failed"; "{message}");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
HarmonyEvent::InterpretExecutionStarted {
|
HarmonyEvent::InterpretExecutionStarted {
|
||||||
execution_id: _,
|
execution_id: task_key,
|
||||||
topology: _,
|
topology,
|
||||||
interpret: _,
|
interpret: _,
|
||||||
score,
|
score,
|
||||||
message,
|
message,
|
||||||
} => {
|
} => {
|
||||||
if *preparing_topology || current_score.is_some() {
|
let is_key_topology = (*preparing_topology)
|
||||||
info!("{message}");
|
&& progress_tracker.contains_section(&topology_key(&topology));
|
||||||
|
let is_key_current_score = current_score.is_some()
|
||||||
|
&& progress_tracker
|
||||||
|
.contains_section(&score_key(¤t_score.clone().unwrap()));
|
||||||
|
let is_key_score = progress_tracker.contains_section(&score_key(&score));
|
||||||
|
|
||||||
|
let section_key = if is_key_topology {
|
||||||
|
topology_key(&topology)
|
||||||
|
} else if is_key_current_score {
|
||||||
|
score_key(¤t_score.clone().unwrap())
|
||||||
|
} else if is_key_score {
|
||||||
|
score_key(&score)
|
||||||
} else {
|
} else {
|
||||||
(*current_score) = Some(score.clone());
|
(*current_score) = Some(score.clone());
|
||||||
let emoji = format!("{}", style(crate::theme::EMOJI_SCORE).blue());
|
let key = score_key(&score);
|
||||||
info!(emoji = emoji.as_str(); "Interpreting score: {score}...");
|
progress_tracker.add_section(
|
||||||
}
|
&key,
|
||||||
|
&format!(
|
||||||
|
"{} Interpreting score: {score}...",
|
||||||
|
crate::theme::EMOJI_SCORE
|
||||||
|
),
|
||||||
|
);
|
||||||
|
key
|
||||||
|
};
|
||||||
|
|
||||||
|
progress_tracker.add_task(§ion_key, &task_key, &message);
|
||||||
}
|
}
|
||||||
HarmonyEvent::InterpretExecutionFinished {
|
HarmonyEvent::InterpretExecutionFinished {
|
||||||
execution_id: _,
|
execution_id: task_key,
|
||||||
topology: _,
|
topology: _,
|
||||||
interpret: _,
|
interpret: _,
|
||||||
score,
|
score,
|
||||||
@@ -165,36 +148,18 @@ async fn handle_events() {
|
|||||||
match outcome {
|
match outcome {
|
||||||
Ok(outcome) => match outcome.status {
|
Ok(outcome) => match outcome.status {
|
||||||
harmony::interpret::InterpretStatus::SUCCESS => {
|
harmony::interpret::InterpretStatus::SUCCESS => {
|
||||||
info!(status = "finished"; "{}", outcome.message);
|
progress_tracker.finish_task(&task_key, &outcome.message);
|
||||||
}
|
}
|
||||||
harmony::interpret::InterpretStatus::NOOP => {
|
harmony::interpret::InterpretStatus::NOOP => {
|
||||||
info!(status = "skipped"; "{}", outcome.message);
|
progress_tracker.skip_task(&task_key, &outcome.message);
|
||||||
}
|
|
||||||
_ => {
|
|
||||||
error!(status = "failed"; "{}", outcome.message);
|
|
||||||
}
|
}
|
||||||
|
_ => progress_tracker.fail_task(&task_key, &outcome.message),
|
||||||
},
|
},
|
||||||
Err(err) => {
|
Err(err) => {
|
||||||
error!(status = "failed"; "{}", err);
|
progress_tracker.fail_task(&task_key, &err.to_string());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
HarmonyEvent::ApplicationFeatureStateChanged {
|
|
||||||
topology: _,
|
|
||||||
application,
|
|
||||||
feature,
|
|
||||||
status,
|
|
||||||
} => match status {
|
|
||||||
ApplicationFeatureStatus::Installing => {
|
|
||||||
info!("Installing feature '{}' for '{}'...", feature, application);
|
|
||||||
}
|
|
||||||
ApplicationFeatureStatus::Installed => {
|
|
||||||
info!(status = "finished"; "Feature '{}' installed", feature);
|
|
||||||
}
|
|
||||||
ApplicationFeatureStatus::Failed { details } => {
|
|
||||||
error!(status = "failed"; "Feature '{}' installation failed: {}", feature, details);
|
|
||||||
}
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
true
|
true
|
||||||
}
|
}
|
||||||
@@ -202,3 +167,11 @@ async fn handle_events() {
|
|||||||
})
|
})
|
||||||
.await;
|
.await;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn topology_key(topology: &str) -> String {
|
||||||
|
format!("topology-{topology}")
|
||||||
|
}
|
||||||
|
|
||||||
|
fn score_key(score: &str) -> String {
|
||||||
|
format!("score-{score}")
|
||||||
|
}
|
||||||
|
|||||||
@@ -90,37 +90,13 @@ pub async fn run<T: Topology + Send + Sync + 'static>(
|
|||||||
topology: T,
|
topology: T,
|
||||||
scores: Vec<Box<dyn Score<T>>>,
|
scores: Vec<Box<dyn Score<T>>>,
|
||||||
args_struct: Option<Args>,
|
args_struct: Option<Args>,
|
||||||
) -> Result<(), Box<dyn std::error::Error>> {
|
|
||||||
let args = match args_struct {
|
|
||||||
Some(args) => args,
|
|
||||||
None => Args::parse(),
|
|
||||||
};
|
|
||||||
|
|
||||||
#[cfg(not(feature = "tui"))]
|
|
||||||
if args.interactive {
|
|
||||||
return Err("Not compiled with interactive support".into());
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(feature = "tui")]
|
|
||||||
if args.interactive {
|
|
||||||
return harmony_tui::run(inventory, topology, scores).await;
|
|
||||||
}
|
|
||||||
|
|
||||||
run_cli(inventory, topology, scores, args).await
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn run_cli<T: Topology + Send + Sync + 'static>(
|
|
||||||
inventory: Inventory,
|
|
||||||
topology: T,
|
|
||||||
scores: Vec<Box<dyn Score<T>>>,
|
|
||||||
args: Args,
|
|
||||||
) -> Result<(), Box<dyn std::error::Error>> {
|
) -> Result<(), Box<dyn std::error::Error>> {
|
||||||
let cli_logger_handle = cli_logger::init();
|
let cli_logger_handle = cli_logger::init();
|
||||||
|
|
||||||
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
|
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
|
||||||
maestro.register_all(scores);
|
maestro.register_all(scores);
|
||||||
|
|
||||||
let result = init(maestro, args).await;
|
let result = init(maestro, args_struct).await;
|
||||||
|
|
||||||
instrumentation::instrument(instrumentation::HarmonyEvent::HarmonyFinished).unwrap();
|
instrumentation::instrument(instrumentation::HarmonyEvent::HarmonyFinished).unwrap();
|
||||||
let _ = tokio::try_join!(cli_logger_handle);
|
let _ = tokio::try_join!(cli_logger_handle);
|
||||||
@@ -129,8 +105,23 @@ pub async fn run_cli<T: Topology + Send + Sync + 'static>(
|
|||||||
|
|
||||||
async fn init<T: Topology + Send + Sync + 'static>(
|
async fn init<T: Topology + Send + Sync + 'static>(
|
||||||
maestro: harmony::maestro::Maestro<T>,
|
maestro: harmony::maestro::Maestro<T>,
|
||||||
args: Args,
|
args_struct: Option<Args>,
|
||||||
) -> Result<(), Box<dyn std::error::Error>> {
|
) -> Result<(), Box<dyn std::error::Error>> {
|
||||||
|
let args = match args_struct {
|
||||||
|
Some(args) => args,
|
||||||
|
None => Args::parse(),
|
||||||
|
};
|
||||||
|
|
||||||
|
#[cfg(feature = "tui")]
|
||||||
|
if args.interactive {
|
||||||
|
return harmony_tui::init(maestro).await;
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(not(feature = "tui"))]
|
||||||
|
if args.interactive {
|
||||||
|
return Err("Not compiled with interactive support".into());
|
||||||
|
}
|
||||||
|
|
||||||
let _ = env_logger::builder().try_init();
|
let _ = env_logger::builder().try_init();
|
||||||
|
|
||||||
let scores_vec = maestro_scores_filter(&maestro, args.all, args.filter, args.number);
|
let scores_vec = maestro_scores_filter(&maestro, args.all, args.filter, args.number);
|
||||||
@@ -141,9 +132,8 @@ async fn init<T: Topology + Send + Sync + 'static>(
|
|||||||
|
|
||||||
// if list option is specified, print filtered list and exit
|
// if list option is specified, print filtered list and exit
|
||||||
if args.list {
|
if args.list {
|
||||||
let num_scores = scores_vec.len();
|
println!("Available scores:");
|
||||||
println!("Available scores {num_scores}:");
|
println!("{}", list_scores_with_index(&scores_vec));
|
||||||
println!("{}\n\n", list_scores_with_index(&scores_vec));
|
|
||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -175,7 +165,7 @@ async fn init<T: Topology + Send + Sync + 'static>(
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod test {
|
||||||
use harmony::{
|
use harmony::{
|
||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
maestro::Maestro,
|
maestro::Maestro,
|
||||||
@@ -202,14 +192,14 @@ mod tests {
|
|||||||
let maestro = init_test_maestro();
|
let maestro = init_test_maestro();
|
||||||
let res = crate::init(
|
let res = crate::init(
|
||||||
maestro,
|
maestro,
|
||||||
crate::Args {
|
Some(crate::Args {
|
||||||
yes: true,
|
yes: true,
|
||||||
filter: Some("SuccessScore".to_owned()),
|
filter: Some("SuccessScore".to_owned()),
|
||||||
interactive: false,
|
interactive: false,
|
||||||
all: true,
|
all: true,
|
||||||
number: 0,
|
number: 0,
|
||||||
list: false,
|
list: false,
|
||||||
},
|
}),
|
||||||
)
|
)
|
||||||
.await;
|
.await;
|
||||||
|
|
||||||
@@ -222,14 +212,14 @@ mod tests {
|
|||||||
|
|
||||||
let res = crate::init(
|
let res = crate::init(
|
||||||
maestro,
|
maestro,
|
||||||
crate::Args {
|
Some(crate::Args {
|
||||||
yes: true,
|
yes: true,
|
||||||
filter: Some("ErrorScore".to_owned()),
|
filter: Some("ErrorScore".to_owned()),
|
||||||
interactive: false,
|
interactive: false,
|
||||||
all: true,
|
all: true,
|
||||||
number: 0,
|
number: 0,
|
||||||
list: false,
|
list: false,
|
||||||
},
|
}),
|
||||||
)
|
)
|
||||||
.await;
|
.await;
|
||||||
|
|
||||||
@@ -242,14 +232,14 @@ mod tests {
|
|||||||
|
|
||||||
let res = crate::init(
|
let res = crate::init(
|
||||||
maestro,
|
maestro,
|
||||||
crate::Args {
|
Some(crate::Args {
|
||||||
yes: true,
|
yes: true,
|
||||||
filter: None,
|
filter: None,
|
||||||
interactive: false,
|
interactive: false,
|
||||||
all: false,
|
all: false,
|
||||||
number: 0,
|
number: 0,
|
||||||
list: false,
|
list: false,
|
||||||
},
|
}),
|
||||||
)
|
)
|
||||||
.await;
|
.await;
|
||||||
|
|
||||||
|
|||||||
@@ -33,13 +33,29 @@ pub struct IndicatifProgressTracker {
|
|||||||
|
|
||||||
impl IndicatifProgressTracker {
|
impl IndicatifProgressTracker {
|
||||||
pub fn new(base: MultiProgress) -> Self {
|
pub fn new(base: MultiProgress) -> Self {
|
||||||
let sections = HashMap::new();
|
// The indicatif log bridge will insert a progress bar at the top.
|
||||||
let tasks = HashMap::new();
|
// To prevent our first section from being erased, we need to create
|
||||||
|
// a dummy progress bar as our first progress bar.
|
||||||
|
let _ = base.clear();
|
||||||
|
let log_pb = base.add(ProgressBar::new(1));
|
||||||
|
|
||||||
|
let mut sections = HashMap::new();
|
||||||
|
sections.insert(
|
||||||
|
"__log__".into(),
|
||||||
|
Section {
|
||||||
|
header_index: 0,
|
||||||
|
task_count: 0,
|
||||||
|
pb: log_pb.clone(),
|
||||||
|
},
|
||||||
|
);
|
||||||
|
|
||||||
|
let mut tasks = HashMap::new();
|
||||||
|
tasks.insert("__log__".into(), log_pb);
|
||||||
|
|
||||||
let state = Arc::new(Mutex::new(IndicatifProgressTrackerState {
|
let state = Arc::new(Mutex::new(IndicatifProgressTrackerState {
|
||||||
sections,
|
sections,
|
||||||
tasks,
|
tasks,
|
||||||
pb_count: 0,
|
pb_count: 1,
|
||||||
}));
|
}));
|
||||||
|
|
||||||
Self { mp: base, state }
|
Self { mp: base, state }
|
||||||
|
|||||||
@@ -21,14 +21,10 @@ lazy_static! {
|
|||||||
pub static ref SUCCESS_SPINNER_STYLE: ProgressStyle = SPINNER_STYLE
|
pub static ref SUCCESS_SPINNER_STYLE: ProgressStyle = SPINNER_STYLE
|
||||||
.clone()
|
.clone()
|
||||||
.tick_strings(&[format!("{}", EMOJI_SUCCESS).as_str()]);
|
.tick_strings(&[format!("{}", EMOJI_SUCCESS).as_str()]);
|
||||||
pub static ref SKIP_SPINNER_STYLE: ProgressStyle = ProgressStyle::default_spinner()
|
pub static ref SKIP_SPINNER_STYLE: ProgressStyle = SPINNER_STYLE
|
||||||
.template(" {spinner:.orange} {wide_msg}")
|
|
||||||
.unwrap()
|
|
||||||
.clone()
|
.clone()
|
||||||
.tick_strings(&[format!("{}", EMOJI_SKIP).as_str()]);
|
.tick_strings(&[format!("{}", EMOJI_SKIP).as_str()]);
|
||||||
pub static ref ERROR_SPINNER_STYLE: ProgressStyle = ProgressStyle::default_spinner()
|
pub static ref ERROR_SPINNER_STYLE: ProgressStyle = SPINNER_STYLE
|
||||||
.template(" {spinner:.red} {wide_msg}")
|
|
||||||
.unwrap()
|
|
||||||
.clone()
|
.clone()
|
||||||
.tick_strings(&[format!("{}", EMOJI_ERROR).as_str()]);
|
.tick_strings(&[format!("{}", EMOJI_ERROR).as_str()]);
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,5 +1,6 @@
|
|||||||
use harmony_cli::progress::{IndicatifProgressTracker, ProgressTracker};
|
use harmony_cli::progress::{IndicatifProgressTracker, ProgressTracker};
|
||||||
use indicatif::MultiProgress;
|
use indicatif::MultiProgress;
|
||||||
|
use log::error;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use crate::instrumentation::{self, HarmonyComposerEvent};
|
use crate::instrumentation::{self, HarmonyComposerEvent};
|
||||||
@@ -52,13 +53,15 @@ pub async fn handle_events() {
|
|||||||
progress_tracker.finish_task(COMPILTATION_TASK, "project compiled");
|
progress_tracker.finish_task(COMPILTATION_TASK, "project compiled");
|
||||||
}
|
}
|
||||||
HarmonyComposerEvent::ProjectCompilationFailed { details } => {
|
HarmonyComposerEvent::ProjectCompilationFailed { details } => {
|
||||||
progress_tracker.fail_task(COMPILTATION_TASK, &format!("failed to compile project:\n{details}"));
|
progress_tracker.fail_task(COMPILTATION_TASK, "failed to compile project");
|
||||||
|
|
||||||
|
error!("{details}");
|
||||||
}
|
}
|
||||||
HarmonyComposerEvent::DeploymentStarted { target, profile } => {
|
HarmonyComposerEvent::DeploymentStarted { target } => {
|
||||||
progress_tracker.add_section(
|
progress_tracker.add_section(
|
||||||
PROGRESS_DEPLOYMENT,
|
PROGRESS_DEPLOYMENT,
|
||||||
&format!(
|
&format!(
|
||||||
"\n{} Deploying project on target '{target}' with profile '{profile}'...\n",
|
"\n{} Deploying project to {target}...\n",
|
||||||
harmony_cli::theme::EMOJI_DEPLOY,
|
harmony_cli::theme::EMOJI_DEPLOY,
|
||||||
),
|
),
|
||||||
);
|
);
|
||||||
@@ -66,10 +69,6 @@ pub async fn handle_events() {
|
|||||||
HarmonyComposerEvent::DeploymentCompleted => {
|
HarmonyComposerEvent::DeploymentCompleted => {
|
||||||
progress_tracker.clear();
|
progress_tracker.clear();
|
||||||
}
|
}
|
||||||
HarmonyComposerEvent::DeploymentFailed { details } => {
|
|
||||||
progress_tracker.add_task(PROGRESS_DEPLOYMENT, "deployment-failed", "");
|
|
||||||
progress_tracker.fail_task("deployment-failed", &details);
|
|
||||||
},
|
|
||||||
HarmonyComposerEvent::Shutdown => {
|
HarmonyComposerEvent::Shutdown => {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2,28 +2,16 @@ use log::debug;
|
|||||||
use once_cell::sync::Lazy;
|
use once_cell::sync::Lazy;
|
||||||
use tokio::sync::broadcast;
|
use tokio::sync::broadcast;
|
||||||
|
|
||||||
use crate::{HarmonyProfile, HarmonyTarget};
|
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, Clone)]
|
||||||
pub enum HarmonyComposerEvent {
|
pub enum HarmonyComposerEvent {
|
||||||
HarmonyComposerStarted,
|
HarmonyComposerStarted,
|
||||||
ProjectInitializationStarted,
|
ProjectInitializationStarted,
|
||||||
ProjectInitialized,
|
ProjectInitialized,
|
||||||
ProjectCompilationStarted {
|
ProjectCompilationStarted { details: String },
|
||||||
details: String,
|
|
||||||
},
|
|
||||||
ProjectCompiled,
|
ProjectCompiled,
|
||||||
ProjectCompilationFailed {
|
ProjectCompilationFailed { details: String },
|
||||||
details: String,
|
DeploymentStarted { target: String },
|
||||||
},
|
|
||||||
DeploymentStarted {
|
|
||||||
target: HarmonyTarget,
|
|
||||||
profile: HarmonyProfile,
|
|
||||||
},
|
|
||||||
DeploymentCompleted,
|
DeploymentCompleted,
|
||||||
DeploymentFailed {
|
|
||||||
details: String,
|
|
||||||
},
|
|
||||||
Shutdown,
|
Shutdown,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -35,18 +23,9 @@ static HARMONY_COMPOSER_EVENT_BUS: Lazy<broadcast::Sender<HarmonyComposerEvent>>
|
|||||||
});
|
});
|
||||||
|
|
||||||
pub fn instrument(event: HarmonyComposerEvent) -> Result<(), &'static str> {
|
pub fn instrument(event: HarmonyComposerEvent) -> Result<(), &'static str> {
|
||||||
#[cfg(not(test))]
|
match HARMONY_COMPOSER_EVENT_BUS.send(event) {
|
||||||
{
|
Ok(_) => Ok(()),
|
||||||
match HARMONY_COMPOSER_EVENT_BUS.send(event) {
|
Err(_) => Err("send error: no subscribers"),
|
||||||
Ok(_) => Ok(()),
|
|
||||||
Err(_) => Err("send error: no subscribers"),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
{
|
|
||||||
let _ = event; // Suppress the "unused variable" warning for `event`
|
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -49,11 +49,14 @@ struct CheckArgs {
|
|||||||
|
|
||||||
#[derive(Args, Clone, Debug)]
|
#[derive(Args, Clone, Debug)]
|
||||||
struct DeployArgs {
|
struct DeployArgs {
|
||||||
#[arg(long = "target", short = 't', default_value = "local")]
|
#[arg(long, default_value_t = false)]
|
||||||
harmony_target: HarmonyTarget,
|
staging: bool,
|
||||||
|
|
||||||
#[arg(long = "profile", short = 'p', default_value = "dev")]
|
#[arg(long, default_value_t = false)]
|
||||||
harmony_profile: HarmonyProfile,
|
prod: bool,
|
||||||
|
|
||||||
|
#[arg(long, default_value_t = false)]
|
||||||
|
smoke_test: bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Args, Clone, Debug)]
|
#[derive(Args, Clone, Debug)]
|
||||||
@@ -65,38 +68,6 @@ struct AllArgs {
|
|||||||
deploy: DeployArgs,
|
deploy: DeployArgs,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, Debug, clap::ValueEnum)]
|
|
||||||
enum HarmonyTarget {
|
|
||||||
Local,
|
|
||||||
Remote,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl std::fmt::Display for HarmonyTarget {
|
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
|
||||||
match self {
|
|
||||||
HarmonyTarget::Local => f.write_str("local"),
|
|
||||||
HarmonyTarget::Remote => f.write_str("remote"),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Debug, clap::ValueEnum)]
|
|
||||||
enum HarmonyProfile {
|
|
||||||
Dev,
|
|
||||||
Staging,
|
|
||||||
Production,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl std::fmt::Display for HarmonyProfile {
|
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
|
||||||
match self {
|
|
||||||
HarmonyProfile::Dev => f.write_str("dev"),
|
|
||||||
HarmonyProfile::Staging => f.write_str("staging"),
|
|
||||||
HarmonyProfile::Production => f.write_str("production"),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[tokio::main]
|
#[tokio::main]
|
||||||
async fn main() {
|
async fn main() {
|
||||||
let hc_logger_handle = harmony_composer_logger::init();
|
let hc_logger_handle = harmony_composer_logger::init();
|
||||||
@@ -151,39 +122,26 @@ async fn main() {
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
Commands::Deploy(args) => {
|
Commands::Deploy(args) => {
|
||||||
instrumentation::instrument(HarmonyComposerEvent::DeploymentStarted {
|
let deploy = if args.staging {
|
||||||
target: args.harmony_target.clone(),
|
instrumentation::instrument(HarmonyComposerEvent::DeploymentStarted {
|
||||||
profile: args.harmony_profile.clone(),
|
target: "staging".to_string(),
|
||||||
})
|
})
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
todo!("implement staging deployment")
|
||||||
if matches!(args.harmony_profile, HarmonyProfile::Dev)
|
} else if args.prod {
|
||||||
&& !matches!(args.harmony_target, HarmonyTarget::Local)
|
instrumentation::instrument(HarmonyComposerEvent::DeploymentStarted {
|
||||||
{
|
target: "prod".to_string(),
|
||||||
instrumentation::instrument(HarmonyComposerEvent::DeploymentFailed {
|
})
|
||||||
details: format!(
|
.unwrap();
|
||||||
"Cannot run profile '{}' on target '{}'. Profile '{}' can run locally only.",
|
todo!("implement prod deployment")
|
||||||
args.harmony_profile, args.harmony_target, args.harmony_profile
|
} else {
|
||||||
),
|
instrumentation::instrument(HarmonyComposerEvent::DeploymentStarted {
|
||||||
}).unwrap();
|
target: "dev".to_string(),
|
||||||
return;
|
})
|
||||||
|
.unwrap();
|
||||||
|
Command::new(harmony_bin_path).arg("-y").arg("-a").spawn()
|
||||||
}
|
}
|
||||||
|
.expect("failed to run harmony deploy");
|
||||||
let use_local_k3d = match args.harmony_target {
|
|
||||||
HarmonyTarget::Local => true,
|
|
||||||
HarmonyTarget::Remote => false,
|
|
||||||
};
|
|
||||||
|
|
||||||
let mut command = Command::new(harmony_bin_path);
|
|
||||||
command
|
|
||||||
.env("HARMONY_USE_LOCAL_K3D", format!("{use_local_k3d}"))
|
|
||||||
.env("HARMONY_PROFILE", format!("{}", args.harmony_profile))
|
|
||||||
.arg("-y")
|
|
||||||
.arg("-a");
|
|
||||||
|
|
||||||
info!("{:?}", command);
|
|
||||||
|
|
||||||
let deploy = command.spawn().expect("failed to run harmony deploy");
|
|
||||||
|
|
||||||
let deploy_output = deploy.wait_with_output().unwrap();
|
let deploy_output = deploy.wait_with_output().unwrap();
|
||||||
debug!("{}", String::from_utf8(deploy_output.stdout).unwrap());
|
debug!("{}", String::from_utf8(deploy_output.stdout).unwrap());
|
||||||
|
|||||||
@@ -1,12 +0,0 @@
|
|||||||
[package]
|
|
||||||
name = "harmony_inventory_agent"
|
|
||||||
version = "0.1.0"
|
|
||||||
edition = "2024"
|
|
||||||
|
|
||||||
[dependencies]
|
|
||||||
actix-web = "4.4"
|
|
||||||
sysinfo = "0.30"
|
|
||||||
serde.workspace = true
|
|
||||||
serde_json.workspace = true
|
|
||||||
log.workspace = true
|
|
||||||
env_logger.workspace = true
|
|
||||||
@@ -1,825 +0,0 @@
|
|||||||
use log::debug;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use serde_json::Value;
|
|
||||||
use std::fs;
|
|
||||||
use std::path::Path;
|
|
||||||
use std::process::Command;
|
|
||||||
use sysinfo::System;
|
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize, Debug)]
|
|
||||||
pub struct PhysicalHost {
|
|
||||||
pub storage_drives: Vec<StorageDrive>,
|
|
||||||
pub storage_controller: StorageController,
|
|
||||||
pub memory_modules: Vec<MemoryModule>,
|
|
||||||
pub cpus: Vec<CPU>,
|
|
||||||
pub chipset: Chipset,
|
|
||||||
pub network_interfaces: Vec<NetworkInterface>,
|
|
||||||
pub management_interface: Option<ManagementInterface>,
|
|
||||||
pub host_uuid: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize, Debug)]
|
|
||||||
pub struct StorageDrive {
|
|
||||||
pub name: String,
|
|
||||||
pub model: String,
|
|
||||||
pub serial: String,
|
|
||||||
pub size_bytes: u64,
|
|
||||||
pub logical_block_size: u32,
|
|
||||||
pub physical_block_size: u32,
|
|
||||||
pub rotational: bool,
|
|
||||||
pub wwn: Option<String>,
|
|
||||||
pub interface_type: String,
|
|
||||||
pub smart_status: Option<String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize, Debug)]
|
|
||||||
pub struct StorageController {
|
|
||||||
pub name: String,
|
|
||||||
pub driver: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize, Debug)]
|
|
||||||
pub struct MemoryModule {
|
|
||||||
pub size_bytes: u64,
|
|
||||||
pub speed_mhz: Option<u32>,
|
|
||||||
pub manufacturer: Option<String>,
|
|
||||||
pub part_number: Option<String>,
|
|
||||||
pub serial_number: Option<String>,
|
|
||||||
pub rank: Option<u8>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize, Debug)]
|
|
||||||
pub struct CPU {
|
|
||||||
pub model: String,
|
|
||||||
pub vendor: String,
|
|
||||||
pub cores: u32,
|
|
||||||
pub threads: u32,
|
|
||||||
pub frequency_mhz: u64,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize, Debug)]
|
|
||||||
pub struct Chipset {
|
|
||||||
pub name: String,
|
|
||||||
pub vendor: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize, Debug)]
|
|
||||||
pub struct NetworkInterface {
|
|
||||||
pub name: String,
|
|
||||||
pub mac_address: String,
|
|
||||||
pub speed_mbps: Option<u32>,
|
|
||||||
pub is_up: bool,
|
|
||||||
pub mtu: u32,
|
|
||||||
pub ipv4_addresses: Vec<String>,
|
|
||||||
pub ipv6_addresses: Vec<String>,
|
|
||||||
pub driver: String,
|
|
||||||
pub firmware_version: Option<String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize, Debug)]
|
|
||||||
pub struct ManagementInterface {
|
|
||||||
pub kind: String,
|
|
||||||
pub address: Option<String>,
|
|
||||||
pub firmware: Option<String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl PhysicalHost {
|
|
||||||
pub fn gather() -> Result<Self, String> {
|
|
||||||
let mut sys = System::new_all();
|
|
||||||
sys.refresh_all();
|
|
||||||
|
|
||||||
Self::all_tools_available()?;
|
|
||||||
|
|
||||||
Ok(Self {
|
|
||||||
storage_drives: Self::gather_storage_drives()?,
|
|
||||||
storage_controller: Self::gather_storage_controller()?,
|
|
||||||
memory_modules: Self::gather_memory_modules()?,
|
|
||||||
cpus: Self::gather_cpus(&sys)?,
|
|
||||||
chipset: Self::gather_chipset()?,
|
|
||||||
network_interfaces: Self::gather_network_interfaces()?,
|
|
||||||
management_interface: Self::gather_management_interface()?,
|
|
||||||
host_uuid: Self::get_host_uuid()?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
fn all_tools_available() -> Result<(), String> {
|
|
||||||
let required_tools = [
|
|
||||||
("lsblk", "--version"),
|
|
||||||
("lspci", "--version"),
|
|
||||||
("lsmod", "--version"),
|
|
||||||
("dmidecode", "--version"),
|
|
||||||
("smartctl", "--version"),
|
|
||||||
("ip", "route"), // No version flag available
|
|
||||||
];
|
|
||||||
|
|
||||||
let mut missing_tools = Vec::new();
|
|
||||||
|
|
||||||
for (tool, tool_arg) in required_tools.iter() {
|
|
||||||
// First check if tool exists in PATH using which(1)
|
|
||||||
let exists = if let Ok(output) = Command::new("which").arg(tool).output() {
|
|
||||||
output.status.success()
|
|
||||||
} else {
|
|
||||||
// Fallback: manual PATH search if which(1) is unavailable
|
|
||||||
if let Ok(path_var) = std::env::var("PATH") {
|
|
||||||
path_var.split(':').any(|dir| {
|
|
||||||
let tool_path = std::path::Path::new(dir).join(tool);
|
|
||||||
tool_path.exists() && Self::is_executable(&tool_path)
|
|
||||||
})
|
|
||||||
} else {
|
|
||||||
false
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
if !exists {
|
|
||||||
missing_tools.push(*tool);
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Verify tool is functional by checking version/help output
|
|
||||||
let mut cmd = Command::new(tool);
|
|
||||||
cmd.arg(tool_arg);
|
|
||||||
cmd.stdout(std::process::Stdio::null());
|
|
||||||
cmd.stderr(std::process::Stdio::null());
|
|
||||||
|
|
||||||
if let Ok(status) = cmd.status() {
|
|
||||||
if !status.success() {
|
|
||||||
missing_tools.push(*tool);
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
missing_tools.push(*tool);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !missing_tools.is_empty() {
|
|
||||||
let missing_str = missing_tools
|
|
||||||
.iter()
|
|
||||||
.map(|s| s.to_string())
|
|
||||||
.collect::<Vec<String>>()
|
|
||||||
.join(", ");
|
|
||||||
return Err(format!(
|
|
||||||
"The following required tools are not available: {}. Please install these tools to use PhysicalHost::gather()",
|
|
||||||
missing_str
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(unix)]
|
|
||||||
fn is_executable(path: &std::path::Path) -> bool {
|
|
||||||
use std::os::unix::fs::PermissionsExt;
|
|
||||||
|
|
||||||
match std::fs::metadata(path) {
|
|
||||||
Ok(meta) => meta.permissions().mode() & 0o111 != 0,
|
|
||||||
Err(_) => false,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(not(unix))]
|
|
||||||
fn is_executable(_path: &std::path::Path) -> bool {
|
|
||||||
// On non-Unix systems, we assume existence implies executability
|
|
||||||
true
|
|
||||||
}
|
|
||||||
|
|
||||||
fn gather_storage_drives() -> Result<Vec<StorageDrive>, String> {
|
|
||||||
let mut drives = Vec::new();
|
|
||||||
|
|
||||||
// Use lsblk with JSON output for robust parsing
|
|
||||||
let output = Command::new("lsblk")
|
|
||||||
.args([
|
|
||||||
"-d",
|
|
||||||
"-o",
|
|
||||||
"NAME,MODEL,SERIAL,SIZE,ROTA,WWN",
|
|
||||||
"-n",
|
|
||||||
"-e",
|
|
||||||
"7",
|
|
||||||
"--json",
|
|
||||||
])
|
|
||||||
.output()
|
|
||||||
.map_err(|e| format!("Failed to execute lsblk: {}", e))?;
|
|
||||||
|
|
||||||
if !output.status.success() {
|
|
||||||
return Err(format!(
|
|
||||||
"lsblk command failed: {}",
|
|
||||||
String::from_utf8_lossy(&output.stderr)
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let json: Value = serde_json::from_slice(&output.stdout)
|
|
||||||
.map_err(|e| format!("Failed to parse lsblk JSON output: {}", e))?;
|
|
||||||
|
|
||||||
let blockdevices = json
|
|
||||||
.get("blockdevices")
|
|
||||||
.and_then(|v| v.as_array())
|
|
||||||
.ok_or("Invalid lsblk JSON: missing 'blockdevices' array")?;
|
|
||||||
|
|
||||||
for device in blockdevices {
|
|
||||||
let name = device
|
|
||||||
.get("name")
|
|
||||||
.and_then(|v| v.as_str())
|
|
||||||
.ok_or("Missing 'name' in lsblk device")?
|
|
||||||
.to_string();
|
|
||||||
|
|
||||||
if name.is_empty() {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
let model = device
|
|
||||||
.get("model")
|
|
||||||
.and_then(|v| v.as_str())
|
|
||||||
.map(|s| s.trim().to_string())
|
|
||||||
.unwrap_or_default();
|
|
||||||
|
|
||||||
let serial = device
|
|
||||||
.get("serial")
|
|
||||||
.and_then(|v| v.as_str())
|
|
||||||
.map(|s| s.trim().to_string())
|
|
||||||
.unwrap_or_default();
|
|
||||||
|
|
||||||
let size_str = device
|
|
||||||
.get("size")
|
|
||||||
.and_then(|v| v.as_str())
|
|
||||||
.ok_or("Missing 'size' in lsblk device")?;
|
|
||||||
let size_bytes = Self::parse_size(size_str)?;
|
|
||||||
|
|
||||||
let rotational = device
|
|
||||||
.get("rota")
|
|
||||||
.and_then(|v| v.as_bool())
|
|
||||||
.ok_or("Missing 'rota' in lsblk device")?;
|
|
||||||
|
|
||||||
let wwn = device
|
|
||||||
.get("wwn")
|
|
||||||
.and_then(|v| v.as_str())
|
|
||||||
.map(|s| s.trim().to_string())
|
|
||||||
.filter(|s| !s.is_empty() && s != "null");
|
|
||||||
|
|
||||||
let device_path = Path::new("/sys/block").join(&name);
|
|
||||||
|
|
||||||
let logical_block_size = Self::read_sysfs_u32(
|
|
||||||
&device_path.join("queue/logical_block_size"),
|
|
||||||
)
|
|
||||||
.map_err(|e| format!("Failed to read logical block size for {}: {}", name, e))?;
|
|
||||||
|
|
||||||
let physical_block_size = Self::read_sysfs_u32(
|
|
||||||
&device_path.join("queue/physical_block_size"),
|
|
||||||
)
|
|
||||||
.map_err(|e| format!("Failed to read physical block size for {}: {}", name, e))?;
|
|
||||||
|
|
||||||
let interface_type = Self::get_interface_type(&name, &device_path)?;
|
|
||||||
let smart_status = Self::get_smart_status(&name)?;
|
|
||||||
|
|
||||||
let mut drive = StorageDrive {
|
|
||||||
name: name.clone(),
|
|
||||||
model,
|
|
||||||
serial,
|
|
||||||
size_bytes,
|
|
||||||
logical_block_size,
|
|
||||||
physical_block_size,
|
|
||||||
rotational,
|
|
||||||
wwn,
|
|
||||||
interface_type,
|
|
||||||
smart_status,
|
|
||||||
};
|
|
||||||
|
|
||||||
// Enhance with additional sysfs info if available
|
|
||||||
if device_path.exists() {
|
|
||||||
if drive.model.is_empty() {
|
|
||||||
drive.model = Self::read_sysfs_string(&device_path.join("device/model"))
|
|
||||||
.map_err(|e| format!("Failed to read model for {}: {}", name, e))?;
|
|
||||||
}
|
|
||||||
if drive.serial.is_empty() {
|
|
||||||
drive.serial = Self::read_sysfs_string(&device_path.join("device/serial"))
|
|
||||||
.map_err(|e| format!("Failed to read serial for {}: {}", name, e))?;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
drives.push(drive);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(drives)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn gather_storage_controller() -> Result<StorageController, String> {
|
|
||||||
let mut controller = StorageController {
|
|
||||||
name: "Unknown".to_string(),
|
|
||||||
driver: "Unknown".to_string(),
|
|
||||||
};
|
|
||||||
|
|
||||||
// Use lspci with JSON output if available
|
|
||||||
let output = Command::new("lspci")
|
|
||||||
.args(["-nn", "-d", "::0100", "-J"]) // Storage controllers class with JSON
|
|
||||||
.output()
|
|
||||||
.map_err(|e| format!("Failed to execute lspci: {}", e))?;
|
|
||||||
|
|
||||||
if output.status.success() {
|
|
||||||
let json: Value = serde_json::from_slice(&output.stdout)
|
|
||||||
.map_err(|e| format!("Failed to parse lspci JSON output: {}", e))?;
|
|
||||||
|
|
||||||
if let Some(devices) = json.as_array() {
|
|
||||||
for device in devices {
|
|
||||||
if let Some(device_info) = device.as_object()
|
|
||||||
&& let Some(name) = device_info
|
|
||||||
.get("device")
|
|
||||||
.and_then(|v| v.as_object())
|
|
||||||
.and_then(|v| v.get("name"))
|
|
||||||
.and_then(|v| v.as_str())
|
|
||||||
{
|
|
||||||
controller.name = name.to_string();
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fallback to text output if JSON fails or no device found
|
|
||||||
if controller.name == "Unknown" {
|
|
||||||
let output = Command::new("lspci")
|
|
||||||
.args(["-nn", "-d", "::0100"]) // Storage controllers class
|
|
||||||
.output()
|
|
||||||
.map_err(|e| format!("Failed to execute lspci (fallback): {}", e))?;
|
|
||||||
|
|
||||||
if output.status.success() {
|
|
||||||
let output_str = String::from_utf8_lossy(&output.stdout);
|
|
||||||
if let Some(line) = output_str.lines().next() {
|
|
||||||
let parts: Vec<&str> = line.split(':').collect();
|
|
||||||
if parts.len() > 2 {
|
|
||||||
controller.name = parts[2].trim().to_string();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Try to get driver info from lsmod
|
|
||||||
let output = Command::new("lsmod")
|
|
||||||
.output()
|
|
||||||
.map_err(|e| format!("Failed to execute lsmod: {}", e))?;
|
|
||||||
|
|
||||||
if output.status.success() {
|
|
||||||
let output_str = String::from_utf8_lossy(&output.stdout);
|
|
||||||
for line in output_str.lines() {
|
|
||||||
if line.contains("ahci")
|
|
||||||
|| line.contains("nvme")
|
|
||||||
|| line.contains("megaraid")
|
|
||||||
|| line.contains("mpt3sas")
|
|
||||||
{
|
|
||||||
let parts: Vec<&str> = line.split_whitespace().collect();
|
|
||||||
if !parts.is_empty() {
|
|
||||||
controller.driver = parts[0].to_string();
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(controller)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn gather_memory_modules() -> Result<Vec<MemoryModule>, String> {
|
|
||||||
let mut modules = Vec::new();
|
|
||||||
|
|
||||||
let output = Command::new("dmidecode")
|
|
||||||
.arg("--type")
|
|
||||||
.arg("17")
|
|
||||||
.output()
|
|
||||||
.map_err(|e| format!("Failed to execute dmidecode: {}", e))?;
|
|
||||||
|
|
||||||
if !output.status.success() {
|
|
||||||
return Err(format!(
|
|
||||||
"dmidecode command failed: {}",
|
|
||||||
String::from_utf8_lossy(&output.stderr)
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let output_str = String::from_utf8(output.stdout)
|
|
||||||
.map_err(|e| format!("Failed to parse dmidecode output: {}", e))?;
|
|
||||||
|
|
||||||
let sections: Vec<&str> = output_str.split("Memory Device").collect();
|
|
||||||
|
|
||||||
for section in sections.into_iter().skip(1) {
|
|
||||||
let mut module = MemoryModule {
|
|
||||||
size_bytes: 0,
|
|
||||||
speed_mhz: None,
|
|
||||||
manufacturer: None,
|
|
||||||
part_number: None,
|
|
||||||
serial_number: None,
|
|
||||||
rank: None,
|
|
||||||
};
|
|
||||||
|
|
||||||
for line in section.lines() {
|
|
||||||
let line = line.trim();
|
|
||||||
if let Some(size_str) = line.strip_prefix("Size: ") {
|
|
||||||
if size_str != "No Module Installed"
|
|
||||||
&& let Some((num, unit)) = size_str.split_once(' ')
|
|
||||||
&& let Ok(num) = num.parse::<u64>()
|
|
||||||
{
|
|
||||||
module.size_bytes = match unit {
|
|
||||||
"MB" => num * 1024 * 1024,
|
|
||||||
"GB" => num * 1024 * 1024 * 1024,
|
|
||||||
"KB" => num * 1024,
|
|
||||||
_ => 0,
|
|
||||||
};
|
|
||||||
}
|
|
||||||
} else if let Some(speed_str) = line.strip_prefix("Speed: ") {
|
|
||||||
if let Some((num, _unit)) = speed_str.split_once(' ') {
|
|
||||||
module.speed_mhz = num.parse().ok();
|
|
||||||
}
|
|
||||||
} else if let Some(man) = line.strip_prefix("Manufacturer: ") {
|
|
||||||
module.manufacturer = Some(man.to_string());
|
|
||||||
} else if let Some(part) = line.strip_prefix("Part Number: ") {
|
|
||||||
module.part_number = Some(part.to_string());
|
|
||||||
} else if let Some(serial) = line.strip_prefix("Serial Number: ") {
|
|
||||||
module.serial_number = Some(serial.to_string());
|
|
||||||
} else if let Some(rank) = line.strip_prefix("Rank: ") {
|
|
||||||
module.rank = rank.parse().ok();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if module.size_bytes > 0 {
|
|
||||||
modules.push(module);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(modules)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn gather_cpus(sys: &System) -> Result<Vec<CPU>, String> {
|
|
||||||
let mut cpus = Vec::new();
|
|
||||||
let global_cpu = sys.global_cpu_info();
|
|
||||||
|
|
||||||
cpus.push(CPU {
|
|
||||||
model: global_cpu.brand().to_string(),
|
|
||||||
vendor: global_cpu.vendor_id().to_string(),
|
|
||||||
cores: sys.physical_core_count().unwrap_or(1) as u32,
|
|
||||||
threads: sys.cpus().len() as u32,
|
|
||||||
frequency_mhz: global_cpu.frequency(),
|
|
||||||
});
|
|
||||||
|
|
||||||
Ok(cpus)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn gather_chipset() -> Result<Chipset, String> {
|
|
||||||
Ok(Chipset {
|
|
||||||
name: Self::read_dmi("baseboard-product-name")?,
|
|
||||||
vendor: Self::read_dmi("baseboard-manufacturer")?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Enumerates physical network interfaces by walking /sys/class/net.
///
/// Virtual devices (loopback, docker/libvirt bridges, veth pairs, tunnels,
/// wireguard) are skipped by name prefix, and anything without a `device`
/// subdirectory (i.e. not backed by real hardware) is also skipped.
/// IP addresses are resolved via `ip -j addr show`.
///
/// Fails on the first sysfs read or `ip` invocation error, except for the
/// link speed, which is optional (absent/unreadable on Wi-Fi interfaces).
fn gather_network_interfaces() -> Result<Vec<NetworkInterface>, String> {
    let mut interfaces = Vec::new();
    let sys_net_path = Path::new("/sys/class/net");

    let entries = fs::read_dir(sys_net_path)
        .map_err(|e| format!("Failed to read /sys/class/net: {}", e))?;

    for entry in entries {
        let entry = entry.map_err(|e| format!("Failed to read directory entry: {}", e))?;
        let iface_name = entry
            .file_name()
            .into_string()
            .map_err(|_| "Invalid UTF-8 in interface name")?;
        let iface_path = entry.path();

        // Skip virtual interfaces
        // NOTE(review): prefix matching would also skip a physical NIC whose
        // name happens to start with one of these strings — confirm acceptable.
        if iface_name.starts_with("lo")
            || iface_name.starts_with("docker")
            || iface_name.starts_with("virbr")
            || iface_name.starts_with("veth")
            || iface_name.starts_with("br-")
            || iface_name.starts_with("tun")
            || iface_name.starts_with("wg")
        {
            continue;
        }

        // Check if it's a physical interface by looking for device directory
        if !iface_path.join("device").exists() {
            continue;
        }

        let mac_address = Self::read_sysfs_string(&iface_path.join("address"))
            .map_err(|e| format!("Failed to read MAC address for {}: {}", iface_name, e))?;

        // Speed is best-effort: the attribute may be missing entirely, or
        // present but unreadable (EINVAL on wireless); both map to None.
        let speed_mbps = if iface_path.join("speed").exists() {
            match Self::read_sysfs_u32(&iface_path.join("speed")) {
                Ok(speed) => Some(speed),
                Err(e) => {
                    debug!(
                        "Failed to read speed for {}: {} . This is expected to fail on wifi interfaces.",
                        iface_name, e
                    );
                    None
                }
            }
        } else {
            None
        };

        let operstate = Self::read_sysfs_string(&iface_path.join("operstate"))
            .map_err(|e| format!("Failed to read operstate for {}: {}", iface_name, e))?;

        let mtu = Self::read_sysfs_u32(&iface_path.join("mtu"))
            .map_err(|e| format!("Failed to read MTU for {}: {}", iface_name, e))?;

        // Driver name is derived from the basename of the module symlink.
        let driver =
            Self::read_sysfs_symlink_basename(&iface_path.join("device/driver/module"))
                .map_err(|e| format!("Failed to read driver for {}: {}", iface_name, e))?;

        // Optional: not every driver exposes device/firmware_version.
        let firmware_version = Self::read_sysfs_opt_string(
            &iface_path.join("device/firmware_version"),
        )
        .map_err(|e| format!("Failed to read firmware version for {}: {}", iface_name, e))?;

        // Get IP addresses using ip command with JSON output
        let (ipv4_addresses, ipv6_addresses) = Self::get_interface_ips_json(&iface_name)
            .map_err(|e| format!("Failed to get IP addresses for {}: {}", iface_name, e))?;

        interfaces.push(NetworkInterface {
            name: iface_name,
            mac_address,
            speed_mbps,
            is_up: operstate == "up",
            mtu,
            ipv4_addresses,
            ipv6_addresses,
            driver,
            firmware_version,
        });
    }

    Ok(interfaces)
}
|
|
||||||
|
|
||||||
fn gather_management_interface() -> Result<Option<ManagementInterface>, String> {
|
|
||||||
if Path::new("/dev/ipmi0").exists() {
|
|
||||||
Ok(Some(ManagementInterface {
|
|
||||||
kind: "IPMI".to_string(),
|
|
||||||
address: None,
|
|
||||||
firmware: Some(Self::read_dmi("bios-version")?),
|
|
||||||
}))
|
|
||||||
} else if Path::new("/sys/class/misc/mei").exists() {
|
|
||||||
Ok(Some(ManagementInterface {
|
|
||||||
kind: "Intel ME".to_string(),
|
|
||||||
address: None,
|
|
||||||
firmware: None,
|
|
||||||
}))
|
|
||||||
} else {
|
|
||||||
Ok(None)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns the SMBIOS system UUID (dmidecode "system-uuid"), used as the
/// stable hardware identity of this host.
fn get_host_uuid() -> Result<String, String> {
    Self::read_dmi("system-uuid")
}
|
|
||||||
|
|
||||||
// Helper methods
|
|
||||||
/// Reads a sysfs attribute file as a whitespace-trimmed UTF-8 string.
fn read_sysfs_string(path: &Path) -> Result<String, String> {
    match fs::read_to_string(path) {
        Ok(contents) => Ok(contents.trim().to_string()),
        Err(e) => Err(format!("Failed to read {}: {}", path.display(), e)),
    }
}
|
|
||||||
|
|
||||||
/// Reads an optional sysfs attribute.
///
/// Returns `Ok(None)` when the file does not exist or is empty after
/// trimming; any other I/O failure is reported as an error.
fn read_sysfs_opt_string(path: &Path) -> Result<Option<String>, String> {
    let contents = match fs::read_to_string(path) {
        Ok(c) => c,
        Err(e) if e.kind() == std::io::ErrorKind::NotFound => return Ok(None),
        Err(e) => return Err(format!("Failed to read {}: {}", path.display(), e)),
    };
    let trimmed = contents.trim();
    if trimmed.is_empty() {
        Ok(None)
    } else {
        Ok(Some(trimmed.to_string()))
    }
}
|
|
||||||
|
|
||||||
/// Reads a sysfs attribute file and parses it as a decimal u32.
fn read_sysfs_u32(path: &Path) -> Result<u32, String> {
    let raw = fs::read_to_string(path)
        .map_err(|e| format!("Failed to read {}: {}", path.display(), e))?;
    raw.trim()
        .parse()
        .map_err(|e| format!("Failed to parse {}: {}", path.display(), e))
}
|
|
||||||
|
|
||||||
/// Resolves a symlink and returns the final path component of its target.
///
/// Used e.g. to derive a NIC's kernel module name from
/// `/sys/class/net/<if>/device/driver/module`.
fn read_sysfs_symlink_basename(path: &Path) -> Result<String, String> {
    let target_path = fs::read_link(path).map_err(|e| {
        if e.kind() == std::io::ErrorKind::NotFound {
            format!("Could not resolve symlink for path : {}", path.display())
        } else {
            format!("Failed to read symlink {}: {}", path.display(), e)
        }
    })?;

    let name_osstr = target_path.file_name().ok_or_else(|| {
        format!(
            "Symlink target has no basename: {} -> {}",
            path.display(),
            target_path.display()
        )
    })?;

    name_osstr.to_str().map(str::to_string).ok_or_else(|| {
        format!(
            "Symlink target basename is not valid UTF-8: {}",
            target_path.display()
        )
    })
}
|
|
||||||
|
|
||||||
/// Queries a single SMBIOS field via `dmidecode -s <field>`.
///
/// Requires `dmidecode` to be installed (and typically root privileges).
/// Returns the trimmed stdout, or an error when the command cannot be
/// executed, exits non-zero, or emits non-UTF-8 output.
fn read_dmi(field: &str) -> Result<String, String> {
    let output = Command::new("dmidecode")
        .arg("-s")
        .arg(field)
        .output()
        .map_err(|e| format!("Failed to execute dmidecode for field {}: {}", field, e))?;

    if !output.status.success() {
        return Err(format!(
            "dmidecode command failed for field {}: {}",
            field,
            String::from_utf8_lossy(&output.stderr)
        ));
    }

    let stdout = String::from_utf8(output.stdout).map_err(|e| {
        format!(
            "Failed to parse dmidecode output for field {}: {}",
            field, e
        )
    })?;
    Ok(stdout.trim().to_string())
}
|
|
||||||
|
|
||||||
fn get_interface_type(device_name: &str, device_path: &Path) -> Result<String, String> {
|
|
||||||
if device_name.starts_with("nvme") {
|
|
||||||
Ok("NVMe".to_string())
|
|
||||||
} else if device_name.starts_with("sd") {
|
|
||||||
Ok("SATA".to_string())
|
|
||||||
} else if device_name.starts_with("hd") {
|
|
||||||
Ok("IDE".to_string())
|
|
||||||
} else if device_name.starts_with("vd") {
|
|
||||||
Ok("VirtIO".to_string())
|
|
||||||
} else {
|
|
||||||
// Try to determine from device path
|
|
||||||
let subsystem = Self::read_sysfs_string(&device_path.join("device/subsystem"))?;
|
|
||||||
Ok(subsystem
|
|
||||||
.split('/')
|
|
||||||
.next_back()
|
|
||||||
.unwrap_or("Unknown")
|
|
||||||
.to_string())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Runs `smartctl -H /dev/<device>` and extracts the overall health verdict.
///
/// Returns `Ok(None)` when smartctl exits non-zero (e.g. unsupported device)
/// or when no health-assessment line is present in the output.
fn get_smart_status(device_name: &str) -> Result<Option<String>, String> {
    let output = Command::new("smartctl")
        .arg("-H")
        .arg(format!("/dev/{}", device_name))
        .output()
        .map_err(|e| format!("Failed to execute smartctl for {}: {}", device_name, e))?;

    if !output.status.success() {
        return Ok(None);
    }

    let stdout = String::from_utf8(output.stdout)
        .map_err(|e| format!("Failed to parse smartctl output for {}: {}", device_name, e))?;

    // Look for "SMART overall-health self-assessment test result: PASSED"
    // and keep whatever follows the first colon.
    let status = stdout
        .lines()
        .filter(|line| line.contains("SMART overall-health self-assessment"))
        .find_map(|line| line.split(':').nth(1))
        .map(|s| s.trim().to_string());

    Ok(status)
}
|
|
||||||
|
|
||||||
fn parse_size(size_str: &str) -> Result<u64, String> {
|
|
||||||
debug!("Parsing size_str '{size_str}'");
|
|
||||||
let size;
|
|
||||||
if size_str.ends_with('T') {
|
|
||||||
size = size_str[..size_str.len() - 1]
|
|
||||||
.parse::<f64>()
|
|
||||||
.map(|t| t * 1024.0 * 1024.0 * 1024.0 * 1024.0)
|
|
||||||
.map_err(|e| format!("Failed to parse T size '{}': {}", size_str, e))
|
|
||||||
} else if size_str.ends_with('G') {
|
|
||||||
size = size_str[..size_str.len() - 1]
|
|
||||||
.parse::<f64>()
|
|
||||||
.map(|g| g * 1024.0 * 1024.0 * 1024.0)
|
|
||||||
.map_err(|e| format!("Failed to parse G size '{}': {}", size_str, e))
|
|
||||||
} else if size_str.ends_with('M') {
|
|
||||||
size = size_str[..size_str.len() - 1]
|
|
||||||
.parse::<f64>()
|
|
||||||
.map(|m| m * 1024.0 * 1024.0)
|
|
||||||
.map_err(|e| format!("Failed to parse M size '{}': {}", size_str, e))
|
|
||||||
} else if size_str.ends_with('K') {
|
|
||||||
size = size_str[..size_str.len() - 1]
|
|
||||||
.parse::<f64>()
|
|
||||||
.map(|k| k * 1024.0)
|
|
||||||
.map_err(|e| format!("Failed to parse K size '{}': {}", size_str, e))
|
|
||||||
} else if size_str.ends_with('B') {
|
|
||||||
size = size_str[..size_str.len() - 1]
|
|
||||||
.parse::<f64>()
|
|
||||||
.map_err(|e| format!("Failed to parse B size '{}': {}", size_str, e))
|
|
||||||
} else {
|
|
||||||
size = size_str
|
|
||||||
.parse::<f64>()
|
|
||||||
.map_err(|e| format!("Failed to parse size '{}': {}", size_str, e))
|
|
||||||
}
|
|
||||||
|
|
||||||
size.map(|s| s as u64)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_interface_ips_json(iface_name: &str) -> Result<(Vec<String>, Vec<String>), String> {
|
|
||||||
let mut ipv4 = Vec::new();
|
|
||||||
let mut ipv6 = Vec::new();
|
|
||||||
|
|
||||||
// Get IPv4 addresses using JSON output
|
|
||||||
let output = Command::new("ip")
|
|
||||||
.args(["-j", "-4", "addr", "show", iface_name])
|
|
||||||
.output()
|
|
||||||
.map_err(|e| {
|
|
||||||
format!(
|
|
||||||
"Failed to execute ip command for IPv4 on {}: {}",
|
|
||||||
iface_name, e
|
|
||||||
)
|
|
||||||
})?;
|
|
||||||
|
|
||||||
if !output.status.success() {
|
|
||||||
return Err(format!(
|
|
||||||
"ip command for IPv4 on {} failed: {}",
|
|
||||||
iface_name,
|
|
||||||
String::from_utf8_lossy(&output.stderr)
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let json: Value = serde_json::from_slice(&output.stdout).map_err(|e| {
|
|
||||||
format!(
|
|
||||||
"Failed to parse ip JSON output for IPv4 on {}: {}",
|
|
||||||
iface_name, e
|
|
||||||
)
|
|
||||||
})?;
|
|
||||||
|
|
||||||
if let Some(addrs) = json.as_array() {
|
|
||||||
for addr_info in addrs {
|
|
||||||
if let Some(addr_info_obj) = addr_info.as_object()
|
|
||||||
&& let Some(addr_info) =
|
|
||||||
addr_info_obj.get("addr_info").and_then(|v| v.as_array())
|
|
||||||
{
|
|
||||||
for addr in addr_info {
|
|
||||||
if let Some(addr_obj) = addr.as_object()
|
|
||||||
&& let Some(ip) = addr_obj.get("local").and_then(|v| v.as_str())
|
|
||||||
{
|
|
||||||
ipv4.push(ip.to_string());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get IPv6 addresses using JSON output
|
|
||||||
let output = Command::new("ip")
|
|
||||||
.args(["-j", "-6", "addr", "show", iface_name])
|
|
||||||
.output()
|
|
||||||
.map_err(|e| {
|
|
||||||
format!(
|
|
||||||
"Failed to execute ip command for IPv6 on {}: {}",
|
|
||||||
iface_name, e
|
|
||||||
)
|
|
||||||
})?;
|
|
||||||
|
|
||||||
if !output.status.success() {
|
|
||||||
return Err(format!(
|
|
||||||
"ip command for IPv6 on {} failed: {}",
|
|
||||||
iface_name,
|
|
||||||
String::from_utf8_lossy(&output.stderr)
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let json: Value = serde_json::from_slice(&output.stdout).map_err(|e| {
|
|
||||||
format!(
|
|
||||||
"Failed to parse ip JSON output for IPv6 on {}: {}",
|
|
||||||
iface_name, e
|
|
||||||
)
|
|
||||||
})?;
|
|
||||||
|
|
||||||
if let Some(addrs) = json.as_array() {
|
|
||||||
for addr_info in addrs {
|
|
||||||
if let Some(addr_info_obj) = addr_info.as_object()
|
|
||||||
&& let Some(addr_info) =
|
|
||||||
addr_info_obj.get("addr_info").and_then(|v| v.as_array())
|
|
||||||
{
|
|
||||||
for addr in addr_info {
|
|
||||||
if let Some(addr_obj) = addr.as_object()
|
|
||||||
&& let Some(ip) = addr_obj.get("local").and_then(|v| v.as_str())
|
|
||||||
{
|
|
||||||
// Skip link-local addresses
|
|
||||||
if !ip.starts_with("fe80::") {
|
|
||||||
ipv6.push(ip.to_string());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok((ipv4, ipv6))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,37 +0,0 @@
|
|||||||
// src/main.rs
|
|
||||||
use actix_web::{App, HttpServer, Responder, get};
|
|
||||||
use hwinfo::PhysicalHost;
|
|
||||||
use std::env;
|
|
||||||
|
|
||||||
mod hwinfo;
|
|
||||||
|
|
||||||
#[get("/inventory")]
|
|
||||||
async fn inventory() -> impl Responder {
|
|
||||||
log::info!("Received inventory request");
|
|
||||||
let host = PhysicalHost::gather();
|
|
||||||
match host {
|
|
||||||
Ok(host) => {
|
|
||||||
log::info!("Inventory data gathered successfully");
|
|
||||||
actix_web::HttpResponse::Ok().json(host)
|
|
||||||
}
|
|
||||||
Err(error) => {
|
|
||||||
log::error!("Inventory data gathering FAILED");
|
|
||||||
actix_web::HttpResponse::InternalServerError().json(error)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[actix_web::main]
|
|
||||||
async fn main() -> std::io::Result<()> {
|
|
||||||
env_logger::init();
|
|
||||||
|
|
||||||
let port = env::var("HARMONY_INVENTORY_AGENT_PORT").unwrap_or_else(|_| "8080".to_string());
|
|
||||||
let bind_addr = format!("0.0.0.0:{}", port);
|
|
||||||
|
|
||||||
log::info!("Starting inventory agent on {}", bind_addr);
|
|
||||||
|
|
||||||
HttpServer::new(|| App::new().service(inventory))
|
|
||||||
.bind(&bind_addr)?
|
|
||||||
.run()
|
|
||||||
.await
|
|
||||||
}
|
|
||||||
@@ -1,23 +0,0 @@
|
|||||||
[package]
|
|
||||||
name = "harmony-secret"
|
|
||||||
edition = "2024"
|
|
||||||
version.workspace = true
|
|
||||||
readme.workspace = true
|
|
||||||
license.workspace = true
|
|
||||||
|
|
||||||
[dependencies]
|
|
||||||
harmony-secret-derive = { version = "0.1.0", path = "../harmony_secret_derive" }
|
|
||||||
serde = { version = "1.0.209", features = ["derive", "rc"] }
|
|
||||||
serde_json = "1.0.127"
|
|
||||||
thiserror.workspace = true
|
|
||||||
lazy_static.workspace = true
|
|
||||||
directories.workspace = true
|
|
||||||
log.workspace = true
|
|
||||||
infisical = "0.0.2"
|
|
||||||
tokio.workspace = true
|
|
||||||
async-trait.workspace = true
|
|
||||||
http.workspace = true
|
|
||||||
|
|
||||||
[dev-dependencies]
|
|
||||||
pretty_assertions.workspace = true
|
|
||||||
tempfile.workspace = true
|
|
||||||
@@ -1,18 +0,0 @@
|
|||||||
use lazy_static::lazy_static;

// Environment-driven configuration for the secret subsystem. Each value is
// resolved once, lazily, on first access.
lazy_static! {
    // Namespace (project name) that scopes all secret keys. Mandatory:
    // first access without HARMONY_SECRET_NAMESPACE set panics.
    pub static ref SECRET_NAMESPACE: String =
        std::env::var("HARMONY_SECRET_NAMESPACE").expect("HARMONY_SECRET_NAMESPACE environment variable is required, it should contain the name of the project you are working on to access its secrets");
    // Backend selector ("file" or "infisical"); None means use the default.
    pub static ref SECRET_STORE: Option<String> =
        std::env::var("HARMONY_SECRET_STORE").ok();
    // Infisical connection settings below are optional here but required
    // (via expect) when the Infisical backend is actually selected.
    pub static ref INFISICAL_URL: Option<String> =
        std::env::var("HARMONY_SECRET_INFISICAL_URL").ok();
    pub static ref INFISICAL_PROJECT_ID: Option<String> =
        std::env::var("HARMONY_SECRET_INFISICAL_PROJECT_ID").ok();
    pub static ref INFISICAL_ENVIRONMENT: Option<String> =
        std::env::var("HARMONY_SECRET_INFISICAL_ENVIRONMENT").ok();
    pub static ref INFISICAL_CLIENT_ID: Option<String> =
        std::env::var("HARMONY_SECRET_INFISICAL_CLIENT_ID").ok();
    pub static ref INFISICAL_CLIENT_SECRET: Option<String> =
        std::env::var("HARMONY_SECRET_INFISICAL_CLIENT_SECRET").ok();
}
|
|
||||||
@@ -1,166 +0,0 @@
|
|||||||
pub mod config;
|
|
||||||
mod store;
|
|
||||||
|
|
||||||
use crate::config::SECRET_NAMESPACE;
|
|
||||||
use async_trait::async_trait;
|
|
||||||
use config::INFISICAL_CLIENT_ID;
|
|
||||||
use config::INFISICAL_CLIENT_SECRET;
|
|
||||||
use config::INFISICAL_ENVIRONMENT;
|
|
||||||
use config::INFISICAL_PROJECT_ID;
|
|
||||||
use config::INFISICAL_URL;
|
|
||||||
use config::SECRET_STORE;
|
|
||||||
use serde::{Serialize, de::DeserializeOwned};
|
|
||||||
use std::fmt;
|
|
||||||
use store::InfisicalSecretStore;
|
|
||||||
use store::LocalFileSecretStore;
|
|
||||||
use thiserror::Error;
|
|
||||||
use tokio::sync::OnceCell;
|
|
||||||
|
|
||||||
pub use harmony_secret_derive::Secret;
|
|
||||||
|
|
||||||
// The Secret trait remains the same.
/// A typed secret: any serde-serializable value with a stable storage key.
///
/// Implementations are normally generated by `#[derive(Secret)]`, which
/// sets `KEY` to the struct's own name.
pub trait Secret: Serialize + DeserializeOwned + Sized {
    /// Storage key under which this secret type is persisted.
    const KEY: &'static str;
}
|
|
||||||
|
|
||||||
// The error enum remains the same.
/// Errors surfaced by the secret subsystem.
#[derive(Debug, Error)]
pub enum SecretStoreError {
    /// No value stored under the requested namespace/key pair.
    #[error("Secret not found for key '{key}' in namespace '{namespace}'")]
    NotFound { namespace: String, key: String },
    /// Stored bytes could not be decoded into the requested secret type.
    #[error("Failed to deserialize secret for key '{key}': {source}")]
    Deserialization {
        key: String,
        source: serde_json::Error,
    },
    /// Secret value could not be encoded to JSON before storage.
    #[error("Failed to serialize secret for key '{key}': {source}")]
    Serialization {
        key: String,
        source: serde_json::Error,
    },
    /// Backend-specific failure (I/O, network, auth, ...).
    #[error("Underlying storage error: {0}")]
    Store(#[from] Box<dyn std::error::Error + Send + Sync>),
}
|
|
||||||
|
|
||||||
// The trait is now async!
/// Byte-oriented storage backend for secrets.
///
/// Implementations persist opaque byte blobs addressed by (namespace, key);
/// JSON (de)serialization happens one level up in `SecretManager`.
#[async_trait]
pub trait SecretStore: fmt::Debug + Send + Sync {
    /// Fetches the raw bytes stored under `namespace`/`key`.
    async fn get_raw(&self, namespace: &str, key: &str) -> Result<Vec<u8>, SecretStoreError>;
    /// Stores `value` under `namespace`/`key`, replacing any previous value.
    async fn set_raw(
        &self,
        namespace: &str,
        key: &str,
        value: &[u8],
    ) -> Result<(), SecretStoreError>;
}
|
|
||||||
|
|
||||||
// Use OnceCell for async-friendly, one-time initialization.
static SECRET_MANAGER: OnceCell<SecretManager> = OnceCell::const_new();

/// Initializes and returns a reference to the global SecretManager.
///
/// The first caller pays the initialization cost (store construction and,
/// for Infisical, a network login); later callers get the cached instance.
async fn get_secret_manager() -> &'static SecretManager {
    SECRET_MANAGER.get_or_init(init_secret_manager).await
}
|
|
||||||
|
|
||||||
/// The async initialization function for the SecretManager.
|
|
||||||
async fn init_secret_manager() -> SecretManager {
|
|
||||||
let default_secret_score = "infisical".to_string();
|
|
||||||
let store_type = SECRET_STORE.as_ref().unwrap_or(&default_secret_score);
|
|
||||||
|
|
||||||
let store: Box<dyn SecretStore> = match store_type.as_str() {
|
|
||||||
"file" => Box::new(LocalFileSecretStore::default()),
|
|
||||||
"infisical" | _ => {
|
|
||||||
let store = InfisicalSecretStore::new(
|
|
||||||
INFISICAL_URL.clone().expect("Infisical url must be set, see harmony_secret config for ways to provide it. You can try with HARMONY_SECRET_INFISICAL_URL"),
|
|
||||||
INFISICAL_PROJECT_ID.clone().expect("Infisical project id must be set, see harmony_secret config for ways to provide it. You can try with HARMONY_SECRET_INFISICAL_PROJECT_ID"),
|
|
||||||
INFISICAL_ENVIRONMENT.clone().expect("Infisical environment must be set, see harmony_secret config for ways to provide it. You can try with HARMONY_SECRET_INFISICAL_ENVIRONMENT"),
|
|
||||||
INFISICAL_CLIENT_ID.clone().expect("Infisical client id must be set, see harmony_secret config for ways to provide it. You can try with HARMONY_SECRET_INFISICAL_CLIENT_ID"),
|
|
||||||
INFISICAL_CLIENT_SECRET.clone().expect("Infisical client secret must be set, see harmony_secret config for ways to provide it. You can try with HARMONY_SECRET_INFISICAL_CLIENT_SECRET"),
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
.expect("Failed to initialize Infisical secret store");
|
|
||||||
Box::new(store)
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
SecretManager::new(SECRET_NAMESPACE.clone(), store)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Manages the lifecycle of secrets, providing a simple static API.
#[derive(Debug)]
pub struct SecretManager {
    // Project namespace all keys are scoped under (HARMONY_SECRET_NAMESPACE).
    namespace: String,
    // Backend selected at startup (local file or Infisical).
    store: Box<dyn SecretStore>,
}
|
|
||||||
|
|
||||||
impl SecretManager {
|
|
||||||
fn new(namespace: String, store: Box<dyn SecretStore>) -> Self {
|
|
||||||
Self { namespace, store }
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Retrieves and deserializes a secret.
|
|
||||||
pub async fn get<T: Secret>() -> Result<T, SecretStoreError> {
|
|
||||||
let manager = get_secret_manager().await;
|
|
||||||
let raw_value = manager.store.get_raw(&manager.namespace, T::KEY).await?;
|
|
||||||
serde_json::from_slice(&raw_value).map_err(|e| SecretStoreError::Deserialization {
|
|
||||||
key: T::KEY.to_string(),
|
|
||||||
source: e,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Serializes and stores a secret.
|
|
||||||
pub async fn set<T: Secret>(secret: &T) -> Result<(), SecretStoreError> {
|
|
||||||
let manager = get_secret_manager().await;
|
|
||||||
let raw_value =
|
|
||||||
serde_json::to_vec(secret).map_err(|e| SecretStoreError::Serialization {
|
|
||||||
key: T::KEY.to_string(),
|
|
||||||
source: e,
|
|
||||||
})?;
|
|
||||||
manager
|
|
||||||
.store
|
|
||||||
.set_raw(&manager.namespace, T::KEY, &raw_value)
|
|
||||||
.await
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
mod test {
    use super::*;
    use pretty_assertions::assert_eq;
    use serde::{Deserialize, Serialize};

    // Nested payload used to check that structured data round-trips intact.
    #[derive(Serialize, Deserialize, Debug, PartialEq)]
    struct TestUserMeta {
        labels: Vec<String>,
    }

    // Derive-based secret: KEY resolves to "TestSecret" via #[derive(Secret)].
    #[derive(Secret, Serialize, Deserialize, Debug, PartialEq)]
    struct TestSecret {
        user: String,
        password: String,
        metadata: TestUserMeta,
    }

    // End-to-end round-trip against a real secret store, including a label
    // full of quoting/unicode edge cases.
    // NOTE(review): gated behind the custom cfg flag `secrete2etest` (sic),
    // presumably enabled only when an Infisical instance is available —
    // confirm how this cfg is passed (e.g. RUSTFLAGS) when running e2e tests.
    #[cfg(secrete2etest)]
    #[tokio::test]
    async fn set_and_retrieve_secret() {
        let secret = TestSecret {
            user: String::from("user"),
            password: String::from("password"),
            metadata: TestUserMeta {
                labels: vec![
                    String::from("label1"),
                    String::from("label2"),
                    String::from(
                        "some longet label with \" special @#%$)(udiojcia[]]] \"'asdij'' characters Nдs はにほへとちり าฟันพัฒนา yağız şoföre ç <20> <20> <20> <20> <20> <20> <20> <20> <20> <20> <20> <20> <20> 👩👩👧👦 /span> 👩👧👦 and why not emojis ",
                    ),
                ],
            },
        };

        SecretManager::set(&secret).await.unwrap();
        let value = SecretManager::get::<TestSecret>().await.unwrap();

        assert_eq!(value, secret);
    }
}
|
|
||||||
@@ -1,129 +0,0 @@
|
|||||||
use crate::{SecretStore, SecretStoreError};
|
|
||||||
use async_trait::async_trait;
|
|
||||||
use infisical::{
|
|
||||||
AuthMethod, InfisicalError,
|
|
||||||
client::Client,
|
|
||||||
secrets::{CreateSecretRequest, GetSecretRequest, UpdateSecretRequest},
|
|
||||||
};
|
|
||||||
use log::{info, warn};
|
|
||||||
|
|
||||||
/// `SecretStore` backend backed by an Infisical server.
#[derive(Debug)]
pub struct InfisicalSecretStore {
    // Authenticated API client (universal-auth login performed in `new`).
    client: Client,
    // Infisical project that holds the secrets.
    project_id: String,
    // Infisical environment slug (e.g. "dev"); takes precedence over the
    // namespace argument passed to get_raw/set_raw.
    environment: String,
}
|
|
||||||
|
|
||||||
impl InfisicalSecretStore {
|
|
||||||
/// Creates a new, authenticated Infisical client.
|
|
||||||
pub async fn new(
|
|
||||||
base_url: String,
|
|
||||||
project_id: String,
|
|
||||||
environment: String,
|
|
||||||
client_id: String,
|
|
||||||
client_secret: String,
|
|
||||||
) -> Result<Self, InfisicalError> {
|
|
||||||
info!("INFISICAL_STORE: Initializing client for URL: {base_url}");
|
|
||||||
|
|
||||||
// The builder and login logic remains the same.
|
|
||||||
let mut client = Client::builder().base_url(base_url).build().await?;
|
|
||||||
let auth_method = AuthMethod::new_universal_auth(client_id, client_secret);
|
|
||||||
client.login(auth_method).await?;
|
|
||||||
|
|
||||||
info!("INFISICAL_STORE: Client authenticated successfully.");
|
|
||||||
Ok(Self {
|
|
||||||
client,
|
|
||||||
project_id,
|
|
||||||
environment,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[async_trait]
impl SecretStore for InfisicalSecretStore {
    /// Fetches a secret's value as bytes.
    ///
    /// NOTE(review): the `_environment` (namespace) argument is deliberately
    /// ignored — the environment configured at construction time wins.
    /// Confirm callers expect this.
    async fn get_raw(&self, _environment: &str, key: &str) -> Result<Vec<u8>, SecretStoreError> {
        let environment = &self.environment;
        info!("INFISICAL_STORE: Getting key '{key}' from environment '{environment}'");

        let request = GetSecretRequest::builder(key, &self.project_id, environment).build();

        match self.client.secrets().get(request).await {
            Ok(secret) => Ok(secret.secret_value.into_bytes()),
            Err(e) => {
                // Correctly match against the actual InfisicalError enum.
                match e {
                    // The specific case for a 404 Not Found error.
                    InfisicalError::HttpError { status, .. }
                        if status == http::StatusCode::NOT_FOUND =>
                    {
                        Err(SecretStoreError::NotFound {
                            namespace: environment.to_string(),
                            key: key.to_string(),
                        })
                    }
                    // For all other errors, wrap them in our generic Store error.
                    _ => Err(SecretStoreError::Store(Box::new(e))),
                }
            }
        }
    }

    /// Stores a secret, creating it when it does not exist yet.
    ///
    /// The value must be valid UTF-8 since Infisical stores strings.
    /// Like `get_raw`, `_environment` is ignored in favor of the configured
    /// environment.
    async fn set_raw(
        &self,
        _environment: &str,
        key: &str,
        val: &[u8],
    ) -> Result<(), SecretStoreError> {
        info!(
            "INFISICAL_STORE: Setting key '{key}' in environment '{}'",
            self.environment
        );
        let value_str =
            String::from_utf8(val.to_vec()).map_err(|e| SecretStoreError::Store(Box::new(e)))?;

        // --- Upsert Logic ---
        // First, attempt to update the secret.
        let update_req = UpdateSecretRequest::builder(key, &self.project_id, &self.environment)
            .secret_value(&value_str)
            .build();

        match self.client.secrets().update(update_req).await {
            Ok(_) => {
                info!("INFISICAL_STORE: Successfully updated secret '{key}'.");
                Ok(())
            }
            Err(e) => {
                // If the update failed, check if it was because the secret doesn't exist.
                match e {
                    InfisicalError::HttpError { status, .. }
                        if status == http::StatusCode::NOT_FOUND =>
                    {
                        // The secret was not found, so we create it instead.
                        warn!(
                            "INFISICAL_STORE: Secret '{key}' not found for update, attempting to create it."
                        );
                        let create_req = CreateSecretRequest::builder(
                            key,
                            &value_str,
                            &self.project_id,
                            &self.environment,
                        )
                        .build();

                        // Handle potential errors during creation.
                        self.client
                            .secrets()
                            .create(create_req)
                            .await
                            .map_err(|create_err| SecretStoreError::Store(Box::new(create_err)))?;

                        info!("INFISICAL_STORE: Successfully created secret '{key}'.");
                        Ok(())
                    }
                    // Any other error during update is a genuine failure.
                    _ => Err(SecretStoreError::Store(Box::new(e))),
                }
            }
        }
    }
}
|
|
||||||
@@ -1,105 +0,0 @@
|
|||||||
use async_trait::async_trait;
|
|
||||||
use log::info;
|
|
||||||
use std::path::{Path, PathBuf};
|
|
||||||
|
|
||||||
use crate::{SecretStore, SecretStoreError};
|
|
||||||
|
|
||||||
/// `SecretStore` backend that keeps each secret in a JSON file on disk.
#[derive(Debug, Default)]
pub struct LocalFileSecretStore;

impl LocalFileSecretStore {
    /// Helper to consistently generate the secret file path.
    /// Layout: `<base_dir>/<namespace>_<key>.json`.
    fn get_file_path(base_dir: &Path, ns: &str, key: &str) -> PathBuf {
        let file_name = format!("{ns}_{key}.json");
        base_dir.join(file_name)
    }
}
|
|
||||||
|
|
||||||
#[async_trait]
|
|
||||||
impl SecretStore for LocalFileSecretStore {
|
|
||||||
async fn get_raw(&self, ns: &str, key: &str) -> Result<Vec<u8>, SecretStoreError> {
|
|
||||||
let data_dir = directories::BaseDirs::new()
|
|
||||||
.expect("Could not find a valid home directory")
|
|
||||||
.data_dir()
|
|
||||||
.join("harmony")
|
|
||||||
.join("secrets");
|
|
||||||
|
|
||||||
let file_path = Self::get_file_path(&data_dir, ns, key);
|
|
||||||
info!(
|
|
||||||
"LOCAL_STORE: Getting key '{key}' from namespace '{ns}' at {}",
|
|
||||||
file_path.display()
|
|
||||||
);
|
|
||||||
|
|
||||||
tokio::fs::read(&file_path)
|
|
||||||
.await
|
|
||||||
.map_err(|_| SecretStoreError::NotFound {
|
|
||||||
namespace: ns.to_string(),
|
|
||||||
key: key.to_string(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn set_raw(&self, ns: &str, key: &str, val: &[u8]) -> Result<(), SecretStoreError> {
|
|
||||||
let data_dir = directories::BaseDirs::new()
|
|
||||||
.expect("Could not find a valid home directory")
|
|
||||||
.data_dir()
|
|
||||||
.join("harmony")
|
|
||||||
.join("secrets");
|
|
||||||
|
|
||||||
let file_path = Self::get_file_path(&data_dir, ns, key);
|
|
||||||
info!(
|
|
||||||
"LOCAL_STORE: Setting key '{key}' in namespace '{ns}' at {}",
|
|
||||||
file_path.display()
|
|
||||||
);
|
|
||||||
|
|
||||||
if let Some(parent_dir) = file_path.parent() {
|
|
||||||
tokio::fs::create_dir_all(parent_dir)
|
|
||||||
.await
|
|
||||||
.map_err(|e| SecretStoreError::Store(Box::new(e)))?;
|
|
||||||
}
|
|
||||||
|
|
||||||
tokio::fs::write(&file_path, val)
|
|
||||||
.await
|
|
||||||
.map_err(|e| SecretStoreError::Store(Box::new(e)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::tempdir;

    // These tests exercise the path layout and a raw file round-trip inside
    // a temp directory; they deliberately bypass the real data-dir logic.
    #[tokio::test]
    async fn test_set_and_get_raw_successfully() {
        let dir = tempdir().unwrap();
        // NOTE(review): `store` is constructed but never used — the
        // assertions below read/write the file directly.
        let store = LocalFileSecretStore::default();
        let ns = "test-ns";
        let key = "test-key";
        let value = b"{\"data\":\"test-value\"}";

        // To test the store directly, we override the base directory logic.
        // For this test, we'll manually construct the path within our temp dir.
        let file_path = LocalFileSecretStore::get_file_path(dir.path(), ns, key);

        // Manually write to the temp path to simulate the store's behavior
        tokio::fs::create_dir_all(file_path.parent().unwrap())
            .await
            .unwrap();
        tokio::fs::write(&file_path, value).await.unwrap();

        // Now, test get_raw by reading from that same temp path (by mocking the path logic)
        let retrieved_value = tokio::fs::read(&file_path).await.unwrap();
        assert_eq!(retrieved_value, value);
    }

    #[tokio::test]
    async fn test_get_raw_not_found() {
        let dir = tempdir().unwrap();
        let ns = "test-ns";
        let key = "non-existent-key";

        // We need to check if reading a non-existent file gives the correct error
        let file_path = LocalFileSecretStore::get_file_path(dir.path(), ns, key);
        let result = tokio::fs::read(&file_path).await;

        assert!(matches!(result, Err(_)));
    }
}
|
|
||||||
@@ -1,4 +0,0 @@
|
|||||||
mod infisical;
|
|
||||||
mod local_file;
|
|
||||||
pub use infisical::*;
|
|
||||||
pub use local_file::*;
|
|
||||||
@@ -1,8 +0,0 @@
|
|||||||
# Harness for running the harmony-secret tests against a local Infisical
# instance: exports the configuration the crate reads at startup, then runs
# the test suite.
# WARNING(review): client id/secret are committed here in plain text —
# presumably dev-only throwaway credentials for a localhost instance;
# confirm, and rotate if they grant access to anything real.
export HARMONY_SECRET_NAMESPACE=harmony_test_secrets
export HARMONY_SECRET_INFISICAL_URL=http://localhost
export HARMONY_SECRET_INFISICAL_PROJECT_ID=eb4723dc-eede-44d7-98cc-c8e0caf29ccb
export HARMONY_SECRET_INFISICAL_ENVIRONMENT=dev
export HARMONY_SECRET_INFISICAL_CLIENT_ID=dd16b07f-0e38-4090-a1d0-922de9f44d91
export HARMONY_SECRET_INFISICAL_CLIENT_SECRET=bd2ae054e7759b11ca2e908494196337cc800bab138cb1f59e8d9b15ca3f286f

cargo test
|
|
||||||
@@ -1,13 +0,0 @@
|
|||||||
[package]
|
|
||||||
name = "harmony-secret-derive"
|
|
||||||
version = "0.1.0"
|
|
||||||
edition = "2024"
|
|
||||||
|
|
||||||
[lib]
|
|
||||||
proc-macro = true
|
|
||||||
|
|
||||||
[dependencies]
|
|
||||||
quote = "1.0"
|
|
||||||
proc-macro2 = "1.0"
|
|
||||||
proc-macro-crate = "3.3"
|
|
||||||
syn = "2.0"
|
|
||||||
@@ -1,38 +0,0 @@
|
|||||||
use proc_macro::TokenStream;
|
|
||||||
use proc_macro_crate::{FoundCrate, crate_name};
|
|
||||||
use quote::quote;
|
|
||||||
use syn::{DeriveInput, Ident, parse_macro_input};
|
|
||||||
|
|
||||||
#[proc_macro_derive(Secret)]
|
|
||||||
pub fn derive_secret(input: TokenStream) -> TokenStream {
|
|
||||||
let input = parse_macro_input!(input as DeriveInput);
|
|
||||||
let struct_ident = &input.ident;
|
|
||||||
|
|
||||||
// The key for the secret will be the stringified name of the struct itself.
|
|
||||||
// e.g., `struct OKDClusterSecret` becomes key `"OKDClusterSecret"`.
|
|
||||||
let key = struct_ident.to_string();
|
|
||||||
|
|
||||||
// Find the path to the `harmony_secret` crate.
|
|
||||||
let secret_crate_path = match crate_name("harmony-secret") {
|
|
||||||
Ok(FoundCrate::Itself) => quote!(crate),
|
|
||||||
Ok(FoundCrate::Name(name)) => {
|
|
||||||
let ident = Ident::new(&name, proc_macro2::Span::call_site());
|
|
||||||
quote!(::#ident)
|
|
||||||
}
|
|
||||||
Err(e) => {
|
|
||||||
return syn::Error::new(proc_macro2::Span::call_site(), e.to_string())
|
|
||||||
.to_compile_error()
|
|
||||||
.into();
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// The generated code now implements `Secret` for the struct itself.
|
|
||||||
// The struct must also derive `Serialize` and `Deserialize` for this to be useful.
|
|
||||||
let expanded = quote! {
|
|
||||||
impl #secret_crate_path::Secret for #struct_ident {
|
|
||||||
const KEY: &'static str = #key;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
TokenStream::from(expanded)
|
|
||||||
}
|
|
||||||
@@ -9,13 +9,7 @@ use widget::{help::HelpWidget, score::ScoreListWidget};
|
|||||||
use std::{panic, sync::Arc, time::Duration};
|
use std::{panic, sync::Arc, time::Duration};
|
||||||
|
|
||||||
use crossterm::event::{Event, EventStream, KeyCode, KeyEventKind};
|
use crossterm::event::{Event, EventStream, KeyCode, KeyEventKind};
|
||||||
use harmony::{
|
use harmony::{maestro::Maestro, score::Score, topology::Topology};
|
||||||
instrumentation::{self, HarmonyEvent},
|
|
||||||
inventory::Inventory,
|
|
||||||
maestro::Maestro,
|
|
||||||
score::Score,
|
|
||||||
topology::Topology,
|
|
||||||
};
|
|
||||||
use ratatui::{
|
use ratatui::{
|
||||||
self, Frame,
|
self, Frame,
|
||||||
layout::{Constraint, Layout, Position},
|
layout::{Constraint, Layout, Position},
|
||||||
@@ -45,62 +39,22 @@ pub mod tui {
|
|||||||
///
|
///
|
||||||
/// #[tokio::main]
|
/// #[tokio::main]
|
||||||
/// async fn main() {
|
/// async fn main() {
|
||||||
/// harmony_tui::run(
|
/// let inventory = Inventory::autoload();
|
||||||
/// Inventory::autoload(),
|
/// let topology = HAClusterTopology::autoload();
|
||||||
/// HAClusterTopology::autoload(),
|
/// let mut maestro = Maestro::new_without_initialization(inventory, topology);
|
||||||
/// vec![
|
///
|
||||||
/// Box::new(SuccessScore {}),
|
/// maestro.register_all(vec![
|
||||||
/// Box::new(ErrorScore {}),
|
/// Box::new(SuccessScore {}),
|
||||||
/// Box::new(PanicScore {}),
|
/// Box::new(ErrorScore {}),
|
||||||
/// ]
|
/// Box::new(PanicScore {}),
|
||||||
/// ).await.unwrap();
|
/// ]);
|
||||||
|
/// harmony_tui::init(maestro).await.unwrap();
|
||||||
/// }
|
/// }
|
||||||
/// ```
|
/// ```
|
||||||
pub async fn run<T: Topology + Send + Sync + 'static>(
|
pub async fn init<T: Topology + Send + Sync + 'static>(
|
||||||
inventory: Inventory,
|
|
||||||
topology: T,
|
|
||||||
scores: Vec<Box<dyn Score<T>>>,
|
|
||||||
) -> Result<(), Box<dyn std::error::Error>> {
|
|
||||||
let handle = init_instrumentation().await;
|
|
||||||
|
|
||||||
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
|
|
||||||
maestro.register_all(scores);
|
|
||||||
|
|
||||||
let result = init(maestro).await;
|
|
||||||
|
|
||||||
let _ = tokio::try_join!(handle);
|
|
||||||
result
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn init<T: Topology + Send + Sync + 'static>(
|
|
||||||
maestro: Maestro<T>,
|
maestro: Maestro<T>,
|
||||||
) -> Result<(), Box<dyn std::error::Error>> {
|
) -> Result<(), Box<dyn std::error::Error>> {
|
||||||
let result = HarmonyTUI::new(maestro).init().await;
|
HarmonyTUI::new(maestro).init().await
|
||||||
|
|
||||||
instrumentation::instrument(HarmonyEvent::HarmonyFinished).unwrap();
|
|
||||||
result
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn init_instrumentation() -> tokio::task::JoinHandle<()> {
|
|
||||||
let handle = tokio::spawn(handle_harmony_events());
|
|
||||||
|
|
||||||
loop {
|
|
||||||
if instrumentation::instrument(HarmonyEvent::HarmonyStarted).is_ok() {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
handle
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn handle_harmony_events() {
|
|
||||||
instrumentation::subscribe("Harmony TUI Logger", async |event| {
|
|
||||||
if let HarmonyEvent::HarmonyFinished = event {
|
|
||||||
return false;
|
|
||||||
};
|
|
||||||
true
|
|
||||||
})
|
|
||||||
.await;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub struct HarmonyTUI<T: Topology> {
|
pub struct HarmonyTUI<T: Topology> {
|
||||||
|
|||||||
@@ -1,17 +0,0 @@
|
|||||||
[package]
|
|
||||||
name = "iobench"
|
|
||||||
edition = "2024"
|
|
||||||
version = "1.0.0"
|
|
||||||
license = "AGPL-3.0-or-later"
|
|
||||||
description = "A small command line utility to run fio benchmarks on localhost or remote ssh or kubernetes host. Was born out of a need to benchmark various ceph configurations!"
|
|
||||||
|
|
||||||
|
|
||||||
[dependencies]
|
|
||||||
clap = { version = "4.0", features = ["derive"] }
|
|
||||||
chrono = "0.4"
|
|
||||||
serde = { version = "1.0", features = ["derive"] }
|
|
||||||
serde_json = "1.0"
|
|
||||||
csv = "1.1"
|
|
||||||
num_cpus = "1.13"
|
|
||||||
|
|
||||||
[workspace]
|
|
||||||
@@ -1,10 +0,0 @@
|
|||||||
This project was generated mostly by Gemini but it works so... :)
|
|
||||||
|
|
||||||
## To run iobench dashboard
|
|
||||||
|
|
||||||
```bash
|
|
||||||
virtualenv venv
|
|
||||||
source venv/bin/activate
|
|
||||||
pip install -r requirements_freeze.txt
|
|
||||||
python iobench-dash-v4.py
|
|
||||||
```
|
|
||||||
@@ -1,229 +0,0 @@
|
|||||||
import dash
|
|
||||||
from dash import dcc, html, Input, Output, State, clientside_callback, ClientsideFunction
|
|
||||||
import plotly.express as px
|
|
||||||
import pandas as pd
|
|
||||||
import dash_bootstrap_components as dbc
|
|
||||||
import io
|
|
||||||
|
|
||||||
# --- Data Loading and Preparation ---
|
|
||||||
# csv_data = """label,test_name,iops,bandwidth_kibps,latency_mean_ms,latency_stddev_ms
|
|
||||||
# Ceph HDD Only,read-4k-sync-test,1474.302,5897,0.673,0.591
|
|
||||||
# Ceph HDD Only,write-4k-sync-test,14.126,56,27.074,7.046
|
|
||||||
# Ceph HDD Only,randread-4k-sync-test,225.140,900,4.436,6.918
|
|
||||||
# Ceph HDD Only,randwrite-4k-sync-test,13.129,52,34.891,10.859
|
|
||||||
# Ceph HDD Only,multiread-4k-sync-test,6873.675,27494,0.578,0.764
|
|
||||||
# Ceph HDD Only,multiwrite-4k-sync-test,57.135,228,38.660,11.293
|
|
||||||
# Ceph HDD Only,multirandread-4k-sync-test,2451.376,9805,1.626,2.515
|
|
||||||
# Ceph HDD Only,multirandwrite-4k-sync-test,54.642,218,33.492,13.111
|
|
||||||
# Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,read-4k-sync-test,1495.700,5982,0.664,1.701
|
|
||||||
# Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,write-4k-sync-test,16.990,67,17.502,9.908
|
|
||||||
# Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,randread-4k-sync-test,159.256,637,6.274,9.232
|
|
||||||
# Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,randwrite-4k-sync-test,16.693,66,24.094,16.099
|
|
||||||
# Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,multiread-4k-sync-test,7305.559,29222,0.544,1.338
|
|
||||||
# Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,multiwrite-4k-sync-test,52.260,209,34.891,17.576
|
|
||||||
# Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,multirandread-4k-sync-test,700.606,2802,5.700,10.429
|
|
||||||
# Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,multirandwrite-4k-sync-test,52.723,210,29.709,25.829
|
|
||||||
# Ceph 2 Hosts WAL+DB SSD Only,randwrite-4k-sync-test,90.037,360,3.617,8.321
|
|
||||||
# Ceph WAL+DB SSD During Rebuild,randwrite-4k-sync-test,41.008,164,10.138,19.333
|
|
||||||
# Ceph WAL+DB SSD OSD HDD,read-4k-sync-test,1520.299,6081,0.654,1.539
|
|
||||||
# Ceph WAL+DB SSD OSD HDD,write-4k-sync-test,78.528,314,4.074,9.101
|
|
||||||
# Ceph WAL+DB SSD OSD HDD,randread-4k-sync-test,153.303,613,6.518,9.036
|
|
||||||
# Ceph WAL+DB SSD OSD HDD,randwrite-4k-sync-test,48.677,194,8.785,20.356
|
|
||||||
# Ceph WAL+DB SSD OSD HDD,multiread-4k-sync-test,6804.880,27219,0.584,1.422
|
|
||||||
# Ceph WAL+DB SSD OSD HDD,multiwrite-4k-sync-test,311.513,1246,4.978,9.458
|
|
||||||
# Ceph WAL+DB SSD OSD HDD,multirandread-4k-sync-test,581.756,2327,6.869,10.204
|
|
||||||
# Ceph WAL+DB SSD OSD HDD,multirandwrite-4k-sync-test,120.556,482,13.463,25.440
|
|
||||||
# """
|
|
||||||
#
|
|
||||||
# df = pd.read_csv(io.StringIO(csv_data))
|
|
||||||
df = pd.read_csv("iobench.csv") # Replace with the actual file path
|
|
||||||
df['bandwidth_mbps'] = df['bandwidth_kibps'] / 1024
|
|
||||||
|
|
||||||
# --- App Initialization and Global Settings ---
|
|
||||||
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.FLATLY])
|
|
||||||
|
|
||||||
# Create master lists of options for checklists
|
|
||||||
unique_labels = sorted(df['label'].unique())
|
|
||||||
unique_tests = sorted(df['test_name'].unique())
|
|
||||||
|
|
||||||
# Create a consistent color map for each unique label
|
|
||||||
color_map = {label: color for label, color in zip(unique_labels, px.colors.qualitative.Plotly)}
|
|
||||||
|
|
||||||
# --- App Layout ---
|
|
||||||
app.layout = dbc.Container([
|
|
||||||
# Header
|
|
||||||
dbc.Row(dbc.Col(html.H1("Ceph iobench Performance Dashboard", className="text-primary"),), className="my-4 text-center"),
|
|
||||||
|
|
||||||
# Controls and Graphs Row
|
|
||||||
dbc.Row([
|
|
||||||
# Control Panel Column
|
|
||||||
dbc.Col([
|
|
||||||
dbc.Card([
|
|
||||||
dbc.CardBody([
|
|
||||||
html.H4("Control Panel", className="card-title"),
|
|
||||||
html.Hr(),
|
|
||||||
|
|
||||||
# Metric Selection
|
|
||||||
dbc.Label("1. Select Metrics to Display:", html_for="metric-checklist", className="fw-bold"),
|
|
||||||
dcc.Checklist(
|
|
||||||
id='metric-checklist',
|
|
||||||
options=[
|
|
||||||
{'label': 'IOPS', 'value': 'iops'},
|
|
||||||
{'label': 'Latency (ms)', 'value': 'latency_mean_ms'},
|
|
||||||
{'label': 'Bandwidth (MB/s)', 'value': 'bandwidth_mbps'}
|
|
||||||
],
|
|
||||||
value=['iops', 'latency_mean_ms', 'bandwidth_mbps'], # Default selection
|
|
||||||
labelClassName="d-block"
|
|
||||||
),
|
|
||||||
html.Hr(),
|
|
||||||
|
|
||||||
# Configuration Selection
|
|
||||||
dbc.Label("2. Select Configurations:", html_for="config-checklist", className="fw-bold"),
|
|
||||||
dbc.ButtonGroup([
|
|
||||||
dbc.Button("All", id="config-select-all", n_clicks=0, color="primary", outline=True, size="sm"),
|
|
||||||
dbc.Button("None", id="config-select-none", n_clicks=0, color="primary", outline=True, size="sm"),
|
|
||||||
], className="mb-2"),
|
|
||||||
dcc.Checklist(
|
|
||||||
id='config-checklist',
|
|
||||||
options=[{'label': label, 'value': label} for label in unique_labels],
|
|
||||||
value=unique_labels, # Select all by default
|
|
||||||
labelClassName="d-block"
|
|
||||||
),
|
|
||||||
html.Hr(),
|
|
||||||
|
|
||||||
# Test Name Selection
|
|
||||||
dbc.Label("3. Select Tests:", html_for="test-checklist", className="fw-bold"),
|
|
||||||
dbc.ButtonGroup([
|
|
||||||
dbc.Button("All", id="test-select-all", n_clicks=0, color="primary", outline=True, size="sm"),
|
|
||||||
dbc.Button("None", id="test-select-none", n_clicks=0, color="primary", outline=True, size="sm"),
|
|
||||||
], className="mb-2"),
|
|
||||||
dcc.Checklist(
|
|
||||||
id='test-checklist',
|
|
||||||
options=[{'label': test, 'value': test} for test in unique_tests],
|
|
||||||
value=unique_tests, # Select all by default
|
|
||||||
labelClassName="d-block"
|
|
||||||
),
|
|
||||||
])
|
|
||||||
], className="mb-4")
|
|
||||||
], width=12, lg=4),
|
|
||||||
|
|
||||||
# Graph Display Column
|
|
||||||
dbc.Col(id='graph-container', width=12, lg=8)
|
|
||||||
])
|
|
||||||
], fluid=True)
|
|
||||||
|
|
||||||
|
|
||||||
# --- Callbacks ---
|
|
||||||
|
|
||||||
# Callback to handle "Select All" / "Select None" for configurations
|
|
||||||
@app.callback(
|
|
||||||
Output('config-checklist', 'value'),
|
|
||||||
Input('config-select-all', 'n_clicks'),
|
|
||||||
Input('config-select-none', 'n_clicks'),
|
|
||||||
prevent_initial_call=True
|
|
||||||
)
|
|
||||||
def select_all_none_configs(all_clicks, none_clicks):
|
|
||||||
ctx = dash.callback_context
|
|
||||||
if not ctx.triggered:
|
|
||||||
return dash.no_update
|
|
||||||
|
|
||||||
button_id = ctx.triggered[0]['prop_id'].split('.')[0]
|
|
||||||
if button_id == 'config-select-all':
|
|
||||||
return unique_labels
|
|
||||||
elif button_id == 'config-select-none':
|
|
||||||
return []
|
|
||||||
return dash.no_update
|
|
||||||
|
|
||||||
# Callback to handle "Select All" / "Select None" for tests
|
|
||||||
@app.callback(
|
|
||||||
Output('test-checklist', 'value'),
|
|
||||||
Input('test-select-all', 'n_clicks'),
|
|
||||||
Input('test-select-none', 'n_clicks'),
|
|
||||||
prevent_initial_call=True
|
|
||||||
)
|
|
||||||
def select_all_none_tests(all_clicks, none_clicks):
|
|
||||||
ctx = dash.callback_context
|
|
||||||
if not ctx.triggered:
|
|
||||||
return dash.no_update
|
|
||||||
|
|
||||||
button_id = ctx.triggered[0]['prop_id'].split('.')[0]
|
|
||||||
if button_id == 'test-select-all':
|
|
||||||
return unique_tests
|
|
||||||
elif button_id == 'test-select-none':
|
|
||||||
return []
|
|
||||||
return dash.no_update
|
|
||||||
|
|
||||||
|
|
||||||
# Main callback to update graphs based on all selections
|
|
||||||
@app.callback(
|
|
||||||
Output('graph-container', 'children'),
|
|
||||||
[Input('metric-checklist', 'value'),
|
|
||||||
Input('config-checklist', 'value'),
|
|
||||||
Input('test-checklist', 'value')]
|
|
||||||
)
|
|
||||||
def update_graphs(selected_metrics, selected_configs, selected_tests):
|
|
||||||
"""
|
|
||||||
This function is triggered when any control's value changes.
|
|
||||||
It generates and returns a list of graphs based on all user selections.
|
|
||||||
"""
|
|
||||||
# Handle cases where no selection is made to prevent errors and show a helpful message
|
|
||||||
if not all([selected_metrics, selected_configs, selected_tests]):
|
|
||||||
return dbc.Alert(
|
|
||||||
"Please select at least one item from each category (Metric, Configuration, and Test) to view data.",
|
|
||||||
color="info",
|
|
||||||
className="mt-4"
|
|
||||||
)
|
|
||||||
|
|
||||||
# Filter the DataFrame based on all selected criteria
|
|
||||||
filtered_df = df[df['label'].isin(selected_configs) & df['test_name'].isin(selected_tests)]
|
|
||||||
|
|
||||||
# If the filtered data is empty after selection, inform the user
|
|
||||||
if filtered_df.empty:
|
|
||||||
return dbc.Alert("No data available for the current selection.", color="warning", className="mt-4")
|
|
||||||
|
|
||||||
graph_list = []
|
|
||||||
metric_titles = {
|
|
||||||
'iops': 'IOPS Comparison (Higher is Better)',
|
|
||||||
'latency_mean_ms': 'Mean Latency (ms) Comparison (Lower is Better)',
|
|
||||||
'bandwidth_mbps': 'Bandwidth (MB/s) Comparison (Higher is Better)'
|
|
||||||
}
|
|
||||||
|
|
||||||
for metric in selected_metrics:
|
|
||||||
sort_order = 'total ascending' if metric == 'latency_mean_ms' else 'total descending'
|
|
||||||
error_y_param = 'latency_stddev_ms' if metric == 'latency_mean_ms' else None
|
|
||||||
|
|
||||||
fig = px.bar(
|
|
||||||
filtered_df,
|
|
||||||
x='test_name',
|
|
||||||
y=metric,
|
|
||||||
color='label',
|
|
||||||
barmode='group',
|
|
||||||
color_discrete_map=color_map,
|
|
||||||
error_y=error_y_param,
|
|
||||||
title=metric_titles.get(metric, metric),
|
|
||||||
labels={
|
|
||||||
"test_name": "Benchmark Test Name",
|
|
||||||
"iops": "IOPS",
|
|
||||||
"latency_mean_ms": "Mean Latency (ms)",
|
|
||||||
"bandwidth_mbps": "Bandwidth (MB/s)",
|
|
||||||
"label": "Cluster Configuration"
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
fig.update_layout(
|
|
||||||
height=500,
|
|
||||||
xaxis_title=None,
|
|
||||||
legend_title="Configuration",
|
|
||||||
title_x=0.5,
|
|
||||||
xaxis={'categoryorder': sort_order},
|
|
||||||
xaxis_tickangle=-45,
|
|
||||||
margin=dict(b=120) # Add bottom margin to prevent tick labels from being cut off
|
|
||||||
)
|
|
||||||
|
|
||||||
graph_list.append(dbc.Row(dbc.Col(dcc.Graph(figure=fig)), className="mb-4"))
|
|
||||||
|
|
||||||
return graph_list
|
|
||||||
|
|
||||||
# --- Run the App ---
|
|
||||||
if __name__ == '__main__':
|
|
||||||
app.run(debug=True)
|
|
||||||
@@ -1,29 +0,0 @@
|
|||||||
blinker==1.9.0
|
|
||||||
certifi==2025.7.14
|
|
||||||
charset-normalizer==3.4.2
|
|
||||||
click==8.2.1
|
|
||||||
dash==3.2.0
|
|
||||||
dash-bootstrap-components==2.0.3
|
|
||||||
Flask==3.1.1
|
|
||||||
idna==3.10
|
|
||||||
importlib_metadata==8.7.0
|
|
||||||
itsdangerous==2.2.0
|
|
||||||
Jinja2==3.1.6
|
|
||||||
MarkupSafe==3.0.2
|
|
||||||
narwhals==2.0.1
|
|
||||||
nest-asyncio==1.6.0
|
|
||||||
numpy==2.3.2
|
|
||||||
packaging==25.0
|
|
||||||
pandas==2.3.1
|
|
||||||
plotly==6.2.0
|
|
||||||
python-dateutil==2.9.0.post0
|
|
||||||
pytz==2025.2
|
|
||||||
requests==2.32.4
|
|
||||||
retrying==1.4.1
|
|
||||||
setuptools==80.9.0
|
|
||||||
six==1.17.0
|
|
||||||
typing_extensions==4.14.1
|
|
||||||
tzdata==2025.2
|
|
||||||
urllib3==2.5.0
|
|
||||||
Werkzeug==3.1.3
|
|
||||||
zipp==3.23.0
|
|
||||||
@@ -1,41 +0,0 @@
|
|||||||
apiVersion: apps/v1
|
|
||||||
kind: Deployment
|
|
||||||
metadata:
|
|
||||||
name: iobench
|
|
||||||
labels:
|
|
||||||
app: iobench
|
|
||||||
spec:
|
|
||||||
replicas: 1
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app: iobench
|
|
||||||
template:
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
app: iobench
|
|
||||||
spec:
|
|
||||||
containers:
|
|
||||||
- name: fio
|
|
||||||
image: juicedata/fio:latest # Replace with your preferred fio image
|
|
||||||
imagePullPolicy: IfNotPresent
|
|
||||||
command: [ "sleep", "infinity" ] # Keeps the container running for kubectl exec
|
|
||||||
volumeMounts:
|
|
||||||
- name: iobench-pvc
|
|
||||||
mountPath: /data # Mount the PVC at /data
|
|
||||||
volumes:
|
|
||||||
- name: iobench-pvc
|
|
||||||
persistentVolumeClaim:
|
|
||||||
claimName: iobench-pvc # Matches your PVC name
|
|
||||||
---
|
|
||||||
apiVersion: v1
|
|
||||||
kind: PersistentVolumeClaim
|
|
||||||
metadata:
|
|
||||||
name: iobench-pvc
|
|
||||||
spec:
|
|
||||||
accessModes:
|
|
||||||
- ReadWriteOnce
|
|
||||||
resources:
|
|
||||||
requests:
|
|
||||||
storage: 5Gi
|
|
||||||
storageClassName: ceph-block
|
|
||||||
|
|
||||||
@@ -1,253 +0,0 @@
|
|||||||
use std::fs;
|
|
||||||
use std::io::{self, Write};
|
|
||||||
use std::process::{Command, Stdio};
|
|
||||||
use std::thread;
|
|
||||||
use std::time::Duration;
|
|
||||||
|
|
||||||
use chrono::Local;
|
|
||||||
use clap::Parser;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
|
|
||||||
/// A simple yet powerful I/O benchmarking tool using fio.
|
|
||||||
#[derive(Parser, Debug)]
|
|
||||||
#[command(author, version, about, long_about = None)]
|
|
||||||
struct Args {
|
|
||||||
/// Target for the benchmark.
|
|
||||||
/// Formats:
|
|
||||||
/// - localhost (default)
|
|
||||||
/// - ssh/{user}@{host}
|
|
||||||
/// - ssh/{user}@{host}:{port}
|
|
||||||
/// - k8s/{namespace}/{pod}
|
|
||||||
#[arg(short, long, default_value = "localhost")]
|
|
||||||
target: String,
|
|
||||||
|
|
||||||
#[arg(short, long, default_value = ".")]
|
|
||||||
benchmark_dir: String,
|
|
||||||
|
|
||||||
/// Comma-separated list of tests to run.
|
|
||||||
/// Available tests: read, write, randread, randwrite,
|
|
||||||
/// multiread, multiwrite, multirandread, multirandwrite.
|
|
||||||
#[arg(long, default_value = "read,write,randread,randwrite,multiread,multiwrite,multirandread,multirandwrite")]
|
|
||||||
tests: String,
|
|
||||||
|
|
||||||
/// Duration of each test in seconds.
|
|
||||||
#[arg(long, default_value_t = 15)]
|
|
||||||
duration: u64,
|
|
||||||
|
|
||||||
/// Output directory for results.
|
|
||||||
/// Defaults to ./iobench-{current_datetime}.
|
|
||||||
#[arg(long)]
|
|
||||||
output_dir: Option<String>,
|
|
||||||
|
|
||||||
/// The size of the test file for fio.
|
|
||||||
#[arg(long, default_value = "1G")]
|
|
||||||
size: String,
|
|
||||||
|
|
||||||
/// The block size for I/O operations.
|
|
||||||
#[arg(long, default_value = "4k")]
|
|
||||||
block_size: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Serialize, Deserialize)]
|
|
||||||
struct FioOutput {
|
|
||||||
jobs: Vec<FioJobResult>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Serialize, Deserialize)]
|
|
||||||
struct FioJobResult {
|
|
||||||
jobname: String,
|
|
||||||
read: FioMetrics,
|
|
||||||
write: FioMetrics,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Serialize, Deserialize)]
|
|
||||||
struct FioMetrics {
|
|
||||||
bw: f64,
|
|
||||||
iops: f64,
|
|
||||||
clat_ns: LatencyMetrics,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Serialize, Deserialize)]
|
|
||||||
struct LatencyMetrics {
|
|
||||||
mean: f64,
|
|
||||||
stddev: f64,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Serialize)]
|
|
||||||
struct BenchmarkResult {
|
|
||||||
test_name: String,
|
|
||||||
iops: f64,
|
|
||||||
bandwidth_kibps: f64,
|
|
||||||
latency_mean_ms: f64,
|
|
||||||
latency_stddev_ms: f64,
|
|
||||||
}
|
|
||||||
|
|
||||||
fn main() -> io::Result<()> {
|
|
||||||
let args = Args::parse();
|
|
||||||
|
|
||||||
let output_dir = args.output_dir.unwrap_or_else(|| {
|
|
||||||
format!("./iobench-{}", Local::now().format("%Y-%m-%d-%H%M%S"))
|
|
||||||
});
|
|
||||||
fs::create_dir_all(&output_dir)?;
|
|
||||||
|
|
||||||
let tests_to_run: Vec<&str> = args.tests.split(',').collect();
|
|
||||||
let mut results = Vec::new();
|
|
||||||
|
|
||||||
for test in tests_to_run {
|
|
||||||
println!("--------------------------------------------------");
|
|
||||||
println!("Running test: {}", test);
|
|
||||||
|
|
||||||
let (rw, numjobs) = match test {
|
|
||||||
"read" => ("read", 1),
|
|
||||||
"write" => ("write", 1),
|
|
||||||
"randread" => ("randread", 1),
|
|
||||||
"randwrite" => ("randwrite", 1),
|
|
||||||
"multiread" => ("read", 4),
|
|
||||||
"multiwrite" => ("write", 4),
|
|
||||||
"multirandread" => ("randread", 4),
|
|
||||||
"multirandwrite" => ("randwrite", 4),
|
|
||||||
_ => {
|
|
||||||
eprintln!("Unknown test: {}. Skipping.", test);
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let test_name = format!("{}-{}-sync-test", test, args.block_size);
|
|
||||||
let fio_command = format!(
|
|
||||||
"fio --filename={}/iobench_testfile --direct=1 --fsync=1 --rw={} --bs={} --numjobs={} --iodepth=1 --runtime={} --time_based --group_reporting --name={} --size={} --output-format=json",
|
|
||||||
args.benchmark_dir, rw, args.block_size, numjobs, args.duration, test_name, args.size
|
|
||||||
);
|
|
||||||
|
|
||||||
println!("Executing command:\n{}\n", fio_command);
|
|
||||||
|
|
||||||
let output = match run_command(&args.target, &fio_command) {
|
|
||||||
Ok(out) => out,
|
|
||||||
Err(e) => {
|
|
||||||
eprintln!("Failed to execute command for test {}: {}", test, e);
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
|
|
||||||
let result = parse_fio_output(&output, &test_name, rw);
|
|
||||||
// TODO store raw fio output and print it
|
|
||||||
match result {
|
|
||||||
Ok(res) => {
|
|
||||||
results.push(res);
|
|
||||||
}
|
|
||||||
Err(e) => {
|
|
||||||
eprintln!("Error parsing fio output for test {}: {}", test, e);
|
|
||||||
eprintln!("Raw output:\n{}", output);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
println!("{output}");
|
|
||||||
println!("Test {} completed.", test);
|
|
||||||
// A brief pause to let the system settle before the next test.
|
|
||||||
thread::sleep(Duration::from_secs(2));
|
|
||||||
}
|
|
||||||
|
|
||||||
// Cleanup the test file on the target
|
|
||||||
println!("--------------------------------------------------");
|
|
||||||
println!("Cleaning up test file on target...");
|
|
||||||
let cleanup_command = "rm -f ./iobench_testfile";
|
|
||||||
if let Err(e) = run_command(&args.target, cleanup_command) {
|
|
||||||
eprintln!("Warning: Failed to clean up test file on target: {}", e);
|
|
||||||
} else {
|
|
||||||
println!("Cleanup successful.");
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
if results.is_empty() {
|
|
||||||
println!("\nNo benchmark results to display.");
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
|
|
||||||
// Output results to a CSV file for easy analysis
|
|
||||||
let csv_path = format!("{}/summary.csv", output_dir);
|
|
||||||
let mut wtr = csv::Writer::from_path(&csv_path)?;
|
|
||||||
for result in &results {
|
|
||||||
wtr.serialize(result)?;
|
|
||||||
}
|
|
||||||
wtr.flush()?;
|
|
||||||
|
|
||||||
println!("\nBenchmark summary saved to {}", csv_path);
|
|
||||||
println!("\n--- Benchmark Results Summary ---");
|
|
||||||
println!("{:<25} {:>10} {:>18} {:>20} {:>22}", "Test Name", "IOPS", "Bandwidth (KiB/s)", "Latency Mean (ms)", "Latency StdDev (ms)");
|
|
||||||
println!("{:-<98}", "");
|
|
||||||
for result in results {
|
|
||||||
println!("{:<25} {:>10.2} {:>18.2} {:>20.4} {:>22.4}", result.test_name, result.iops, result.bandwidth_kibps, result.latency_mean_ms, result.latency_stddev_ms);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn run_command(target: &str, command: &str) -> io::Result<String> {
|
|
||||||
let (program, args) = if target == "localhost" {
|
|
||||||
("sudo", vec!["sh".to_string(), "-c".to_string(), command.to_string()])
|
|
||||||
} else if target.starts_with("ssh/") {
|
|
||||||
let target_str = target.strip_prefix("ssh/").unwrap();
|
|
||||||
let ssh_target;
|
|
||||||
let mut ssh_args = vec!["-o".to_string(), "StrictHostKeyChecking=no".to_string()];
|
|
||||||
let port_parts: Vec<&str> = target_str.split(':').collect();
|
|
||||||
if port_parts.len() == 2 {
|
|
||||||
ssh_target = port_parts[0].to_string();
|
|
||||||
ssh_args.push("-p".to_string());
|
|
||||||
ssh_args.push(port_parts[1].to_string());
|
|
||||||
} else {
|
|
||||||
ssh_target = target_str.to_string();
|
|
||||||
}
|
|
||||||
ssh_args.push(ssh_target);
|
|
||||||
ssh_args.push(format!("sudo sh -c '{}'", command));
|
|
||||||
("ssh", ssh_args)
|
|
||||||
} else if target.starts_with("k8s/") {
|
|
||||||
let parts: Vec<&str> = target.strip_prefix("k8s/").unwrap().split('/').collect();
|
|
||||||
if parts.len() != 2 {
|
|
||||||
return Err(io::Error::new(io::ErrorKind::InvalidInput, "Invalid k8s target format. Expected k8s/{namespace}/{pod}"));
|
|
||||||
}
|
|
||||||
let namespace = parts[0];
|
|
||||||
let pod = parts[1];
|
|
||||||
("kubectl", vec!["exec".to_string(), "-n".to_string(), namespace.to_string(), pod.to_string(), "--".to_string(), "sh".to_string(), "-c".to_string(), command.to_string()])
|
|
||||||
} else {
|
|
||||||
return Err(io::Error::new(io::ErrorKind::InvalidInput, "Invalid target format"));
|
|
||||||
};
|
|
||||||
|
|
||||||
let mut cmd = Command::new(program);
|
|
||||||
cmd.args(&args);
|
|
||||||
cmd.stdout(Stdio::piped()).stderr(Stdio::piped());
|
|
||||||
|
|
||||||
let child = cmd.spawn()?;
|
|
||||||
let output = child.wait_with_output()?;
|
|
||||||
|
|
||||||
if !output.status.success() {
|
|
||||||
eprintln!("Command failed with status: {}", output.status);
|
|
||||||
io::stderr().write_all(&output.stderr)?;
|
|
||||||
return Err(io::Error::new(io::ErrorKind::Other, "Command execution failed"));
|
|
||||||
}
|
|
||||||
|
|
||||||
String::from_utf8(output.stdout)
|
|
||||||
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn parse_fio_output(output: &str, test_name: &str, rw: &str) -> Result<BenchmarkResult, String> {
|
|
||||||
let fio_data: FioOutput = serde_json::from_str(output)
|
|
||||||
.map_err(|e| format!("Failed to deserialize fio JSON: {}", e))?;
|
|
||||||
|
|
||||||
let job_result = fio_data.jobs.iter()
|
|
||||||
.find(|j| j.jobname == test_name)
|
|
||||||
.ok_or_else(|| format!("Could not find job result for '{}' in fio output", test_name))?;
|
|
||||||
|
|
||||||
let metrics = if rw.contains("read") {
|
|
||||||
&job_result.read
|
|
||||||
} else {
|
|
||||||
&job_result.write
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(BenchmarkResult {
|
|
||||||
test_name: test_name.to_string(),
|
|
||||||
iops: metrics.iops,
|
|
||||||
bandwidth_kibps: metrics.bw,
|
|
||||||
latency_mean_ms: metrics.clat_ns.mean / 1_000_000.0,
|
|
||||||
latency_stddev_ms: metrics.clat_ns.stddev / 1_000_000.0,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
@@ -12,7 +12,7 @@ env_logger = { workspace = true }
|
|||||||
yaserde = { git = "https://github.com/jggc/yaserde.git" }
|
yaserde = { git = "https://github.com/jggc/yaserde.git" }
|
||||||
yaserde_derive = { git = "https://github.com/jggc/yaserde.git" }
|
yaserde_derive = { git = "https://github.com/jggc/yaserde.git" }
|
||||||
xml-rs = "0.8"
|
xml-rs = "0.8"
|
||||||
thiserror.workspace = true
|
thiserror = "1.0"
|
||||||
async-trait = { workspace = true }
|
async-trait = { workspace = true }
|
||||||
tokio = { workspace = true }
|
tokio = { workspace = true }
|
||||||
uuid = { workspace = true }
|
uuid = { workspace = true }
|
||||||
|
|||||||
Reference in New Issue
Block a user