Compare commits


5 Commits

41 changed files with 824 additions and 2898 deletions

Cargo.lock (generated): 649 changed lines

File diff suppressed because it is too large.

View File

@@ -12,9 +12,6 @@ members = [
"harmony_cli",
"k3d",
"harmony_composer",
"harmony_inventory_agent",
"harmony_secret_derive",
"harmony_secret",
]
[workspace.package]
@@ -23,7 +20,7 @@ readme = "README.md"
license = "GNU AGPL v3"
[workspace.dependencies]
log = { version = "0.4", features = ["kv"] }
log = "0.4"
env_logger = "0.11"
derive-new = "0.7"
async-trait = "0.1"
@@ -56,12 +53,6 @@ chrono = "0.4"
similar = "2"
uuid = { version = "1.11", features = ["v4", "fast-rng", "macro-diagnostics"] }
pretty_assertions = "1.4.1"
tempfile = "3.20.0"
bollard = "0.19.1"
base64 = "0.22.1"
tar = "0.4.44"
lazy_static = "1.5.0"
directories = "6.0.0"
thiserror = "2.0.14"
serde = { version = "1.0.209", features = ["derive", "rc"] }
serde_json = "1.0.127"

View File

@@ -1,108 +0,0 @@
# OPNsense PXE Lab Environment
This project contains a script to automatically set up a virtual lab environment for testing PXE boot services managed by an OPNsense firewall.
## Overview
The `pxe_vm_lab_setup.sh` script will create the following resources using libvirt/KVM:
1. **A Virtual Network**: An isolated network named `harmonylan` (`virbr1`) for the lab.
2. **Two Virtual Machines**:
* `opnsense-pxe`: A firewall VM that will act as the gateway and PXE server.
* `pxe-node-1`: A client VM configured to boot from the network.
## Prerequisites
Ensure you have the following software installed on your Arch Linux host:
* `libvirt`
* `qemu`
* `virt-install` (from the `virt-install` package)
* `curl`
* `bzip2`
## Usage
### 1. Create the Environment
Run the `up` command to download the necessary images and create the network and VMs.
```bash
sudo ./pxe_vm_lab_setup.sh up
```
### 2. Install and Configure OPNsense
The OPNsense VM is created, but the OS must be installed manually via the console.
1. **Connect to the VM console**:
```bash
sudo virsh console opnsense-pxe
```
2. **Log in as the installer**:
* Username: `installer`
* Password: `opnsense`
3. **Follow the on-screen installation wizard**. When prompted to assign network interfaces (`WAN` and `LAN`):
* Find the MAC address for the `harmonylan` interface by running this command in another terminal:
```bash
virsh domiflist opnsense-pxe
# Example output:
# Interface Type Source Model MAC
# ---------------------------------------------------------
# vnet18 network default virtio 52:54:00:b5:c4:6d
# vnet19 network harmonylan virtio 52:54:00:21:f9:ba
```
* Assign the interface connected to `harmonylan` (e.g., `vtnet1` with MAC `52:54:00:21:f9:ba`) as your **LAN**.
* Assign the other interface as your **WAN**.
4. After the installation is complete, **shut down** the VM from the console menu.
5. **Detach the installation media** by editing the VM's configuration:
```bash
sudo virsh edit opnsense-pxe
```
Find and **delete** the entire `<disk>` block corresponding to the `.img` file (the one with `<target ... bus='usb'/>`).
6. **Start the VM** to boot into the newly installed system:
```bash
sudo virsh start opnsense-pxe
```
### 3. Connect to OPNsense from Your Host
To configure OPNsense, you need to connect your host to the `harmonylan` network.
1. By default, OPNsense configures its LAN interface with the IP `192.168.1.1`.
2. Assign a compatible IP address to your host's `virbr1` bridge interface:
```bash
sudo ip addr add 192.168.1.5/24 dev virbr1
```
3. You can now access the OPNsense VM from your host:
* **SSH**: `ssh root@192.168.1.1` (password: `opnsense`)
* **Web UI**: `https://192.168.1.1`
### 4. Configure PXE Services with Harmony
With connectivity established, you can now use Harmony to configure the OPNsense firewall for PXE booting. Point your Harmony OPNsense scores to the firewall using these details:
* **Hostname/IP**: `192.168.1.1`
* **Credentials**: `root` / `opnsense`
### 5. Boot the PXE Client
Once your Harmony configuration has been applied and OPNsense is serving DHCP/TFTP, start the client VM. It will automatically attempt to boot from the network.
```bash
sudo virsh start pxe-node-1
sudo virsh console pxe-node-1
```
## Cleanup
To destroy all VMs and networks created by the script, run the `clean` command:
```bash
sudo ./pxe_vm_lab_setup.sh clean
```

View File

@@ -1,191 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# --- Configuration ---
LAB_DIR="/var/lib/harmony_pxe_test"
IMG_DIR="${LAB_DIR}/images"
STATE_DIR="${LAB_DIR}/state"
VM_OPN="opnsense-pxe"
VM_PXE="pxe-node-1"
NET_HARMONYLAN="harmonylan"
# Network settings for the isolated LAN
VLAN_CIDR="192.168.150.0/24"
VLAN_GW="192.168.150.1"
VLAN_MASK="255.255.255.0"
# VM Specifications
RAM_OPN="2048"
VCPUS_OPN="2"
DISK_OPN_GB="10"
OS_VARIANT_OPN="freebsd14.0" # Updated to a more recent FreeBSD variant
RAM_PXE="4096"
VCPUS_PXE="2"
DISK_PXE_GB="40"
OS_VARIANT_LINUX="centos-stream9"
OPN_IMG_URL="https://mirror.ams1.nl.leaseweb.net/opnsense/releases/25.7/OPNsense-25.7-serial-amd64.img.bz2"
OPN_IMG_PATH="${IMG_DIR}/OPNsense-25.7-serial-amd64.img"
CENTOS_ISO_URL="https://mirror.stream.centos.org/9-stream/BaseOS/x86_64/os/images/boot.iso"
CENTOS_ISO_PATH="${IMG_DIR}/CentOS-Stream-9-latest-boot.iso"
CONNECT_URI="qemu:///system"
download_if_missing() {
local url="$1"
local dest="$2"
if [[ ! -f "$dest" ]]; then
echo "Downloading $url to $dest"
mkdir -p "$(dirname "$dest")"
local tmp
tmp="$(mktemp)"
curl -L --progress-bar "$url" -o "$tmp"
case "$url" in
*.bz2) bunzip2 -c "$tmp" > "$dest" && rm -f "$tmp" ;;
*) mv "$tmp" "$dest" ;;
esac
else
echo "Already present: $dest"
fi
}
# Ensures a libvirt network is defined and active
ensure_network() {
local net_name="$1"
local net_xml_path="$2"
if virsh --connect "${CONNECT_URI}" net-info "${net_name}" >/dev/null 2>&1; then
echo "Network ${net_name} already exists."
else
echo "Defining network ${net_name} from ${net_xml_path}"
virsh --connect "${CONNECT_URI}" net-define "${net_xml_path}"
fi
if ! virsh --connect "${CONNECT_URI}" net-info "${net_name}" | grep "Active: *yes"; then
echo "Starting network ${net_name}..."
virsh --connect "${CONNECT_URI}" net-start "${net_name}"
virsh --connect "${CONNECT_URI}" net-autostart "${net_name}"
fi
}
# Destroys a VM completely
destroy_vm() {
local vm_name="$1"
if virsh --connect "${CONNECT_URI}" dominfo "$vm_name" >/dev/null 2>&1; then
echo "Destroying and undefining VM: ${vm_name}"
virsh --connect "${CONNECT_URI}" destroy "$vm_name" || true
virsh --connect "${CONNECT_URI}" undefine "$vm_name" --nvram
fi
}
# Destroys a libvirt network
destroy_network() {
local net_name="$1"
if virsh --connect "${CONNECT_URI}" net-info "$net_name" >/dev/null 2>&1; then
echo "Destroying and undefining network: ${net_name}"
virsh --connect "${CONNECT_URI}" net-destroy "$net_name" || true
virsh --connect "${CONNECT_URI}" net-undefine "$net_name"
fi
}
# --- Main Logic ---
create_lab_environment() {
# Create network definition files
cat > "${STATE_DIR}/default.xml" <<EOF
<network>
<name>default</name>
<forward mode='nat'/>
<bridge name='virbr0' stp='on' delay='0'/>
<ip address='192.168.122.1' netmask='255.255.255.0'>
<dhcp>
<range start='192.168.122.100' end='192.168.122.200'/>
</dhcp>
</ip>
</network>
EOF
cat > "${STATE_DIR}/${NET_HARMONYLAN}.xml" <<EOF
<network>
<name>${NET_HARMONYLAN}</name>
<bridge name='virbr1' stp='on' delay='0'/>
</network>
EOF
# Ensure both networks exist and are active
ensure_network "default" "${STATE_DIR}/default.xml"
ensure_network "${NET_HARMONYLAN}" "${STATE_DIR}/${NET_HARMONYLAN}.xml"
# --- Create OPNsense VM (MODIFIED SECTION) ---
local disk_opn="${IMG_DIR}/${VM_OPN}.qcow2"
if [[ ! -f "$disk_opn" ]]; then
qemu-img create -f qcow2 "$disk_opn" "${DISK_OPN_GB}G"
fi
echo "Creating OPNsense VM using serial image..."
virt-install \
--connect "${CONNECT_URI}" \
--name "${VM_OPN}" \
--ram "${RAM_OPN}" \
--vcpus "${VCPUS_OPN}" \
--cpu host-passthrough \
--os-variant "${OS_VARIANT_OPN}" \
--graphics none \
--noautoconsole \
--disk path="${disk_opn}",device=disk,bus=virtio,boot.order=1 \
--disk path="${OPN_IMG_PATH}",device=disk,bus=usb,readonly=on,boot.order=2 \
--network network=default,model=virtio \
--network network="${NET_HARMONYLAN}",model=virtio \
--boot uefi,menu=on
echo "OPNsense VM created. Connect with: sudo virsh console ${VM_OPN}"
echo "The VM will boot from the serial installation image."
echo "Login with user 'installer' and password 'opnsense' to start the installation."
echo "Install onto the VirtIO disk (vtbd0)."
echo "After installation, shutdown the VM, then run 'sudo virsh edit ${VM_OPN}' and remove the USB disk block to boot from the installed system."
# --- Create PXE Client VM ---
local disk_pxe="${IMG_DIR}/${VM_PXE}.qcow2"
if [[ ! -f "$disk_pxe" ]]; then
qemu-img create -f qcow2 "$disk_pxe" "${DISK_PXE_GB}G"
fi
echo "Creating PXE client VM..."
virt-install \
--connect "${CONNECT_URI}" \
--name "${VM_PXE}" \
--ram "${RAM_PXE}" \
--vcpus "${VCPUS_PXE}" \
--cpu host-passthrough \
--os-variant "${OS_VARIANT_LINUX}" \
--graphics none \
--noautoconsole \
--disk path="${disk_pxe}",format=qcow2,bus=virtio \
--network network="${NET_HARMONYLAN}",model=virtio \
--pxe \
--boot uefi,menu=on
echo "PXE VM created. It will attempt to netboot on ${NET_HARMONYLAN}."
}
# --- Script Entrypoint ---
case "${1:-}" in
up)
mkdir -p "${IMG_DIR}" "${STATE_DIR}"
download_if_missing "$OPN_IMG_URL" "$OPN_IMG_PATH"
download_if_missing "$CENTOS_ISO_URL" "$CENTOS_ISO_PATH"
create_lab_environment
echo "Lab setup complete. Use 'sudo virsh list --all' to see VMs."
;;
clean)
destroy_vm "${VM_PXE}"
destroy_vm "${VM_OPN}"
destroy_network "${NET_HARMONYLAN}"
# Optionally destroy the default network if you want a full reset
# destroy_network "default"
echo "Cleanup complete."
;;
*)
echo "Usage: sudo $0 {up|clean}"
exit 1
;;
esac

View File

@@ -8,6 +8,7 @@ use harmony::{
hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
infra::opnsense::OPNSenseManagementInterface,
inventory::Inventory,
maestro::Maestro,
modules::{
http::StaticFilesHttpScore,
ipxe::IpxeScore,
@@ -129,21 +130,16 @@ async fn main() {
"./data/watchguard/pxe-http-files".to_string(),
));
let ipxe_score = IpxeScore::new();
harmony_tui::run(
inventory,
topology,
vec![
Box::new(dns_score),
Box::new(bootstrap_dhcp_score),
Box::new(bootstrap_load_balancer_score),
Box::new(load_balancer_score),
Box::new(tftp_score),
Box::new(http_score),
Box::new(ipxe_score),
Box::new(dhcp_score),
],
)
.await
.unwrap();
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
maestro.register_all(vec![
Box::new(dns_score),
Box::new(bootstrap_dhcp_score),
Box::new(bootstrap_load_balancer_score),
Box::new(load_balancer_score),
Box::new(tftp_score),
Box::new(http_score),
Box::new(ipxe_score),
Box::new(dhcp_score),
]);
harmony_tui::init(maestro).await.unwrap();
}

View File

@@ -8,6 +8,7 @@ use harmony::{
hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
infra::opnsense::OPNSenseManagementInterface,
inventory::Inventory,
maestro::Maestro,
modules::{
dummy::{ErrorScore, PanicScore, SuccessScore},
http::StaticFilesHttpScore,
@@ -83,25 +84,20 @@ async fn main() {
let http_score = StaticFilesHttpScore::new(Url::LocalFolder(
"./data/watchguard/pxe-http-files".to_string(),
));
harmony_tui::run(
inventory,
topology,
vec![
Box::new(dns_score),
Box::new(dhcp_score),
Box::new(load_balancer_score),
Box::new(tftp_score),
Box::new(http_score),
Box::new(OPNsenseShellCommandScore {
opnsense: opnsense.get_opnsense_config(),
command: "touch /tmp/helloharmonytouching".to_string(),
}),
Box::new(SuccessScore {}),
Box::new(ErrorScore {}),
Box::new(PanicScore {}),
],
)
.await
.unwrap();
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
maestro.register_all(vec![
Box::new(dns_score),
Box::new(dhcp_score),
Box::new(load_balancer_score),
Box::new(tftp_score),
Box::new(http_score),
Box::new(OPNsenseShellCommandScore {
opnsense: opnsense.get_opnsense_config(),
command: "touch /tmp/helloharmonytouching".to_string(),
}),
Box::new(SuccessScore {}),
Box::new(ErrorScore {}),
Box::new(PanicScore {}),
]);
harmony_tui::init(maestro).await.unwrap();
}

View File

@@ -0,0 +1,12 @@
[package]
name = "example_remove_rook_osd"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
[dependencies]
harmony = { version = "0.1.0", path = "../../harmony" }
harmony_cli = { version = "0.1.0", path = "../../harmony_cli" }
harmony_tui = { version = "0.1.0", path = "../../harmony_tui" }
tokio.workspace = true

View File

@@ -0,0 +1,18 @@
use harmony::{
inventory::Inventory, modules::storage::ceph::ceph_remove_osd_score::CephRemoveOsd,
topology::K8sAnywhereTopology,
};
#[tokio::main]
async fn main() {
let ceph_score = CephRemoveOsd {
osd_deployment_name: "rook-ceph-osd-2".to_string(),
rook_ceph_namespace: "rook-ceph".to_string(),
};
let topology = K8sAnywhereTopology::from_env();
let inventory = Inventory::autoload();
harmony_cli::run(inventory, topology, vec![Box::new(ceph_score)], None)
.await
.unwrap();
}
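// Usage note (an assumption, not shown in this changeset): from the workspace
// root this example should run with `cargo run -p example_remove_rook_osd`,
// provided K8sAnywhereTopology::from_env() can resolve a kubeconfig.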

View File

@@ -2,6 +2,7 @@ use std::net::{SocketAddr, SocketAddrV4};
use harmony::{
inventory::Inventory,
maestro::Maestro,
modules::{
dns::DnsScore,
dummy::{ErrorScore, PanicScore, SuccessScore},
@@ -15,19 +16,18 @@ use harmony_macros::ipv4;
#[tokio::main]
async fn main() {
harmony_tui::run(
Inventory::autoload(),
DummyInfra {},
vec![
Box::new(SuccessScore {}),
Box::new(ErrorScore {}),
Box::new(PanicScore {}),
Box::new(DnsScore::new(vec![], None)),
Box::new(build_large_score()),
],
)
.await
.unwrap();
let inventory = Inventory::autoload();
let topology = DummyInfra {};
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
maestro.register_all(vec![
Box::new(SuccessScore {}),
Box::new(ErrorScore {}),
Box::new(PanicScore {}),
Box::new(DnsScore::new(vec![], None)),
Box::new(build_large_score()),
]);
harmony_tui::init(maestro).await.unwrap();
}
fn build_large_score() -> LoadBalancerScore {

View File

@@ -16,8 +16,8 @@ reqwest = { version = "0.11", features = ["blocking", "json"] }
russh = "0.45.0"
rust-ipmi = "0.1.1"
semver = "1.0.23"
serde.workspace = true
serde_json.workspace = true
serde = { version = "1.0.209", features = ["derive", "rc"] }
serde_json = "1.0.127"
tokio.workspace = true
derive-new.workspace = true
log.workspace = true
@@ -38,8 +38,8 @@ serde-value.workspace = true
helm-wrapper-rs = "0.4.0"
non-blank-string-rs = "1.0.4"
k3d-rs = { path = "../k3d" }
directories.workspace = true
lazy_static.workspace = true
directories = "6.0.0"
lazy_static = "1.5.0"
dockerfile_builder = "0.1.5"
temp-file = "0.1.9"
convert_case.workspace = true
@@ -59,7 +59,7 @@ similar.workspace = true
futures-util = "0.3.31"
tokio-util = "0.7.15"
strum = { version = "0.27.1", features = ["derive"] }
tempfile.workspace = true
tempfile = "3.20.0"
serde_with = "3.14.0"
schemars = "0.8.22"
kube-derive = "1.1.0"
@@ -67,7 +67,6 @@ bollard.workspace = true
tar.workspace = true
base64.workspace = true
once_cell = "1.21.3"
harmony-secret-derive = { version = "0.1.0", path = "../harmony_secret_derive" }
[dev-dependencies]
pretty_assertions.workspace = true

Binary file not shown.

View File

@@ -241,7 +241,7 @@ pub struct DummyInfra;
#[async_trait]
impl Topology for DummyInfra {
fn name(&self) -> &str {
"DummyInfra"
todo!()
}
async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {

View File

@@ -5,7 +5,7 @@ use k8s_openapi::{
};
use kube::{
Client, Config, Error, Resource,
api::{Api, AttachParams, ListParams, Patch, PatchParams, ResourceExt},
api::{Api, AttachParams, DeleteParams, ListParams, Patch, PatchParams, ResourceExt},
config::{KubeConfigOptions, Kubeconfig},
core::ErrorResponse,
runtime::reflector::Lookup,
@@ -17,7 +17,9 @@ use kube::{
};
use log::{debug, error, trace};
use serde::{Serialize, de::DeserializeOwned};
use serde_json::json;
use similar::TextDiff;
use tokio::io::AsyncReadExt;
#[derive(new, Clone)]
pub struct K8sClient {
@@ -51,6 +53,66 @@ impl K8sClient {
})
}
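/// Fetches a Deployment by name, using the client's default namespace when
/// `namespace` is `None`; returns `Ok(None)` if it does not exist.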
pub async fn get_deployment(
&self,
name: &str,
namespace: Option<&str>,
) -> Result<Option<Deployment>, Error> {
let deps: Api<Deployment> = if let Some(ns) = namespace {
Api::namespaced(self.client.clone(), ns)
} else {
Api::default_namespaced(self.client.clone())
};
Ok(deps.get_opt(name).await?)
}
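/// Fetches a Pod by name, using the client's default namespace when
/// `namespace` is `None`; returns `Ok(None)` if it does not exist.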
pub async fn get_pod(&self, name: &str, namespace: Option<&str>) -> Result<Option<Pod>, Error> {
let pods: Api<Pod> = if let Some(ns) = namespace {
Api::namespaced(self.client.clone(), ns)
} else {
Api::default_namespaced(self.client.clone())
};
Ok(pods.get_opt(name).await?)
}
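/// Scales a Deployment to `replicas` by patching `spec.replicas`.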
pub async fn scale_deployment(
&self,
name: &str,
namespace: Option<&str>,
replicas: u32,
) -> Result<(), Error> {
let deployments: Api<Deployment> = if let Some(ns) = namespace {
Api::namespaced(self.client.clone(), ns)
} else {
Api::default_namespaced(self.client.clone())
};
let patch = json!({
    "spec": {
        "replicas": replicas
    }
});
// Use a merge patch here: Patch::Apply would require a field manager to be
// set on the PatchParams for server-side apply to be accepted.
let pp = PatchParams::default();
let scale = Patch::Merge(&patch);
deployments.patch_scale(name, &pp, &scale).await?;
Ok(())
}
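/// Deletes a Deployment using default `DeleteParams`.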
pub async fn delete_deployment(
&self,
name: &str,
namespace: Option<&str>,
) -> Result<(), Error> {
let deployments: Api<Deployment> = if let Some(ns) = namespace {
Api::namespaced(self.client.clone(), ns)
} else {
Api::default_namespaced(self.client.clone())
};
let delete_params = DeleteParams::default();
deployments.delete(name, &delete_params).await?;
Ok(())
}
pub async fn wait_until_deployment_ready(
&self,
name: String,
@@ -76,6 +138,68 @@ impl K8sClient {
}
}
/// Executes a command in the first pod found that matches the specified label
/// `{label}={name}` and captures its stdout.
pub async fn exec_app_capture_output(
&self,
name: String,
label: String,
namespace: Option<&str>,
command: Vec<&str>,
) -> Result<String, String> {
let api: Api<Pod>;
if let Some(ns) = namespace {
api = Api::namespaced(self.client.clone(), ns);
} else {
api = Api::default_namespaced(self.client.clone());
}
let pod_list = api
.list(&ListParams::default().labels(format!("{label}={name}").as_str()))
.await
.expect("couldn't get list of pods");
let res = api
.exec(
pod_list
.items
.first()
.expect("couldn't get pod")
.name()
.expect("couldn't get pod name")
.into_owned()
.as_str(),
command,
&AttachParams::default().stdout(true).stderr(true),
)
.await;
match res {
Err(e) => Err(e.to_string()),
Ok(mut process) => {
let status = process
.take_status()
.expect("Couldn't get status")
.await
.expect("Couldn't unwrap status");
if let Some(s) = status.status {
let mut stdout_buf = String::new();
if let Some(mut stdout) = process.stdout().take() {
let _ = stdout.read_to_string(&mut stdout_buf).await;
}
debug!("Status: {} - {:?}", s, status.details);
if s == "Success" {
Ok(stdout_buf)
} else {
Err(s)
}
} else {
Err("Couldn't get inner status of pod exec".to_string())
}
}
}
}
/// Will execute a command in the first pod found that matches the label `app.kubernetes.io/name={name}`
pub async fn exec_app(
&self,

View File

@@ -14,5 +14,6 @@ pub mod monitoring;
pub mod okd;
pub mod opnsense;
pub mod prometheus;
pub mod storage;
pub mod tenant;
pub mod tftp;

View File

@@ -0,0 +1,419 @@
use std::{
process::Command,
sync::Arc,
time::{Duration, Instant},
};
use async_trait::async_trait;
use log::{info, warn};
use serde::{Deserialize, Serialize};
use tokio::time::sleep;
use crate::{
data::{Id, Version},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::{K8sclient, Topology, k8s::K8sClient},
};
#[derive(Debug, Clone, Serialize)]
pub struct CephRemoveOsd {
pub osd_deployment_name: String,
pub rook_ceph_namespace: String,
}
impl<T: Topology + K8sclient> Score<T> for CephRemoveOsd {
fn name(&self) -> String {
format!("CephRemoveOsdScore")
}
#[doc(hidden)]
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(CephRemoveOsdInterpret {
score: self.clone(),
})
}
}
#[derive(Debug, Clone)]
pub struct CephRemoveOsdInterpret {
score: CephRemoveOsd,
}
#[async_trait]
impl<T: Topology + K8sclient> Interpret<T> for CephRemoveOsdInterpret {
async fn execute(
&self,
_inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
let client = topology.k8s_client().await.unwrap();
self.verify_ceph_toolbox_exists(client.clone()).await?;
self.scale_deployment(client.clone()).await?;
self.verify_deployment_scaled(client.clone()).await?;
self.delete_deployment(client.clone()).await?;
self.verify_deployment_deleted(client.clone()).await?;
let osd_id_full = self.get_ceph_osd_id()?;
self.purge_ceph_osd(client.clone(), &osd_id_full).await?;
self.verify_ceph_osd_removal(client.clone(), &osd_id_full)
.await?;
Ok(Outcome::success(format!(
"Successfully removed OSD {} from rook-ceph cluster by deleting deployment {}",
osd_id_full, self.score.osd_deployment_name
)))
}
fn get_name(&self) -> InterpretName {
todo!()
}
fn get_version(&self) -> Version {
todo!()
}
fn get_status(&self) -> InterpretStatus {
todo!()
}
fn get_children(&self) -> Vec<Id> {
todo!()
}
}
impl CephRemoveOsdInterpret {
pub fn get_ceph_osd_id(&self) -> Result<String, InterpretError> {
let osd_id_numeric = self
.score
.osd_deployment_name
.split('-')
.nth(3)
.ok_or_else(|| {
InterpretError::new(format!(
"Could not parse OSD id from deployment name {}",
self.score.osd_deployment_name
))
})?;
let osd_id_full = format!("osd.{}", osd_id_numeric);
info!(
"Targeting Ceph OSD: {} (parsed from deployment {})",
osd_id_full, self.score.osd_deployment_name
);
Ok(osd_id_full)
}
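/// Checks that the `rook-ceph-tools` toolbox deployment exists in the target
/// namespace and has at least one ready replica.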
pub async fn verify_ceph_toolbox_exists(
&self,
client: Arc<K8sClient>,
) -> Result<Outcome, InterpretError> {
let toolbox_dep = "rook-ceph-tools".to_string();
match client
.get_deployment(&toolbox_dep, Some(&self.score.rook_ceph_namespace))
.await
{
Ok(Some(deployment)) => {
if let Some(status) = deployment.status {
let ready_count = status.ready_replicas.unwrap_or(0);
if ready_count >= 1 {
return Ok(Outcome::success(format!(
"'{}' is ready with {} replica(s).",
&toolbox_dep, ready_count
)));
} else {
return Err(InterpretError::new(
"ceph-tool-box not ready in cluster".to_string(),
));
}
} else {
Err(InterpretError::new(format!(
"failed to get deployment status {}",
&toolbox_dep
)))
}
}
Ok(None) => Err(InterpretError::new(format!(
"Deployment '{}' not found in namespace '{}'.",
&toolbox_dep, self.score.rook_ceph_namespace
))),
Err(e) => Err(InterpretError::new(format!(
"Failed to query for deployment '{}': {}",
&toolbox_dep, e
))),
}
}
pub async fn scale_deployment(
&self,
client: Arc<K8sClient>,
) -> Result<Outcome, InterpretError> {
info!(
"Scaling down OSD deployment: {}",
self.score.osd_deployment_name
);
client
.scale_deployment(
&self.score.osd_deployment_name,
Some(&self.score.rook_ceph_namespace),
0,
)
.await?;
Ok(Outcome::success(format!(
"Scaled down deployment {}",
self.score.osd_deployment_name
)))
}
pub async fn verify_deployment_scaled(
&self,
client: Arc<K8sClient>,
) -> Result<Outcome, InterpretError> {
let (timeout, interval, start) = self.build_timer();
info!("Waiting for OSD deployment to scale down to 0 replicas");
loop {
let dep = client
.get_deployment(
&self.score.osd_deployment_name,
Some(&self.score.rook_ceph_namespace),
)
.await?;
if let Some(deployment) = dep {
if let Some(status) = deployment.status {
if status.replicas.unwrap_or(1) == 0 && status.ready_replicas.unwrap_or(1) == 0
{
return Ok(Outcome::success(
"Deployment successfully scaled down.".to_string(),
));
}
}
}
if start.elapsed() > timeout {
return Err(InterpretError::new(format!(
"Timed out waiting for deployment {} to scale down",
self.score.osd_deployment_name
)));
}
sleep(interval).await;
}
}
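/// Polling parameters shared by the verification loops: a 120 s timeout
/// checked every 5 s.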
fn build_timer(&self) -> (Duration, Duration, Instant) {
let timeout = Duration::from_secs(120);
let interval = Duration::from_secs(5);
let start = Instant::now();
(timeout, interval, start)
}
pub async fn delete_deployment(
&self,
client: Arc<K8sClient>,
) -> Result<Outcome, InterpretError> {
info!(
"Deleting OSD deployment: {}",
self.score.osd_deployment_name
);
client
.delete_deployment(
&self.score.osd_deployment_name,
Some(&self.score.rook_ceph_namespace),
)
.await?;
Ok(Outcome::success(format!(
"deployment {} deleted",
self.score.osd_deployment_name
)))
}
pub async fn verify_deployment_deleted(
&self,
client: Arc<K8sClient>,
) -> Result<Outcome, InterpretError> {
let (timeout, interval, start) = self.build_timer();
info!("Waiting for OSD deployment to scale down to 0 replicas");
loop {
let dep = client
.get_deployment(
&self.score.osd_deployment_name,
Some(&self.score.rook_ceph_namespace),
)
.await?;
if dep.is_none() {
info!(
"Deployment {} successfully deleted.",
self.score.osd_deployment_name
);
return Ok(Outcome::success(format!(
"Deployment {} deleted.",
self.score.osd_deployment_name
)));
}
if start.elapsed() > timeout {
return Err(InterpretError::new(format!(
"Timed out waiting for deployment {} to be deleted",
self.score.osd_deployment_name
)));
}
sleep(interval).await;
}
}
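/// Extracts the `nodes` array from `ceph osd tree -f json` output into a
/// typed [`CephOsdTree`].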
fn get_osd_tree(&self, json: serde_json::Value) -> Result<CephOsdTree, InterpretError> {
let nodes = json.get("nodes").ok_or_else(|| {
InterpretError::new("Missing 'nodes' field in ceph osd tree JSON".to_string())
})?;
let tree: CephOsdTree = CephOsdTree {
nodes: serde_json::from_value(nodes.clone()).map_err(|e| {
InterpretError::new(format!("Failed to parse ceph osd tree JSON: {}", e))
})?,
};
Ok(tree)
}
pub async fn purge_ceph_osd(
&self,
client: Arc<K8sClient>,
osd_id_full: &str,
) -> Result<Outcome, InterpretError> {
info!(
"Purging OSD {} from Ceph cluster and removing its auth key",
osd_id_full
);
// osd_id_full already carries the "osd." prefix, and pod exec takes a single
// argv, so both steps run through one shell invocation.
let purge_cmd = format!(
    "ceph osd purge {osd_id_full} --yes-i-really-mean-it && ceph auth del {osd_id_full}"
);
client
    .exec_app_capture_output(
        "rook-ceph-tools".to_string(),
        "app".to_string(),
        Some(&self.score.rook_ceph_namespace),
        vec!["sh", "-c", purge_cmd.as_str()],
    )
    .await?;
Ok(Outcome::success(format!(
"osd id {} removed from osd tree",
osd_id_full
)))
}
pub async fn verify_ceph_osd_removal(
&self,
client: Arc<K8sClient>,
osd_id_full: &str,
) -> Result<Outcome, InterpretError> {
let (timeout, interval, start) = self.build_timer();
info!(
"Verifying OSD {} has been removed from the Ceph tree...",
osd_id_full
);
loop {
let output = client
.exec_app_capture_output(
"rook-ceph-tools".to_string(),
"app".to_string(),
Some(&self.score.rook_ceph_namespace),
vec!["ceph osd tree -f json"],
)
.await?;
let json = serde_json::from_str(&output).map_err(|e| {
    InterpretError::new(format!("could not parse ceph osd tree JSON: {e}"))
})?;
let tree = self.get_osd_tree(json)?;
let osd_found = tree.nodes.iter().any(|node| node.name == osd_id_full);
if !osd_found {
return Ok(Outcome::success(format!(
"Successfully verified that OSD {} is removed from the Ceph cluster.",
osd_id_full,
)));
}
if start.elapsed() > timeout {
return Err(InterpretError::new(format!(
"Timed out waiting for OSD {} to be removed from Ceph tree",
osd_id_full
)));
}
warn!(
"OSD {} still found in Ceph tree, retrying in {:?}...",
osd_id_full, interval
);
sleep(interval).await;
}
}
}
#[derive(Debug, Deserialize, PartialEq)]
pub struct CephOsdTree {
pub nodes: Vec<CephNode>,
}
#[derive(Debug, Deserialize, PartialEq)]
pub struct CephNode {
pub id: i32,
pub name: String,
#[serde(rename = "type")]
pub node_type: String,
pub type_id: Option<i32>,
pub children: Option<Vec<i32>>,
pub exists: Option<i32>,
pub status: Option<String>,
}
#[cfg(test)]
mod tests {
use serde_json::json;
use super::*;
#[test]
fn test_get_osd_tree() {
let json_data = json!({
"nodes": [
{"id": 1, "name": "osd.1", "type": "osd", "primary_affinity":"1"},
{"id": 2, "name": "osd.2", "type": "osd", "crush_weight": 1.22344}
]
});
let interpret = CephRemoveOsdInterpret {
score: CephRemoveOsd {
osd_deployment_name: "osd-1".to_string(),
rook_ceph_namespace: "dummy_ns".to_string(),
},
};
let json = interpret.get_osd_tree(json_data).unwrap();
let expected = CephOsdTree {
nodes: vec![
CephNode {
id: 1,
name: "osd.1".to_string(),
node_type: "osd".to_string(),
type_id: None,
children: None,
exists: None,
status: None,
},
CephNode {
id: 2,
name: "osd.2".to_string(),
node_type: "osd".to_string(),
type_id: None,
children: None,
exists: None,
status: None,
},
],
};
assert_eq!(json, expected);
}
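    // Companion check (a minimal sketch, assuming the rook-ceph-osd-<id>
    // deployment naming used throughout this module): the numeric suffix of
    // the deployment name maps to the Ceph OSD id.
    #[test]
    fn test_get_ceph_osd_id() {
        let interpret = CephRemoveOsdInterpret {
            score: CephRemoveOsd {
                osd_deployment_name: "rook-ceph-osd-2".to_string(),
                rook_ceph_namespace: "rook-ceph".to_string(),
            },
        };
        assert_eq!(interpret.get_ceph_osd_id().unwrap(), "osd.2");
    }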
}

View File

@@ -0,0 +1 @@
pub mod ceph_remove_osd_score;

View File

@@ -0,0 +1 @@
pub mod ceph;

View File

@@ -22,7 +22,6 @@ indicatif = "0.18.0"
lazy_static = "1.5.0"
log.workspace = true
indicatif-log-bridge = "0.2.3"
chrono.workspace = true
[dev-dependencies]
harmony = { path = "../harmony", features = ["testing"] }

View File

@@ -1,17 +1,22 @@
use chrono::Local;
use console::style;
use harmony::{
instrumentation::{self, HarmonyEvent},
modules::application::ApplicationFeatureStatus,
topology::TopologyStatus,
};
use log::{error, info, log_enabled};
use std::io::Write;
use std::sync::{Arc, Mutex};
use indicatif::MultiProgress;
use indicatif_log_bridge::LogWrapper;
use log::error;
use std::{
sync::{Arc, Mutex},
thread,
time::Duration,
};
use crate::progress::{IndicatifProgressTracker, ProgressTracker};
pub fn init() -> tokio::task::JoinHandle<()> {
configure_logger();
let handle = tokio::spawn(handle_events());
let base_progress = configure_logger();
let handle = tokio::spawn(handle_events(base_progress));
loop {
if instrumentation::instrument(HarmonyEvent::HarmonyStarted).is_ok() {
@@ -22,76 +27,28 @@ pub fn init() -> tokio::task::JoinHandle<()> {
handle
}
fn configure_logger() {
env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info"))
.format(|buf, record| {
let debug_mode = log_enabled!(log::Level::Debug);
let timestamp = Local::now().format("%Y-%m-%d %H:%M:%S");
fn configure_logger() -> MultiProgress {
let logger =
env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).build();
let level = logger.filter();
let progress = MultiProgress::new();
let level = match record.level() {
log::Level::Error => style("ERROR").red(),
log::Level::Warn => style("WARN").yellow(),
log::Level::Info => style("INFO").green(),
log::Level::Debug => style("DEBUG").blue(),
log::Level::Trace => style("TRACE").magenta(),
};
if let Some(status) = record.key_values().get(log::kv::Key::from("status")) {
let status = status.to_borrowed_str().unwrap();
let emoji = match status {
"finished" => style(crate::theme::EMOJI_SUCCESS.to_string()).green(),
"skipped" => style(crate::theme::EMOJI_SKIP.to_string()).yellow(),
"failed" => style(crate::theme::EMOJI_ERROR.to_string()).red(),
_ => style("".into()),
};
if debug_mode {
writeln!(
buf,
"[{} {:<5} {}] {} {}",
timestamp,
level,
record.target(),
emoji,
record.args()
)
} else {
writeln!(buf, "[{:<5}] {} {}", level, emoji, record.args())
}
} else if let Some(emoji) = record.key_values().get(log::kv::Key::from("emoji")) {
if debug_mode {
writeln!(
buf,
"[{} {:<5} {}] {} {}",
timestamp,
level,
record.target(),
emoji,
record.args()
)
} else {
writeln!(buf, "[{:<5}] {} {}", level, emoji, record.args())
}
} else if debug_mode {
writeln!(
buf,
"[{} {:<5} {}] {}",
timestamp,
level,
record.target(),
record.args()
)
} else {
writeln!(buf, "[{:<5}] {}", level, record.args())
}
})
.init();
LogWrapper::new(progress.clone(), logger)
.try_init()
.unwrap();
log::set_max_level(level);
progress
}
async fn handle_events() {
async fn handle_events(base_progress: MultiProgress) {
let progress_tracker = Arc::new(IndicatifProgressTracker::new(base_progress.clone()));
let preparing_topology = Arc::new(Mutex::new(false));
let current_score: Arc<Mutex<Option<String>>> = Arc::new(Mutex::new(None));
instrumentation::subscribe("Harmony CLI Logger", {
move |event| {
let progress_tracker = Arc::clone(&progress_tracker);
let preparing_topology = Arc::clone(&preparing_topology);
let current_score = Arc::clone(&current_score);
@@ -102,57 +59,90 @@ async fn handle_events() {
match event {
HarmonyEvent::HarmonyStarted => {}
HarmonyEvent::HarmonyFinished => {
let emoji = crate::theme::EMOJI_HARMONY.to_string();
info!(emoji = emoji.as_str(); "Harmony completed");
progress_tracker.add_section(
"harmony-summary",
&format!("\n{} Harmony completed\n\n", crate::theme::EMOJI_HARMONY),
);
progress_tracker.add_section("harmony-finished", "\n\n");
thread::sleep(Duration::from_millis(200));
return false;
}
HarmonyEvent::TopologyStateChanged {
topology,
status,
message,
} => match status {
TopologyStatus::Queued => {}
TopologyStatus::Preparing => {
let emoji = format!("{}", style(crate::theme::EMOJI_TOPOLOGY.to_string()).yellow());
info!(emoji = emoji.as_str(); "Preparing environment: {topology}...");
(*preparing_topology) = true;
}
TopologyStatus::Success => {
(*preparing_topology) = false;
if let Some(message) = message {
info!(status = "finished"; "{message}");
} => {
let section_key = topology_key(&topology);
match status {
TopologyStatus::Queued => {}
TopologyStatus::Preparing => {
progress_tracker.add_section(
&section_key,
&format!(
"\n{} Preparing environment: {topology}...",
crate::theme::EMOJI_TOPOLOGY
),
);
(*preparing_topology) = true;
}
TopologyStatus::Success => {
(*preparing_topology) = false;
progress_tracker.add_task(&section_key, "topology-success", "");
progress_tracker
.finish_task("topology-success", &message.unwrap_or("".into()));
}
TopologyStatus::Noop => {
(*preparing_topology) = false;
progress_tracker.add_task(&section_key, "topology-skip", "");
progress_tracker
.skip_task("topology-skip", &message.unwrap_or("".into()));
}
TopologyStatus::Error => {
progress_tracker.add_task(&section_key, "topology-error", "");
(*preparing_topology) = false;
progress_tracker
.fail_task("topology-error", &message.unwrap_or("".into()));
}
}
TopologyStatus::Noop => {
(*preparing_topology) = false;
if let Some(message) = message {
info!(status = "skipped"; "{message}");
}
}
TopologyStatus::Error => {
(*preparing_topology) = false;
if let Some(message) = message {
error!(status = "failed"; "{message}");
}
}
},
}
HarmonyEvent::InterpretExecutionStarted {
execution_id: _,
topology: _,
execution_id: task_key,
topology,
interpret: _,
score,
message,
} => {
if *preparing_topology || current_score.is_some() {
info!("{message}");
let is_key_topology = (*preparing_topology)
&& progress_tracker.contains_section(&topology_key(&topology));
let is_key_current_score = current_score.is_some()
&& progress_tracker
.contains_section(&score_key(&current_score.clone().unwrap()));
let is_key_score = progress_tracker.contains_section(&score_key(&score));
let section_key = if is_key_topology {
topology_key(&topology)
} else if is_key_current_score {
score_key(&current_score.clone().unwrap())
} else if is_key_score {
score_key(&score)
} else {
(*current_score) = Some(score.clone());
let emoji = format!("{}", style(crate::theme::EMOJI_SCORE).blue());
info!(emoji = emoji.as_str(); "Interpreting score: {score}...");
}
let key = score_key(&score);
progress_tracker.add_section(
&key,
&format!(
"{} Interpreting score: {score}...",
crate::theme::EMOJI_SCORE
),
);
key
};
progress_tracker.add_task(&section_key, &task_key, &message);
}
HarmonyEvent::InterpretExecutionFinished {
execution_id: _,
execution_id: task_key,
topology: _,
interpret: _,
score,
@@ -165,17 +155,16 @@ async fn handle_events() {
match outcome {
Ok(outcome) => match outcome.status {
harmony::interpret::InterpretStatus::SUCCESS => {
info!(status = "finished"; "{}", outcome.message);
progress_tracker.finish_task(&task_key, &outcome.message);
}
harmony::interpret::InterpretStatus::NOOP => {
info!(status = "skipped"; "{}", outcome.message);
}
_ => {
error!(status = "failed"; "{}", outcome.message);
progress_tracker.skip_task(&task_key, &outcome.message);
}
_ => progress_tracker.fail_task(&task_key, &outcome.message),
},
Err(err) => {
error!(status = "failed"; "{}", err);
error!("Interpret error: {err}");
progress_tracker.fail_task(&task_key, &err.to_string());
}
}
}
@@ -184,17 +173,30 @@ async fn handle_events() {
application,
feature,
status,
} => match status {
ApplicationFeatureStatus::Installing => {
info!("Installing feature '{}' for '{}'...", feature, application);
} => {
if let Some(score) = &(*current_score) {
let section_key = score_key(score);
let task_key = app_feature_key(&application, &feature);
match status {
ApplicationFeatureStatus::Installing => {
let message = format!("Feature '{}' installing...", feature);
progress_tracker.add_task(&section_key, &task_key, &message);
}
ApplicationFeatureStatus::Installed => {
let message = format!("Feature '{}' installed", feature);
progress_tracker.finish_task(&task_key, &message);
}
ApplicationFeatureStatus::Failed { details } => {
let message = format!(
"Feature '{}' installation failed: {}",
feature, details
);
progress_tracker.fail_task(&task_key, &message);
}
}
}
ApplicationFeatureStatus::Installed => {
info!(status = "finished"; "Feature '{}' installed", feature);
}
ApplicationFeatureStatus::Failed { details } => {
error!(status = "failed"; "Feature '{}' installation failed: {}", feature, details);
}
},
}
}
true
}
@@ -202,3 +204,15 @@ async fn handle_events() {
})
.await;
}
fn topology_key(topology: &str) -> String {
format!("topology-{topology}")
}
fn score_key(score: &str) -> String {
format!("score-{score}")
}
fn app_feature_key(application: &str, feature: &str) -> String {
format!("app-{application}-{feature}")
}

View File

@@ -90,37 +90,13 @@ pub async fn run<T: Topology + Send + Sync + 'static>(
topology: T,
scores: Vec<Box<dyn Score<T>>>,
args_struct: Option<Args>,
) -> Result<(), Box<dyn std::error::Error>> {
let args = match args_struct {
Some(args) => args,
None => Args::parse(),
};
#[cfg(not(feature = "tui"))]
if args.interactive {
return Err("Not compiled with interactive support".into());
}
#[cfg(feature = "tui")]
if args.interactive {
return harmony_tui::run(inventory, topology, scores).await;
}
run_cli(inventory, topology, scores, args).await
}
pub async fn run_cli<T: Topology + Send + Sync + 'static>(
inventory: Inventory,
topology: T,
scores: Vec<Box<dyn Score<T>>>,
args: Args,
) -> Result<(), Box<dyn std::error::Error>> {
let cli_logger_handle = cli_logger::init();
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
maestro.register_all(scores);
let result = init(maestro, args).await;
let result = init(maestro, args_struct).await;
instrumentation::instrument(instrumentation::HarmonyEvent::HarmonyFinished).unwrap();
let _ = tokio::try_join!(cli_logger_handle);
@@ -129,8 +105,23 @@ pub async fn run_cli<T: Topology + Send + Sync + 'static>(
async fn init<T: Topology + Send + Sync + 'static>(
maestro: harmony::maestro::Maestro<T>,
args: Args,
args_struct: Option<Args>,
) -> Result<(), Box<dyn std::error::Error>> {
let args = match args_struct {
Some(args) => args,
None => Args::parse(),
};
#[cfg(feature = "tui")]
if args.interactive {
return harmony_tui::init(maestro).await;
}
#[cfg(not(feature = "tui"))]
if args.interactive {
return Err("Not compiled with interactive support".into());
}
let _ = env_logger::builder().try_init();
let scores_vec = maestro_scores_filter(&maestro, args.all, args.filter, args.number);
@@ -202,14 +193,14 @@ mod tests {
let maestro = init_test_maestro();
let res = crate::init(
maestro,
crate::Args {
Some(crate::Args {
yes: true,
filter: Some("SuccessScore".to_owned()),
interactive: false,
all: true,
number: 0,
list: false,
},
}),
)
.await;
@@ -222,14 +213,14 @@ mod tests {
let res = crate::init(
maestro,
crate::Args {
Some(crate::Args {
yes: true,
filter: Some("ErrorScore".to_owned()),
interactive: false,
all: true,
number: 0,
list: false,
},
}),
)
.await;
@@ -242,14 +233,14 @@ mod tests {
let res = crate::init(
maestro,
crate::Args {
Some(crate::Args {
yes: true,
filter: None,
interactive: false,
all: false,
number: 0,
list: false,
},
}),
)
.await;

View File

@@ -1,13 +0,0 @@
[package]
name = "harmony_inventory_agent"
version = "0.1.0"
edition = "2024"
[dependencies]
actix-web = "4.4"
sysinfo = "0.30"
serde.workspace = true
serde_json.workspace = true
log.workspace = true
env_logger.workspace = true
uuid.workspace = true

View File

@@ -1,571 +0,0 @@
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::fs;
use std::path::Path;
use std::process::Command;
use sysinfo::System;
#[derive(Serialize, Deserialize, Debug)]
pub struct PhysicalHost {
pub storage_drives: Vec<StorageDrive>,
pub storage_controller: StorageController,
pub memory_modules: Vec<MemoryModule>,
pub cpus: Vec<CPU>,
pub chipset: Chipset,
pub network_interfaces: Vec<NetworkInterface>,
pub management_interface: Option<ManagementInterface>,
pub host_uuid: String,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct StorageDrive {
pub name: String,
pub model: String,
pub serial: String,
pub size_bytes: u64,
pub logical_block_size: u32,
pub physical_block_size: u32,
pub rotational: bool,
pub wwn: Option<String>,
pub interface_type: String,
pub smart_status: Option<String>,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct StorageController {
pub name: String,
pub driver: String,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct MemoryModule {
pub size_bytes: u64,
pub speed_mhz: Option<u32>,
pub manufacturer: Option<String>,
pub part_number: Option<String>,
pub serial_number: Option<String>,
pub rank: Option<u8>,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct CPU {
pub model: String,
pub vendor: String,
pub cores: u32,
pub threads: u32,
pub frequency_mhz: u64,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct Chipset {
pub name: String,
pub vendor: String,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct NetworkInterface {
pub name: String,
pub mac_address: String,
pub speed_mbps: Option<u32>,
pub is_up: bool,
pub mtu: u32,
pub ipv4_addresses: Vec<String>,
pub ipv6_addresses: Vec<String>,
pub driver: String,
pub firmware_version: Option<String>,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct ManagementInterface {
pub kind: String,
pub address: Option<String>,
pub firmware: Option<String>,
}
impl PhysicalHost {
pub fn gather() -> Self {
let mut sys = System::new_all();
sys.refresh_all();
Self {
storage_drives: Self::gather_storage_drives(),
storage_controller: Self::gather_storage_controller(),
memory_modules: Self::gather_memory_modules(),
cpus: Self::gather_cpus(&sys),
chipset: Self::gather_chipset(),
network_interfaces: Self::gather_network_interfaces(),
management_interface: Self::gather_management_interface(),
host_uuid: Self::get_host_uuid(),
}
}
fn gather_storage_drives() -> Vec<StorageDrive> {
let mut drives = Vec::new();
// Use lsblk with JSON output for robust parsing
if let Ok(output) = Command::new("lsblk")
.args([
"-d",
"-o",
"NAME,MODEL,SERIAL,SIZE,ROTA,WWN",
"-n",
"-e",
"7",
"--json",
])
.output()
&& output.status.success()
&& let Ok(json) = serde_json::from_slice::<Value>(&output.stdout)
&& let Some(blockdevices) = json.get("blockdevices").and_then(|v| v.as_array())
{
for device in blockdevices {
let name = device
.get("name")
.and_then(|v| v.as_str())
.unwrap_or("")
.to_string();
if name.is_empty() {
continue;
}
let model = device
.get("model")
.and_then(|v| v.as_str())
.map(|s| s.trim().to_string())
.unwrap_or_default();
let serial = device
.get("serial")
.and_then(|v| v.as_str())
.map(|s| s.trim().to_string())
.unwrap_or_default();
let size_str = device.get("size").and_then(|v| v.as_str()).unwrap_or("0");
let size_bytes = Self::parse_size(size_str).unwrap_or(0);
let rotational = device
.get("rota")
.and_then(|v| v.as_bool())
.unwrap_or(false);
let wwn = device
.get("wwn")
.and_then(|v| v.as_str())
.map(|s| s.trim().to_string())
.filter(|s| !s.is_empty() && s != "null");
let device_path = Path::new("/sys/block").join(&name);
let mut drive = StorageDrive {
name: name.clone(),
model,
serial,
size_bytes,
logical_block_size: Self::read_sysfs_u32(
&device_path.join("queue/logical_block_size"),
)
.unwrap_or(512),
physical_block_size: Self::read_sysfs_u32(
&device_path.join("queue/physical_block_size"),
)
.unwrap_or(512),
rotational,
wwn,
interface_type: Self::get_interface_type(&name, &device_path),
smart_status: Self::get_smart_status(&name),
};
// Enhance with additional sysfs info if available
if device_path.exists() {
if drive.model.is_empty() {
drive.model = Self::read_sysfs_string(&device_path.join("device/model"));
}
if drive.serial.is_empty() {
drive.serial = Self::read_sysfs_string(&device_path.join("device/serial"));
}
}
drives.push(drive);
}
}
drives
}
fn gather_storage_controller() -> StorageController {
let mut controller = StorageController {
name: "Unknown".to_string(),
driver: "Unknown".to_string(),
};
// Use lspci with JSON output if available
if let Ok(output) = Command::new("lspci")
.args(["-nn", "-d", "::0100", "-J"]) // Storage controllers class with JSON
.output()
&& output.status.success()
&& let Ok(json) = serde_json::from_slice::<Value>(&output.stdout)
&& let Some(devices) = json.as_array()
{
for device in devices {
if let Some(device_info) = device.as_object()
&& let Some(name) = device_info
.get("device")
.and_then(|v| v.as_object())
.and_then(|v| v.get("name"))
.and_then(|v| v.as_str())
{
controller.name = name.to_string();
break;
}
}
}
// Fallback to text output if JSON fails
if controller.name == "Unknown"
&& let Ok(output) = Command::new("lspci")
.args(["-nn", "-d", "::0100"]) // Storage controllers class
.output()
&& output.status.success()
{
let output_str = String::from_utf8_lossy(&output.stdout);
if let Some(line) = output_str.lines().next() {
let parts: Vec<&str> = line.split(':').collect();
if parts.len() > 2 {
controller.name = parts[2].trim().to_string();
}
}
}
// Try to get driver info from lsmod
if let Ok(output) = Command::new("lsmod").output()
&& output.status.success()
{
let output_str = String::from_utf8_lossy(&output.stdout);
for line in output_str.lines() {
if line.contains("ahci")
|| line.contains("nvme")
|| line.contains("megaraid")
|| line.contains("mpt3sas")
{
let parts: Vec<&str> = line.split_whitespace().collect();
if !parts.is_empty() {
controller.driver = parts[0].to_string();
break;
}
}
}
}
controller
}
fn gather_memory_modules() -> Vec<MemoryModule> {
let mut modules = Vec::new();
if let Ok(output) = Command::new("dmidecode").arg("--type").arg("17").output()
&& output.status.success()
{
let output_str = String::from_utf8_lossy(&output.stdout);
let sections: Vec<&str> = output_str.split("Memory Device").collect();
for section in sections.into_iter().skip(1) {
let mut module = MemoryModule {
size_bytes: 0,
speed_mhz: None,
manufacturer: None,
part_number: None,
serial_number: None,
rank: None,
};
for line in section.lines() {
let line = line.trim();
if let Some(size_str) = line.strip_prefix("Size: ") {
if size_str != "No Module Installed"
&& let Some((num, unit)) = size_str.split_once(' ')
&& let Ok(num) = num.parse::<u64>()
{
module.size_bytes = match unit {
"MB" => num * 1024 * 1024,
"GB" => num * 1024 * 1024 * 1024,
"KB" => num * 1024,
_ => 0,
};
}
} else if let Some(speed_str) = line.strip_prefix("Speed: ") {
if let Some((num, _unit)) = speed_str.split_once(' ') {
module.speed_mhz = num.parse().ok();
}
} else if let Some(man) = line.strip_prefix("Manufacturer: ") {
module.manufacturer = Some(man.to_string());
} else if let Some(part) = line.strip_prefix("Part Number: ") {
module.part_number = Some(part.to_string());
} else if let Some(serial) = line.strip_prefix("Serial Number: ") {
module.serial_number = Some(serial.to_string());
} else if let Some(rank) = line.strip_prefix("Rank: ") {
module.rank = rank.parse().ok();
}
}
if module.size_bytes > 0 {
modules.push(module);
}
}
}
modules
}
fn gather_cpus(sys: &System) -> Vec<CPU> {
let mut cpus = Vec::new();
let global_cpu = sys.global_cpu_info();
cpus.push(CPU {
model: global_cpu.brand().to_string(),
vendor: global_cpu.vendor_id().to_string(),
cores: sys.physical_core_count().unwrap_or(1) as u32,
threads: sys.cpus().len() as u32,
frequency_mhz: global_cpu.frequency(),
});
cpus
}
fn gather_chipset() -> Chipset {
Chipset {
name: Self::read_dmi("board-product-name").unwrap_or_else(|| "Unknown".to_string()),
vendor: Self::read_dmi("board-manufacturer").unwrap_or_else(|| "Unknown".to_string()),
}
}
fn gather_network_interfaces() -> Vec<NetworkInterface> {
let mut interfaces = Vec::new();
let sys_net_path = Path::new("/sys/class/net");
if let Ok(entries) = fs::read_dir(sys_net_path) {
for entry in entries.flatten() {
let iface_name = entry.file_name().into_string().unwrap_or_default();
let iface_path = entry.path();
// Skip virtual interfaces
if iface_name.starts_with("lo")
|| iface_name.starts_with("docker")
|| iface_name.starts_with("virbr")
|| iface_name.starts_with("veth")
|| iface_name.starts_with("br-")
|| iface_name.starts_with("tun")
|| iface_name.starts_with("wg")
{
continue;
}
// Check if it's a physical interface by looking for device directory
if !iface_path.join("device").exists() {
continue;
}
let mac_address = Self::read_sysfs_string(&iface_path.join("address"));
let speed_mbps = Self::read_sysfs_u32(&iface_path.join("speed"));
let operstate = Self::read_sysfs_string(&iface_path.join("operstate"));
let mtu = Self::read_sysfs_u32(&iface_path.join("mtu")).unwrap_or(1500);
let driver = Self::read_sysfs_string(&iface_path.join("device/driver/module"));
let firmware_version =
Self::read_sysfs_opt_string(&iface_path.join("device/firmware_version"));
// Get IP addresses using ip command with JSON output
let (ipv4_addresses, ipv6_addresses) = Self::get_interface_ips_json(&iface_name);
interfaces.push(NetworkInterface {
name: iface_name,
mac_address,
speed_mbps,
is_up: operstate == "up",
mtu,
ipv4_addresses,
ipv6_addresses,
driver,
firmware_version,
});
}
}
interfaces
}
fn gather_management_interface() -> Option<ManagementInterface> {
// Try to detect common management interfaces
if Path::new("/dev/ipmi0").exists() {
Some(ManagementInterface {
kind: "IPMI".to_string(),
address: None,
firmware: Self::read_dmi("bios-version"),
})
} else if Path::new("/sys/class/misc/mei").exists() {
Some(ManagementInterface {
kind: "Intel ME".to_string(),
address: None,
firmware: None,
})
} else {
None
}
}
fn get_host_uuid() -> String {
Self::read_dmi("system-uuid").unwrap()
}
// Helper methods
fn read_sysfs_string(path: &Path) -> String {
fs::read_to_string(path)
.unwrap_or_default()
.trim()
.to_string()
}
fn read_sysfs_opt_string(path: &Path) -> Option<String> {
fs::read_to_string(path)
.ok()
.map(|s| s.trim().to_string())
.filter(|s| !s.is_empty())
}
fn read_sysfs_u32(path: &Path) -> Option<u32> {
fs::read_to_string(path)
.ok()
.and_then(|s| s.trim().parse().ok())
}
fn read_dmi(field: &str) -> Option<String> {
Command::new("dmidecode")
.arg("-s")
.arg(field)
.output()
.ok()
.filter(|output| output.status.success())
.and_then(|output| String::from_utf8(output.stdout).ok())
.map(|s| s.trim().to_string())
.filter(|s| !s.is_empty())
}
fn get_interface_type(device_name: &str, device_path: &Path) -> String {
if device_name.starts_with("nvme") {
"NVMe".to_string()
} else if device_name.starts_with("sd") {
"SATA".to_string()
} else if device_name.starts_with("hd") {
"IDE".to_string()
} else if device_name.starts_with("vd") {
"VirtIO".to_string()
} else {
// Try to determine from device path
Self::read_sysfs_string(&device_path.join("device/subsystem"))
.split('/')
.next_back()
.unwrap_or("Unknown")
.to_string()
}
}
fn get_smart_status(device_name: &str) -> Option<String> {
Command::new("smartctl")
.arg("-H")
.arg(format!("/dev/{}", device_name))
.output()
.ok()
.filter(|output| output.status.success())
.and_then(|output| String::from_utf8(output.stdout).ok())
.and_then(|s| {
s.lines()
.find(|line| line.contains("SMART overall-health self-assessment"))
.and_then(|line| line.split(':').nth(1))
.map(|s| s.trim().to_string())
})
}
fn parse_size(size_str: &str) -> Option<u64> {
if size_str.ends_with('T') {
size_str[..size_str.len() - 1]
.parse::<u64>()
.ok()
.map(|t| t * 1024 * 1024 * 1024 * 1024)
} else if size_str.ends_with('G') {
size_str[..size_str.len() - 1]
.parse::<u64>()
.ok()
.map(|g| g * 1024 * 1024 * 1024)
} else if size_str.ends_with('M') {
size_str[..size_str.len() - 1]
.parse::<u64>()
.ok()
.map(|m| m * 1024 * 1024)
} else if size_str.ends_with('K') {
size_str[..size_str.len() - 1]
.parse::<u64>()
.ok()
.map(|k| k * 1024)
} else if size_str.ends_with('B') {
size_str[..size_str.len() - 1].parse::<u64>().ok()
} else {
size_str.parse::<u64>().ok()
}
}
fn get_interface_ips_json(iface_name: &str) -> (Vec<String>, Vec<String>) {
let mut ipv4 = Vec::new();
let mut ipv6 = Vec::new();
// Get IPv4 addresses using JSON output
if let Ok(output) = Command::new("ip")
.args(["-j", "-4", "addr", "show", iface_name])
.output()
&& output.status.success()
&& let Ok(json) = serde_json::from_slice::<Value>(&output.stdout)
&& let Some(addrs) = json.as_array()
{
for addr_info in addrs {
if let Some(addr_info_obj) = addr_info.as_object()
&& let Some(addr_info) =
addr_info_obj.get("addr_info").and_then(|v| v.as_array())
{
for addr in addr_info {
if let Some(addr_obj) = addr.as_object()
&& let Some(ip) = addr_obj.get("local").and_then(|v| v.as_str())
{
ipv4.push(ip.to_string());
}
}
}
}
}
// Get IPv6 addresses using JSON output
if let Ok(output) = Command::new("ip")
.args(["-j", "-6", "addr", "show", iface_name])
.output()
&& output.status.success()
&& let Ok(json) = serde_json::from_slice::<Value>(&output.stdout)
&& let Some(addrs) = json.as_array()
{
for addr_info in addrs {
if let Some(addr_info_obj) = addr_info.as_object()
&& let Some(addr_info) =
addr_info_obj.get("addr_info").and_then(|v| v.as_array())
{
for addr in addr_info {
if let Some(addr_obj) = addr.as_object()
&& let Some(ip) = addr_obj.get("local").and_then(|v| v.as_str())
{
// Skip link-local addresses
if !ip.starts_with("fe80::") {
ipv6.push(ip.to_string());
}
}
}
}
}
}
(ipv4, ipv6)
}
}

View File

@@ -1,29 +0,0 @@
// src/main.rs
use actix_web::{App, HttpServer, Responder, get};
use hwinfo::PhysicalHost;
use std::env;
mod hwinfo;
#[get("/inventory")]
async fn inventory() -> impl Responder {
log::info!("Received inventory request");
let host = PhysicalHost::gather();
log::info!("Inventory data gathered successfully");
actix_web::HttpResponse::Ok().json(host)
}
#[actix_web::main]
async fn main() -> std::io::Result<()> {
env_logger::init();
let port = env::var("HARMONY_INVENTORY_AGENT_PORT").unwrap_or_else(|_| "8080".to_string());
let bind_addr = format!("0.0.0.0:{}", port);
log::info!("Starting inventory agent on {}", bind_addr);
HttpServer::new(|| App::new().service(inventory))
.bind(&bind_addr)?
.run()
.await
}

View File

@@ -1,23 +0,0 @@
[package]
name = "harmony-secret"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
[dependencies]
harmony-secret-derive = { version = "0.1.0", path = "../harmony_secret_derive" }
serde = { version = "1.0.209", features = ["derive", "rc"] }
serde_json = "1.0.127"
thiserror.workspace = true
lazy_static.workspace = true
directories.workspace = true
log.workspace = true
infisical = "0.0.2"
tokio.workspace = true
async-trait.workspace = true
http.workspace = true
[dev-dependencies]
pretty_assertions.workspace = true
tempfile.workspace = true

View File

@@ -1,18 +0,0 @@
use lazy_static::lazy_static;
lazy_static! {
pub static ref SECRET_NAMESPACE: String =
std::env::var("HARMONY_SECRET_NAMESPACE").expect("HARMONY_SECRET_NAMESPACE environment variable is required, it should contain the name of the project you are working on to access its secrets");
pub static ref SECRET_STORE: Option<String> =
std::env::var("HARMONY_SECRET_STORE").ok();
pub static ref INFISICAL_URL: Option<String> =
std::env::var("HARMONY_SECRET_INFISICAL_URL").ok();
pub static ref INFISICAL_PROJECT_ID: Option<String> =
std::env::var("HARMONY_SECRET_INFISICAL_PROJECT_ID").ok();
pub static ref INFISICAL_ENVIRONMENT: Option<String> =
std::env::var("HARMONY_SECRET_INFISICAL_ENVIRONMENT").ok();
pub static ref INFISICAL_CLIENT_ID: Option<String> =
std::env::var("HARMONY_SECRET_INFISICAL_CLIENT_ID").ok();
pub static ref INFISICAL_CLIENT_SECRET: Option<String> =
std::env::var("HARMONY_SECRET_INFISICAL_CLIENT_SECRET").ok();
}

View File

@@ -1,166 +0,0 @@
pub mod config;
mod store;
use crate::config::SECRET_NAMESPACE;
use async_trait::async_trait;
use config::INFISICAL_CLIENT_ID;
use config::INFISICAL_CLIENT_SECRET;
use config::INFISICAL_ENVIRONMENT;
use config::INFISICAL_PROJECT_ID;
use config::INFISICAL_URL;
use config::SECRET_STORE;
use serde::{Serialize, de::DeserializeOwned};
use std::fmt;
use store::InfisicalSecretStore;
use store::LocalFileSecretStore;
use thiserror::Error;
use tokio::sync::OnceCell;
pub use harmony_secret_derive::Secret;
// The Secret trait remains the same.
pub trait Secret: Serialize + DeserializeOwned + Sized {
const KEY: &'static str;
}
// The error enum remains the same.
#[derive(Debug, Error)]
pub enum SecretStoreError {
#[error("Secret not found for key '{key}' in namespace '{namespace}'")]
NotFound { namespace: String, key: String },
#[error("Failed to deserialize secret for key '{key}': {source}")]
Deserialization {
key: String,
source: serde_json::Error,
},
#[error("Failed to serialize secret for key '{key}': {source}")]
Serialization {
key: String,
source: serde_json::Error,
},
#[error("Underlying storage error: {0}")]
Store(#[from] Box<dyn std::error::Error + Send + Sync>),
}
// The trait is now async!
#[async_trait]
pub trait SecretStore: fmt::Debug + Send + Sync {
async fn get_raw(&self, namespace: &str, key: &str) -> Result<Vec<u8>, SecretStoreError>;
async fn set_raw(
&self,
namespace: &str,
key: &str,
value: &[u8],
) -> Result<(), SecretStoreError>;
}
// Use OnceCell for async-friendly, one-time initialization.
static SECRET_MANAGER: OnceCell<SecretManager> = OnceCell::const_new();
/// Initializes and returns a reference to the global SecretManager.
async fn get_secret_manager() -> &'static SecretManager {
SECRET_MANAGER.get_or_init(init_secret_manager).await
}
/// The async initialization function for the SecretManager.
async fn init_secret_manager() -> SecretManager {
let default_secret_store = "infisical".to_string();
let store_type = SECRET_STORE.as_ref().unwrap_or(&default_secret_store);
let store: Box<dyn SecretStore> = match store_type.as_str() {
"file" => Box::new(LocalFileSecretStore::default()),
// Anything other than "file" (including the "infisical" default) uses Infisical.
_ => {
let store = InfisicalSecretStore::new(
INFISICAL_URL.clone().expect("Infisical url must be set, see harmony_secret config for ways to provide it. You can try with HARMONY_SECRET_INFISICAL_URL"),
INFISICAL_PROJECT_ID.clone().expect("Infisical project id must be set, see harmony_secret config for ways to provide it. You can try with HARMONY_SECRET_INFISICAL_PROJECT_ID"),
INFISICAL_ENVIRONMENT.clone().expect("Infisical environment must be set, see harmony_secret config for ways to provide it. You can try with HARMONY_SECRET_INFISICAL_ENVIRONMENT"),
INFISICAL_CLIENT_ID.clone().expect("Infisical client id must be set, see harmony_secret config for ways to provide it. You can try with HARMONY_SECRET_INFISICAL_CLIENT_ID"),
INFISICAL_CLIENT_SECRET.clone().expect("Infisical client secret must be set, see harmony_secret config for ways to provide it. You can try with HARMONY_SECRET_INFISICAL_CLIENT_SECRET"),
)
.await
.expect("Failed to initialize Infisical secret store");
Box::new(store)
}
};
SecretManager::new(SECRET_NAMESPACE.clone(), store)
}
/// Manages the lifecycle of secrets, providing a simple static API.
#[derive(Debug)]
pub struct SecretManager {
namespace: String,
store: Box<dyn SecretStore>,
}
impl SecretManager {
fn new(namespace: String, store: Box<dyn SecretStore>) -> Self {
Self { namespace, store }
}
/// Retrieves and deserializes a secret.
pub async fn get<T: Secret>() -> Result<T, SecretStoreError> {
let manager = get_secret_manager().await;
let raw_value = manager.store.get_raw(&manager.namespace, T::KEY).await?;
serde_json::from_slice(&raw_value).map_err(|e| SecretStoreError::Deserialization {
key: T::KEY.to_string(),
source: e,
})
}
/// Serializes and stores a secret.
pub async fn set<T: Secret>(secret: &T) -> Result<(), SecretStoreError> {
let manager = get_secret_manager().await;
let raw_value =
serde_json::to_vec(secret).map_err(|e| SecretStoreError::Serialization {
key: T::KEY.to_string(),
source: e,
})?;
manager
.store
.set_raw(&manager.namespace, T::KEY, &raw_value)
.await
}
}
#[cfg(test)]
mod test {
use super::*;
use pretty_assertions::assert_eq;
use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct TestUserMeta {
labels: Vec<String>,
}
#[derive(Secret, Serialize, Deserialize, Debug, PartialEq)]
struct TestSecret {
user: String,
password: String,
metadata: TestUserMeta,
}
#[cfg(secrete2etest)]
#[tokio::test]
async fn set_and_retrieve_secret() {
let secret = TestSecret {
user: String::from("user"),
password: String::from("password"),
metadata: TestUserMeta {
labels: vec![
String::from("label1"),
String::from("label2"),
String::from(
"some longet label with \" special @#%$)(udiojcia[]]] \"'asdij'' characters Nдs はにほへとちり าฟันพัฒนา yağız şoföre ç <20> <20> <20> <20> <20> <20> <20> <20> <20> <20> <20> <20> <20> 👩‍👩‍👧‍👦 /span> 👩‍👧‍👦 and why not emojis ",
),
],
},
};
SecretManager::set(&secret).await.unwrap();
let value = SecretManager::get::<TestSecret>().await.unwrap();
assert_eq!(value, secret);
}
}

View File

@@ -1,129 +0,0 @@
use crate::{SecretStore, SecretStoreError};
use async_trait::async_trait;
use infisical::{
AuthMethod, InfisicalError,
client::Client,
secrets::{CreateSecretRequest, GetSecretRequest, UpdateSecretRequest},
};
use log::{info, warn};
#[derive(Debug)]
pub struct InfisicalSecretStore {
client: Client,
project_id: String,
environment: String,
}
impl InfisicalSecretStore {
/// Creates a new, authenticated Infisical client.
pub async fn new(
base_url: String,
project_id: String,
environment: String,
client_id: String,
client_secret: String,
) -> Result<Self, InfisicalError> {
info!("INFISICAL_STORE: Initializing client for URL: {base_url}");
// The builder and login logic remains the same.
let mut client = Client::builder().base_url(base_url).build().await?;
let auth_method = AuthMethod::new_universal_auth(client_id, client_secret);
client.login(auth_method).await?;
info!("INFISICAL_STORE: Client authenticated successfully.");
Ok(Self {
client,
project_id,
environment,
})
}
}
#[async_trait]
impl SecretStore for InfisicalSecretStore {
async fn get_raw(&self, _environment: &str, key: &str) -> Result<Vec<u8>, SecretStoreError> {
let environment = &self.environment;
info!("INFISICAL_STORE: Getting key '{key}' from environment '{environment}'");
let request = GetSecretRequest::builder(key, &self.project_id, environment).build();
match self.client.secrets().get(request).await {
Ok(secret) => Ok(secret.secret_value.into_bytes()),
Err(e) => {
// Correctly match against the actual InfisicalError enum.
match e {
// The specific case for a 404 Not Found error.
InfisicalError::HttpError { status, .. }
if status == http::StatusCode::NOT_FOUND =>
{
Err(SecretStoreError::NotFound {
namespace: environment.to_string(),
key: key.to_string(),
})
}
// For all other errors, wrap them in our generic Store error.
_ => Err(SecretStoreError::Store(Box::new(e))),
}
}
}
}
async fn set_raw(
&self,
_environment: &str,
key: &str,
val: &[u8],
) -> Result<(), SecretStoreError> {
info!(
"INFISICAL_STORE: Setting key '{key}' in environment '{}'",
self.environment
);
let value_str =
String::from_utf8(val.to_vec()).map_err(|e| SecretStoreError::Store(Box::new(e)))?;
// --- Upsert Logic ---
// First, attempt to update the secret.
let update_req = UpdateSecretRequest::builder(key, &self.project_id, &self.environment)
.secret_value(&value_str)
.build();
match self.client.secrets().update(update_req).await {
Ok(_) => {
info!("INFISICAL_STORE: Successfully updated secret '{key}'.");
Ok(())
}
Err(e) => {
// If the update failed, check if it was because the secret doesn't exist.
match e {
InfisicalError::HttpError { status, .. }
if status == http::StatusCode::NOT_FOUND =>
{
// The secret was not found, so we create it instead.
warn!(
"INFISICAL_STORE: Secret '{key}' not found for update, attempting to create it."
);
let create_req = CreateSecretRequest::builder(
key,
&value_str,
&self.project_id,
&self.environment,
)
.build();
// Handle potential errors during creation.
self.client
.secrets()
.create(create_req)
.await
.map_err(|create_err| SecretStoreError::Store(Box::new(create_err)))?;
info!("INFISICAL_STORE: Successfully created secret '{key}'.");
Ok(())
}
// Any other error during update is a genuine failure.
_ => Err(SecretStoreError::Store(Box::new(e))),
}
}
}
}
}

View File

@@ -1,105 +0,0 @@
use async_trait::async_trait;
use log::info;
use std::path::{Path, PathBuf};
use crate::{SecretStore, SecretStoreError};
#[derive(Debug, Default)]
pub struct LocalFileSecretStore;
impl LocalFileSecretStore {
/// Helper to consistently generate the secret file path.
fn get_file_path(base_dir: &Path, ns: &str, key: &str) -> PathBuf {
base_dir.join(format!("{ns}_{key}.json"))
}
}
#[async_trait]
impl SecretStore for LocalFileSecretStore {
async fn get_raw(&self, ns: &str, key: &str) -> Result<Vec<u8>, SecretStoreError> {
let data_dir = directories::BaseDirs::new()
.expect("Could not find a valid home directory")
.data_dir()
.join("harmony")
.join("secrets");
let file_path = Self::get_file_path(&data_dir, ns, key);
info!(
"LOCAL_STORE: Getting key '{key}' from namespace '{ns}' at {}",
file_path.display()
);
tokio::fs::read(&file_path)
.await
.map_err(|_| SecretStoreError::NotFound {
namespace: ns.to_string(),
key: key.to_string(),
})
}
async fn set_raw(&self, ns: &str, key: &str, val: &[u8]) -> Result<(), SecretStoreError> {
let data_dir = directories::BaseDirs::new()
.expect("Could not find a valid home directory")
.data_dir()
.join("harmony")
.join("secrets");
let file_path = Self::get_file_path(&data_dir, ns, key);
info!(
"LOCAL_STORE: Setting key '{key}' in namespace '{ns}' at {}",
file_path.display()
);
if let Some(parent_dir) = file_path.parent() {
tokio::fs::create_dir_all(parent_dir)
.await
.map_err(|e| SecretStoreError::Store(Box::new(e)))?;
}
tokio::fs::write(&file_path, val)
.await
.map_err(|e| SecretStoreError::Store(Box::new(e)))
}
}
#[cfg(test)]
mod tests {
use super::*;
use tempfile::tempdir;
#[tokio::test]
async fn test_set_and_get_raw_successfully() {
let dir = tempdir().unwrap();
let _store = LocalFileSecretStore::default(); // constructed for completeness; unused below
let ns = "test-ns";
let key = "test-key";
let value = b"{\"data\":\"test-value\"}";
// The store always resolves paths under the user data dir, so this test
// exercises the path helper directly against a temporary directory instead.
let file_path = LocalFileSecretStore::get_file_path(dir.path(), ns, key);
// Write the value where the store would place it under this base dir
tokio::fs::create_dir_all(file_path.parent().unwrap())
.await
.unwrap();
tokio::fs::write(&file_path, value).await.unwrap();
// Read it back from the same path to verify the round trip
let retrieved_value = tokio::fs::read(&file_path).await.unwrap();
assert_eq!(retrieved_value, value);
}
#[tokio::test]
async fn test_get_raw_not_found() {
let dir = tempdir().unwrap();
let ns = "test-ns";
let key = "non-existent-key";
// We need to check if reading a non-existent file gives the correct error
let file_path = LocalFileSecretStore::get_file_path(dir.path(), ns, key);
let result = tokio::fs::read(&file_path).await;
assert!(result.is_err());
}
}

View File

@@ -1,4 +0,0 @@
mod infisical;
mod local_file;
pub use infisical::*;
pub use local_file::*;

View File

@@ -1,8 +0,0 @@
export HARMONY_SECRET_NAMESPACE=harmony_test_secrets
export HARMONY_SECRET_INFISICAL_URL=http://localhost
export HARMONY_SECRET_INFISICAL_PROJECT_ID=eb4723dc-eede-44d7-98cc-c8e0caf29ccb
export HARMONY_SECRET_INFISICAL_ENVIRONMENT=dev
export HARMONY_SECRET_INFISICAL_CLIENT_ID=dd16b07f-0e38-4090-a1d0-922de9f44d91
export HARMONY_SECRET_INFISICAL_CLIENT_SECRET=bd2ae054e7759b11ca2e908494196337cc800bab138cb1f59e8d9b15ca3f286f
# The end-to-end test is gated behind the "secrete2etest" cfg flag
RUSTFLAGS="--cfg secrete2etest" cargo test

View File

@@ -1,13 +0,0 @@
[package]
name = "harmony-secret-derive"
version = "0.1.0"
edition = "2024"
[lib]
proc-macro = true
[dependencies]
quote = "1.0"
proc-macro2 = "1.0"
proc-macro-crate = "3.3"
syn = "2.0"

View File

@@ -1,38 +0,0 @@
use proc_macro::TokenStream;
use proc_macro_crate::{FoundCrate, crate_name};
use quote::quote;
use syn::{DeriveInput, Ident, parse_macro_input};
#[proc_macro_derive(Secret)]
pub fn derive_secret(input: TokenStream) -> TokenStream {
let input = parse_macro_input!(input as DeriveInput);
let struct_ident = &input.ident;
// The key for the secret will be the stringified name of the struct itself.
// e.g., `struct OKDClusterSecret` becomes key `"OKDClusterSecret"`.
let key = struct_ident.to_string();
// Find the path to the `harmony_secret` crate.
let secret_crate_path = match crate_name("harmony-secret") {
Ok(FoundCrate::Itself) => quote!(crate),
Ok(FoundCrate::Name(name)) => {
let ident = Ident::new(&name, proc_macro2::Span::call_site());
quote!(::#ident)
}
Err(e) => {
return syn::Error::new(proc_macro2::Span::call_site(), e.to_string())
.to_compile_error()
.into();
}
};
// The generated code now implements `Secret` for the struct itself.
// The struct must also derive `Serialize` and `Deserialize` for this to be useful.
let expanded = quote! {
impl #secret_crate_path::Secret for #struct_ident {
const KEY: &'static str = #key;
}
};
TokenStream::from(expanded)
}
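
To make the generated code concrete: the `quote!` block above expands to a plain trait impl whose key is the stringified struct name. Writing that impl by hand for a hypothetical struct (illustration only, not from this repo) is equivalent to using the derive:

```rust
use serde::{Deserialize, Serialize};

// Hypothetical struct, for illustration only.
#[derive(Serialize, Deserialize)]
struct DatabaseCredentials {
    user: String,
    password: String,
}

// This hand-written impl is what `#[derive(Secret)]` would generate.
impl harmony_secret::Secret for DatabaseCredentials {
    const KEY: &'static str = "DatabaseCredentials";
}

fn main() {
    assert_eq!(
        <DatabaseCredentials as harmony_secret::Secret>::KEY,
        "DatabaseCredentials"
    );
}
```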

View File

@@ -9,13 +9,7 @@ use widget::{help::HelpWidget, score::ScoreListWidget};
use std::{panic, sync::Arc, time::Duration};
use crossterm::event::{Event, EventStream, KeyCode, KeyEventKind};
use harmony::{
instrumentation::{self, HarmonyEvent},
inventory::Inventory,
maestro::Maestro,
score::Score,
topology::Topology,
};
use harmony::{maestro::Maestro, score::Score, topology::Topology};
use ratatui::{
self, Frame,
layout::{Constraint, Layout, Position},
@@ -45,62 +39,22 @@ pub mod tui {
///
/// #[tokio::main]
/// async fn main() {
/// harmony_tui::run(
/// Inventory::autoload(),
/// HAClusterTopology::autoload(),
/// vec![
/// Box::new(SuccessScore {}),
/// Box::new(ErrorScore {}),
/// Box::new(PanicScore {}),
/// ]
/// ).await.unwrap();
/// let inventory = Inventory::autoload();
/// let topology = HAClusterTopology::autoload();
/// let mut maestro = Maestro::new_without_initialization(inventory, topology);
///
/// maestro.register_all(vec![
/// Box::new(SuccessScore {}),
/// Box::new(ErrorScore {}),
/// Box::new(PanicScore {}),
/// ]);
/// harmony_tui::init(maestro).await.unwrap();
/// }
/// ```
pub async fn run<T: Topology + Send + Sync + 'static>(
inventory: Inventory,
topology: T,
scores: Vec<Box<dyn Score<T>>>,
) -> Result<(), Box<dyn std::error::Error>> {
let handle = init_instrumentation().await;
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
maestro.register_all(scores);
let result = init(maestro).await;
let _ = tokio::try_join!(handle);
result
}
async fn init<T: Topology + Send + Sync + 'static>(
pub async fn init<T: Topology + Send + Sync + 'static>(
maestro: Maestro<T>,
) -> Result<(), Box<dyn std::error::Error>> {
let result = HarmonyTUI::new(maestro).init().await;
instrumentation::instrument(HarmonyEvent::HarmonyFinished).unwrap();
result
}
async fn init_instrumentation() -> tokio::task::JoinHandle<()> {
let handle = tokio::spawn(handle_harmony_events());
loop {
if instrumentation::instrument(HarmonyEvent::HarmonyStarted).is_ok() {
break;
}
}
handle
}
async fn handle_harmony_events() {
instrumentation::subscribe("Harmony TUI Logger", async |event| {
if let HarmonyEvent::HarmonyFinished = event {
return false;
};
true
})
.await;
HarmonyTUI::new(maestro).init().await
}
pub struct HarmonyTUI<T: Topology> {

View File

@@ -1,17 +0,0 @@
[package]
name = "iobench"
edition = "2024"
version = "1.0.0"
license = "AGPL-3.0-or-later"
description = "A small command line utility to run fio benchmarks on localhost or remote ssh or kubernetes host. Was born out of a need to benchmark various ceph configurations!"
[dependencies]
clap = { version = "4.0", features = ["derive"] }
chrono = "0.4"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
csv = "1.1"
num_cpus = "1.13"
[workspace]

View File

@@ -1,10 +0,0 @@
This project was generated mostly by Gemini, but it works, so... :)
## To run iobench dashboard
```bash
virtualenv venv
source venv/bin/activate
pip install -r requirements_freeze.txt
python iobench-dash-v4.py
```

View File

@@ -1,229 +0,0 @@
import dash
from dash import dcc, html, Input, Output, State, clientside_callback, ClientsideFunction
import plotly.express as px
import pandas as pd
import dash_bootstrap_components as dbc
import io
# --- Data Loading and Preparation ---
# csv_data = """label,test_name,iops,bandwidth_kibps,latency_mean_ms,latency_stddev_ms
# Ceph HDD Only,read-4k-sync-test,1474.302,5897,0.673,0.591
# Ceph HDD Only,write-4k-sync-test,14.126,56,27.074,7.046
# Ceph HDD Only,randread-4k-sync-test,225.140,900,4.436,6.918
# Ceph HDD Only,randwrite-4k-sync-test,13.129,52,34.891,10.859
# Ceph HDD Only,multiread-4k-sync-test,6873.675,27494,0.578,0.764
# Ceph HDD Only,multiwrite-4k-sync-test,57.135,228,38.660,11.293
# Ceph HDD Only,multirandread-4k-sync-test,2451.376,9805,1.626,2.515
# Ceph HDD Only,multirandwrite-4k-sync-test,54.642,218,33.492,13.111
# Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,read-4k-sync-test,1495.700,5982,0.664,1.701
# Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,write-4k-sync-test,16.990,67,17.502,9.908
# Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,randread-4k-sync-test,159.256,637,6.274,9.232
# Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,randwrite-4k-sync-test,16.693,66,24.094,16.099
# Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,multiread-4k-sync-test,7305.559,29222,0.544,1.338
# Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,multiwrite-4k-sync-test,52.260,209,34.891,17.576
# Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,multirandread-4k-sync-test,700.606,2802,5.700,10.429
# Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,multirandwrite-4k-sync-test,52.723,210,29.709,25.829
# Ceph 2 Hosts WAL+DB SSD Only,randwrite-4k-sync-test,90.037,360,3.617,8.321
# Ceph WAL+DB SSD During Rebuild,randwrite-4k-sync-test,41.008,164,10.138,19.333
# Ceph WAL+DB SSD OSD HDD,read-4k-sync-test,1520.299,6081,0.654,1.539
# Ceph WAL+DB SSD OSD HDD,write-4k-sync-test,78.528,314,4.074,9.101
# Ceph WAL+DB SSD OSD HDD,randread-4k-sync-test,153.303,613,6.518,9.036
# Ceph WAL+DB SSD OSD HDD,randwrite-4k-sync-test,48.677,194,8.785,20.356
# Ceph WAL+DB SSD OSD HDD,multiread-4k-sync-test,6804.880,27219,0.584,1.422
# Ceph WAL+DB SSD OSD HDD,multiwrite-4k-sync-test,311.513,1246,4.978,9.458
# Ceph WAL+DB SSD OSD HDD,multirandread-4k-sync-test,581.756,2327,6.869,10.204
# Ceph WAL+DB SSD OSD HDD,multirandwrite-4k-sync-test,120.556,482,13.463,25.440
# """
#
# df = pd.read_csv(io.StringIO(csv_data))
df = pd.read_csv("iobench.csv") # Replace with the actual file path
df['bandwidth_mbps'] = df['bandwidth_kibps'] / 1024
# --- App Initialization and Global Settings ---
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.FLATLY])
# Create master lists of options for checklists
unique_labels = sorted(df['label'].unique())
unique_tests = sorted(df['test_name'].unique())
# Create a consistent color map for each unique label
color_map = {label: color for label, color in zip(unique_labels, px.colors.qualitative.Plotly)}
# --- App Layout ---
app.layout = dbc.Container([
# Header
dbc.Row(dbc.Col(html.H1("Ceph iobench Performance Dashboard", className="text-primary"),), className="my-4 text-center"),
# Controls and Graphs Row
dbc.Row([
# Control Panel Column
dbc.Col([
dbc.Card([
dbc.CardBody([
html.H4("Control Panel", className="card-title"),
html.Hr(),
# Metric Selection
dbc.Label("1. Select Metrics to Display:", html_for="metric-checklist", className="fw-bold"),
dcc.Checklist(
id='metric-checklist',
options=[
{'label': 'IOPS', 'value': 'iops'},
{'label': 'Latency (ms)', 'value': 'latency_mean_ms'},
{'label': 'Bandwidth (MB/s)', 'value': 'bandwidth_mbps'}
],
value=['iops', 'latency_mean_ms', 'bandwidth_mbps'], # Default selection
labelClassName="d-block"
),
html.Hr(),
# Configuration Selection
dbc.Label("2. Select Configurations:", html_for="config-checklist", className="fw-bold"),
dbc.ButtonGroup([
dbc.Button("All", id="config-select-all", n_clicks=0, color="primary", outline=True, size="sm"),
dbc.Button("None", id="config-select-none", n_clicks=0, color="primary", outline=True, size="sm"),
], className="mb-2"),
dcc.Checklist(
id='config-checklist',
options=[{'label': label, 'value': label} for label in unique_labels],
value=unique_labels, # Select all by default
labelClassName="d-block"
),
html.Hr(),
# Test Name Selection
dbc.Label("3. Select Tests:", html_for="test-checklist", className="fw-bold"),
dbc.ButtonGroup([
dbc.Button("All", id="test-select-all", n_clicks=0, color="primary", outline=True, size="sm"),
dbc.Button("None", id="test-select-none", n_clicks=0, color="primary", outline=True, size="sm"),
], className="mb-2"),
dcc.Checklist(
id='test-checklist',
options=[{'label': test, 'value': test} for test in unique_tests],
value=unique_tests, # Select all by default
labelClassName="d-block"
),
])
], className="mb-4")
], width=12, lg=4),
# Graph Display Column
dbc.Col(id='graph-container', width=12, lg=8)
])
], fluid=True)
# --- Callbacks ---
# Callback to handle "Select All" / "Select None" for configurations
@app.callback(
Output('config-checklist', 'value'),
Input('config-select-all', 'n_clicks'),
Input('config-select-none', 'n_clicks'),
prevent_initial_call=True
)
def select_all_none_configs(all_clicks, none_clicks):
ctx = dash.callback_context
if not ctx.triggered:
return dash.no_update
button_id = ctx.triggered[0]['prop_id'].split('.')[0]
if button_id == 'config-select-all':
return unique_labels
elif button_id == 'config-select-none':
return []
return dash.no_update
# Callback to handle "Select All" / "Select None" for tests
@app.callback(
Output('test-checklist', 'value'),
Input('test-select-all', 'n_clicks'),
Input('test-select-none', 'n_clicks'),
prevent_initial_call=True
)
def select_all_none_tests(all_clicks, none_clicks):
ctx = dash.callback_context
if not ctx.triggered:
return dash.no_update
button_id = ctx.triggered[0]['prop_id'].split('.')[0]
if button_id == 'test-select-all':
return unique_tests
elif button_id == 'test-select-none':
return []
return dash.no_update
# Main callback to update graphs based on all selections
@app.callback(
Output('graph-container', 'children'),
[Input('metric-checklist', 'value'),
Input('config-checklist', 'value'),
Input('test-checklist', 'value')]
)
def update_graphs(selected_metrics, selected_configs, selected_tests):
"""
This function is triggered when any control's value changes.
It generates and returns a list of graphs based on all user selections.
"""
# Handle cases where no selection is made to prevent errors and show a helpful message
if not all([selected_metrics, selected_configs, selected_tests]):
return dbc.Alert(
"Please select at least one item from each category (Metric, Configuration, and Test) to view data.",
color="info",
className="mt-4"
)
# Filter the DataFrame based on all selected criteria
filtered_df = df[df['label'].isin(selected_configs) & df['test_name'].isin(selected_tests)]
# If the filtered data is empty after selection, inform the user
if filtered_df.empty:
return dbc.Alert("No data available for the current selection.", color="warning", className="mt-4")
graph_list = []
metric_titles = {
'iops': 'IOPS Comparison (Higher is Better)',
'latency_mean_ms': 'Mean Latency (ms) Comparison (Lower is Better)',
'bandwidth_mbps': 'Bandwidth (MB/s) Comparison (Higher is Better)'
}
for metric in selected_metrics:
sort_order = 'total ascending' if metric == 'latency_mean_ms' else 'total descending'
error_y_param = 'latency_stddev_ms' if metric == 'latency_mean_ms' else None
fig = px.bar(
filtered_df,
x='test_name',
y=metric,
color='label',
barmode='group',
color_discrete_map=color_map,
error_y=error_y_param,
title=metric_titles.get(metric, metric),
labels={
"test_name": "Benchmark Test Name",
"iops": "IOPS",
"latency_mean_ms": "Mean Latency (ms)",
"bandwidth_mbps": "Bandwidth (MB/s)",
"label": "Cluster Configuration"
}
)
fig.update_layout(
height=500,
xaxis_title=None,
legend_title="Configuration",
title_x=0.5,
xaxis={'categoryorder': sort_order},
xaxis_tickangle=-45,
margin=dict(b=120) # Add bottom margin to prevent tick labels from being cut off
)
graph_list.append(dbc.Row(dbc.Col(dcc.Graph(figure=fig)), className="mb-4"))
return graph_list
# --- Run the App ---
if __name__ == '__main__':
app.run(debug=True)

View File

@@ -1,29 +0,0 @@
blinker==1.9.0
certifi==2025.7.14
charset-normalizer==3.4.2
click==8.2.1
dash==3.2.0
dash-bootstrap-components==2.0.3
Flask==3.1.1
idna==3.10
importlib_metadata==8.7.0
itsdangerous==2.2.0
Jinja2==3.1.6
MarkupSafe==3.0.2
narwhals==2.0.1
nest-asyncio==1.6.0
numpy==2.3.2
packaging==25.0
pandas==2.3.1
plotly==6.2.0
python-dateutil==2.9.0.post0
pytz==2025.2
requests==2.32.4
retrying==1.4.1
setuptools==80.9.0
six==1.17.0
typing_extensions==4.14.1
tzdata==2025.2
urllib3==2.5.0
Werkzeug==3.1.3
zipp==3.23.0

View File

@@ -1,41 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: iobench
labels:
app: iobench
spec:
replicas: 1
selector:
matchLabels:
app: iobench
template:
metadata:
labels:
app: iobench
spec:
containers:
- name: fio
image: juicedata/fio:latest # Replace with your preferred fio image
imagePullPolicy: IfNotPresent
command: [ "sleep", "infinity" ] # Keeps the container running for kubectl exec
volumeMounts:
- name: iobench-pvc
mountPath: /data # Mount the PVC at /data
volumes:
- name: iobench-pvc
persistentVolumeClaim:
claimName: iobench-pvc # Matches your PVC name
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: iobench-pvc
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 5Gi
storageClassName: ceph-block

View File

@@ -1,253 +0,0 @@
use std::fs;
use std::io::{self, Write};
use std::process::{Command, Stdio};
use std::thread;
use std::time::Duration;
use chrono::Local;
use clap::Parser;
use serde::{Deserialize, Serialize};
/// A simple yet powerful I/O benchmarking tool using fio.
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// Target for the benchmark.
/// Formats:
/// - localhost (default)
/// - ssh/{user}@{host}
/// - ssh/{user}@{host}:{port}
/// - k8s/{namespace}/{pod}
#[arg(short, long, default_value = "localhost")]
target: String,
#[arg(short, long, default_value = ".")]
benchmark_dir: String,
/// Comma-separated list of tests to run.
/// Available tests: read, write, randread, randwrite,
/// multiread, multiwrite, multirandread, multirandwrite.
#[arg(long, default_value = "read,write,randread,randwrite,multiread,multiwrite,multirandread,multirandwrite")]
tests: String,
/// Duration of each test in seconds.
#[arg(long, default_value_t = 15)]
duration: u64,
/// Output directory for results.
/// Defaults to ./iobench-{current_datetime}.
#[arg(long)]
output_dir: Option<String>,
/// The size of the test file for fio.
#[arg(long, default_value = "1G")]
size: String,
/// The block size for I/O operations.
#[arg(long, default_value = "4k")]
block_size: String,
}
#[derive(Debug, Serialize, Deserialize)]
struct FioOutput {
jobs: Vec<FioJobResult>,
}
#[derive(Debug, Serialize, Deserialize)]
struct FioJobResult {
jobname: String,
read: FioMetrics,
write: FioMetrics,
}
#[derive(Debug, Serialize, Deserialize)]
struct FioMetrics {
bw: f64,
iops: f64,
clat_ns: LatencyMetrics,
}
#[derive(Debug, Serialize, Deserialize)]
struct LatencyMetrics {
mean: f64,
stddev: f64,
}
#[derive(Debug, Serialize)]
struct BenchmarkResult {
test_name: String,
iops: f64,
bandwidth_kibps: f64,
latency_mean_ms: f64,
latency_stddev_ms: f64,
}
fn main() -> io::Result<()> {
let args = Args::parse();
let output_dir = args.output_dir.unwrap_or_else(|| {
format!("./iobench-{}", Local::now().format("%Y-%m-%d-%H%M%S"))
});
fs::create_dir_all(&output_dir)?;
let tests_to_run: Vec<&str> = args.tests.split(',').collect();
let mut results = Vec::new();
for test in tests_to_run {
println!("--------------------------------------------------");
println!("Running test: {}", test);
let (rw, numjobs) = match test {
"read" => ("read", 1),
"write" => ("write", 1),
"randread" => ("randread", 1),
"randwrite" => ("randwrite", 1),
"multiread" => ("read", 4),
"multiwrite" => ("write", 4),
"multirandread" => ("randread", 4),
"multirandwrite" => ("randwrite", 4),
_ => {
eprintln!("Unknown test: {}. Skipping.", test);
continue;
}
};
let test_name = format!("{}-{}-sync-test", test, args.block_size);
let fio_command = format!(
"fio --filename={}/iobench_testfile --direct=1 --fsync=1 --rw={} --bs={} --numjobs={} --iodepth=1 --runtime={} --time_based --group_reporting --name={} --size={} --output-format=json",
args.benchmark_dir, rw, args.block_size, numjobs, args.duration, test_name, args.size
);
println!("Executing command:\n{}\n", fio_command);
let output = match run_command(&args.target, &fio_command) {
Ok(out) => out,
Err(e) => {
eprintln!("Failed to execute command for test {}: {}", test, e);
continue;
}
};
let result = parse_fio_output(&output, &test_name, rw);
// TODO store raw fio output and print it
match result {
Ok(res) => {
results.push(res);
}
Err(e) => {
eprintln!("Error parsing fio output for test {}: {}", test, e);
eprintln!("Raw output:\n{}", output);
}
}
println!("{output}");
println!("Test {} completed.", test);
// A brief pause to let the system settle before the next test.
thread::sleep(Duration::from_secs(2));
}
// Cleanup the test file on the target
println!("--------------------------------------------------");
println!("Cleaning up test file on target...");
let cleanup_command = "rm -f ./iobench_testfile";
if let Err(e) = run_command(&args.target, cleanup_command) {
eprintln!("Warning: Failed to clean up test file on target: {}", e);
} else {
println!("Cleanup successful.");
}
if results.is_empty() {
println!("\nNo benchmark results to display.");
return Ok(());
}
// Output results to a CSV file for easy analysis
let csv_path = format!("{}/summary.csv", output_dir);
let mut wtr = csv::Writer::from_path(&csv_path)?;
for result in &results {
wtr.serialize(result)?;
}
wtr.flush()?;
println!("\nBenchmark summary saved to {}", csv_path);
println!("\n--- Benchmark Results Summary ---");
println!("{:<25} {:>10} {:>18} {:>20} {:>22}", "Test Name", "IOPS", "Bandwidth (KiB/s)", "Latency Mean (ms)", "Latency StdDev (ms)");
println!("{:-<98}", "");
for result in results {
println!("{:<25} {:>10.2} {:>18.2} {:>20.4} {:>22.4}", result.test_name, result.iops, result.bandwidth_kibps, result.latency_mean_ms, result.latency_stddev_ms);
}
Ok(())
}
fn run_command(target: &str, command: &str) -> io::Result<String> {
let (program, args) = if target == "localhost" {
("sudo", vec!["sh".to_string(), "-c".to_string(), command.to_string()])
} else if target.starts_with("ssh/") {
let target_str = target.strip_prefix("ssh/").unwrap();
let ssh_target;
let mut ssh_args = vec!["-o".to_string(), "StrictHostKeyChecking=no".to_string()];
let port_parts: Vec<&str> = target_str.split(':').collect();
if port_parts.len() == 2 {
ssh_target = port_parts[0].to_string();
ssh_args.push("-p".to_string());
ssh_args.push(port_parts[1].to_string());
} else {
ssh_target = target_str.to_string();
}
ssh_args.push(ssh_target);
ssh_args.push(format!("sudo sh -c '{}'", command));
("ssh", ssh_args)
} else if target.starts_with("k8s/") {
let parts: Vec<&str> = target.strip_prefix("k8s/").unwrap().split('/').collect();
if parts.len() != 2 {
return Err(io::Error::new(io::ErrorKind::InvalidInput, "Invalid k8s target format. Expected k8s/{namespace}/{pod}"));
}
let namespace = parts[0];
let pod = parts[1];
("kubectl", vec!["exec".to_string(), "-n".to_string(), namespace.to_string(), pod.to_string(), "--".to_string(), "sh".to_string(), "-c".to_string(), command.to_string()])
} else {
return Err(io::Error::new(io::ErrorKind::InvalidInput, "Invalid target format"));
};
let mut cmd = Command::new(program);
cmd.args(&args);
cmd.stdout(Stdio::piped()).stderr(Stdio::piped());
let child = cmd.spawn()?;
let output = child.wait_with_output()?;
if !output.status.success() {
eprintln!("Command failed with status: {}", output.status);
io::stderr().write_all(&output.stderr)?;
return Err(io::Error::new(io::ErrorKind::Other, "Command execution failed"));
}
String::from_utf8(output.stdout)
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
}
fn parse_fio_output(output: &str, test_name: &str, rw: &str) -> Result<BenchmarkResult, String> {
let fio_data: FioOutput = serde_json::from_str(output)
.map_err(|e| format!("Failed to deserialize fio JSON: {}", e))?;
let job_result = fio_data.jobs.iter()
.find(|j| j.jobname == test_name)
.ok_or_else(|| format!("Could not find job result for '{}' in fio output", test_name))?;
let metrics = if rw.contains("read") {
&job_result.read
} else {
&job_result.write
};
Ok(BenchmarkResult {
test_name: test_name.to_string(),
iops: metrics.iops,
bandwidth_kibps: metrics.bw,
latency_mean_ms: metrics.clat_ns.mean / 1_000_000.0,
latency_stddev_ms: metrics.clat_ns.stddev / 1_000_000.0,
})
}
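
A quick sanity check for `parse_fio_output` could look like the sketch below; the JSON is a minimal hand-written fragment shaped like the `FioOutput` structs above, not real fio output (which carries many more fields that serde ignores by default).

```rust
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn parses_minimal_fio_json() {
        // Minimal JSON matching the FioOutput/FioJobResult/FioMetrics structs.
        let json = r#"{
            "jobs": [{
                "jobname": "read-4k-sync-test",
                "read": {
                    "bw": 5897,
                    "iops": 1474.3,
                    "clat_ns": { "mean": 673000.0, "stddev": 591000.0 }
                },
                "write": {
                    "bw": 0,
                    "iops": 0.0,
                    "clat_ns": { "mean": 0.0, "stddev": 0.0 }
                }
            }]
        }"#;
        let res = parse_fio_output(json, "read-4k-sync-test", "read").unwrap();
        assert_eq!(res.iops, 1474.3);
        // clat_ns is in nanoseconds; the parser converts it to milliseconds.
        assert!((res.latency_mean_ms - 0.673).abs() < 1e-9);
    }
}
```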

View File

@@ -12,7 +12,7 @@ env_logger = { workspace = true }
yaserde = { git = "https://github.com/jggc/yaserde.git" }
yaserde_derive = { git = "https://github.com/jggc/yaserde.git" }
xml-rs = "0.8"
thiserror.workspace = true
thiserror = "1.0"
async-trait = { workspace = true }
tokio = { workspace = true }
uuid = { workspace = true }