Compare commits

..

22 Commits

Author SHA1 Message Date
Ian Letourneau
5cc93d3107 use new harmony_cli::run 2025-08-04 17:22:08 -04:00
Ian Letourneau
569839bf66 Merge branch 'master' into feat/crd-alertmanager-configs 2025-08-04 17:04:50 -04:00
Ian Letourneau
e078f5c062 quick cleanup 2025-08-04 15:28:01 -04:00
Ian Letourneau
a8394cda47 simpler check to see if a crd exists + cleanup 2025-08-02 11:58:39 -04:00
Ian Letourneau
064f6d88ba quick cleanup 2025-08-02 11:10:50 -04:00
Ian Letourneau
9403581be5 fix check to ensure prometheus operator is installed 2025-08-02 11:10:20 -04:00
Ian Letourneau
056152a1e5 remove comment 2025-08-01 23:52:26 -04:00
Ian Letourneau
c6b255d0bd merge configure_receiver with AlertReceiver::install & cleanup unused stuff 2025-08-01 23:09:12 -04:00
Ian Letourneau
4b6bebcaf1 remove unnecessary configure_receivers method from trait 2025-08-01 18:26:05 -04:00
Ian Letourneau
961a300154 cleanup unused k3d prometheus monitoring score & simplify design 2025-08-01 17:59:18 -04:00
a5deda647b wip: need to convert the generic type AlertReceiver<CRDPrometheus> to CRDAlertManagerReceiver in k8sAnywhereTopology which extends AlertReceiver<CRDPrometheus> in order to be able to configure and install the receiver and its associated crd-alertmanagerconfigs to the cluster 2025-07-31 16:17:30 -04:00
0b965b6570 Merge remote-tracking branch 'origin/master' into feat/crd-alertmanager-configs 2025-07-28 15:22:24 -04:00
d7bce37b69 fix: cargo fmt 2025-07-28 15:18:46 -04:00
b56a30de3c fix: prometheus operator and grafana operator deploy to the application namespace on local k3d
if kube-prometheus-operator is present, installation of prometheus-operator is skipped
outside of a local k3d installation, operator installation is skipped
2025-07-28 15:15:10 -04:00
b9e208f4cf feat: added default prometheus rules and grafana dashboard for application monitoring 2025-07-22 13:26:03 -04:00
1d8b503bd2 wip: uses a helm chart to deploy a prometheus operator if CRDs are not present in the cluster, and deploys a grafana operator.
added a sample dashboard and prometheus data source to grafana
2025-07-21 17:59:35 -04:00
114219385f wip: added impl for prometheuses, alertmanagers, prometheusrules, servicemonitors, and some default rules that are deployed for application monitoring
working on implementing grafana CRDs via grafana operator
need to link prometheus rules and alertmanagers in prometheus; testing shows that prometheus isn't detecting them automatically
2025-07-16 15:56:00 -04:00
1525ac2226 fix: git conflict 2025-07-14 14:34:53 -04:00
55a4e79ec4 fix: added updated Cargo 2025-07-14 14:18:32 -04:00
7b91088828 feat: added impl for webhook receiver for crd alertmanagerconfigs 2025-07-14 13:41:48 -04:00
e61ec015ab feat: added impl for DiscordWebhook receiver to receive application alerts from the application feature's namespaces 2025-07-14 13:06:47 -04:00
819f4a32fd wip: added an implementation of CRD AlertmanagerConfigs that can be used to add a discord webhook receiver; currently the namespace is hard-coded, there are a bunch of todo!()s that need to be cleaned up, and flags need to be added so that Alertmanager will automatically register the CRD 2025-07-11 16:01:52 -04:00
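
For the a5deda647b work-in-progress commit above: a minimal sketch of one way to recover the concrete CRD-aware receiver from the generic trait object. The trait shape and the as_any helper are illustrative assumptions, not code from this branch.

use std::any::Any;

// Hypothetical bridge: expose an `as_any` hook on the generic receiver trait so the
// topology can downcast to the CRD-aware receiver when it must install AlertmanagerConfig CRDs.
trait AlertReceiver<S>: Any {
    fn install(&self, sender: &S);
    fn as_any(&self) -> &dyn Any;
}

struct CRDPrometheus;
struct CRDAlertManagerReceiver;

impl AlertReceiver<CRDPrometheus> for CRDAlertManagerReceiver {
    fn install(&self, _sender: &CRDPrometheus) { /* apply the AlertmanagerConfig CRs here */ }
    fn as_any(&self) -> &dyn Any {
        self
    }
}

fn configure(receiver: &dyn AlertReceiver<CRDPrometheus>, sender: &CRDPrometheus) {
    // Only the CRD-backed receiver knows how to install its CRDs; other receivers are skipped.
    if let Some(crd_receiver) = receiver.as_any().downcast_ref::<CRDAlertManagerReceiver>() {
        crd_receiver.install(sender);
    }
}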
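
For the b56a30de3c commit above: a rough sketch of the described skip logic, mirroring the shell-based CRD check that appears further down in this diff. The function name and the is_local_k3d flag are placeholders.

use std::process::Command;

// Install the operator only when no prometheuses CRD exists yet and we are on a local k3d cluster;
// if kube-prometheus-operator (or any other operator) already registered the CRD, skip the install.
fn should_install_prometheus_operator(is_local_k3d: bool) -> bool {
    let crd_present = Command::new("sh")
        .args(["-c", "kubectl get crd -A | grep -i prometheuses"])
        .status()
        .map(|status| status.success())
        .unwrap_or(false);
    !crd_present && is_local_k3d
}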
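
For the 114219385f commit above: one likely reason Prometheus does not detect rules and alertmanagers automatically is selector wiring, since the Prometheus CR only watches PrometheusRule and ServiceMonitor objects matched by its selectors, and Alertmanagers must be listed explicitly. A sketch of the relevant fields, built with serde_json::json! as elsewhere in this repository; the label values and the alertmanager-operated service name are placeholders.

use serde_json::json;

// Minimal Prometheus CR spec showing the selector/alerting fields that must line up with
// the labels on the PrometheusRule and ServiceMonitor objects for them to be detected.
fn prometheus_cr(namespace: &str) -> serde_json::Value {
    json!({
        "apiVersion": "monitoring.coreos.com/v1",
        "kind": "Prometheus",
        "metadata": { "name": "application-prometheus", "namespace": namespace },
        "spec": {
            "ruleSelector": { "matchLabels": { "role": "alert-rules" } },
            "serviceMonitorSelector": { "matchLabels": { "release": "application-monitoring" } },
            "alerting": {
                "alertmanagers": [
                    { "namespace": namespace, "name": "alertmanager-operated", "port": "web" }
                ]
            }
        }
    })
}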
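
For the 819f4a32fd commit above: a sketch of an AlertmanagerConfig resource with a single webhook receiver. The name, namespace, and URL are placeholders rather than values taken from this branch.

use serde_json::json;

// AlertmanagerConfig (monitoring.coreos.com/v1alpha1) routing everything in the tenant
// namespace to one webhook receiver, e.g. a Discord-compatible endpoint.
fn discord_webhook_alertmanager_config(namespace: &str, webhook_url: &str) -> serde_json::Value {
    json!({
        "apiVersion": "monitoring.coreos.com/v1alpha1",
        "kind": "AlertmanagerConfig",
        "metadata": { "name": "discord-webhook", "namespace": namespace },
        "spec": {
            "route": { "receiver": "discord" },
            "receivers": [
                { "name": "discord", "webhookConfigs": [ { "url": webhook_url } ] }
            ]
        }
    })
}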
124 changed files with 1007 additions and 4935 deletions

View File

@@ -9,7 +9,7 @@ jobs:
check:
runs-on: docker
container:
image: hub.nationtech.io/harmony/harmony_composer:latest
image: hub.nationtech.io/harmony/harmony_composer:latest@sha256:eb0406fcb95c63df9b7c4b19bc50ad7914dd8232ce98e9c9abef628e07c69386
steps:
- name: Checkout code
uses: actions/checkout@v4

View File

@@ -7,7 +7,7 @@ on:
jobs:
package_harmony_composer:
container:
image: hub.nationtech.io/harmony/harmony_composer:latest
image: hub.nationtech.io/harmony/harmony_composer:latest@sha256:eb0406fcb95c63df9b7c4b19bc50ad7914dd8232ce98e9c9abef628e07c69386
runs-on: dind
steps:
- name: Checkout code
@@ -45,14 +45,14 @@ jobs:
-H "Authorization: token ${{ secrets.GITEATOKEN }}" \
"https://git.nationtech.io/api/v1/repos/nationtech/harmony/releases/tags/snapshot-latest" \
| jq -r '.id // empty')
if [ -n "$RELEASE_ID" ]; then
# Delete existing release
curl -X DELETE \
-H "Authorization: token ${{ secrets.GITEATOKEN }}" \
"https://git.nationtech.io/api/v1/repos/nationtech/harmony/releases/$RELEASE_ID"
fi
# Create new release
RESPONSE=$(curl -X POST \
-H "Authorization: token ${{ secrets.GITEATOKEN }}" \
@@ -65,7 +65,7 @@ jobs:
"prerelease": true
}' \
"https://git.nationtech.io/api/v1/repos/nationtech/harmony/releases")
echo "RELEASE_ID=$(echo $RESPONSE | jq -r '.id')" >> $GITHUB_ENV
- name: Upload Linux binary

648
Cargo.lock generated

File diff suppressed because it is too large

View File

@@ -12,9 +12,6 @@ members = [
"harmony_cli",
"k3d",
"harmony_composer",
"harmony_inventory_agent",
"harmony_secret_derive",
"harmony_secret",
]
[workspace.package]
@@ -23,7 +20,7 @@ readme = "README.md"
license = "GNU AGPL v3"
[workspace.dependencies]
log = { version = "0.4", features = ["kv"] }
log = "0.4"
env_logger = "0.11"
derive-new = "0.7"
async-trait = "0.1"
@@ -56,12 +53,6 @@ chrono = "0.4"
similar = "2"
uuid = { version = "1.11", features = ["v4", "fast-rng", "macro-diagnostics"] }
pretty_assertions = "1.4.1"
tempfile = "3.20.0"
bollard = "0.19.1"
base64 = "0.22.1"
tar = "0.4.44"
lazy_static = "1.5.0"
directories = "6.0.0"
thiserror = "2.0.14"
serde = { version = "1.0.209", features = ["derive", "rc"] }
serde_json = "1.0.127"

View File

@@ -1,4 +1,4 @@
FROM docker.io/rust:1.89.0 AS build
FROM docker.io/rust:1.87.0 AS build
WORKDIR /app
@@ -6,14 +6,13 @@ COPY . .
RUN cargo build --release --bin harmony_composer
FROM docker.io/rust:1.89.0
FROM docker.io/rust:1.87.0
WORKDIR /app
RUN rustup target add x86_64-pc-windows-gnu
RUN rustup target add x86_64-unknown-linux-gnu
RUN rustup component add rustfmt
RUN rustup component add clippy
RUN apt update
@@ -23,4 +22,4 @@ RUN apt install -y nodejs docker.io mingw-w64
COPY --from=build /app/target/release/harmony_composer .
ENTRYPOINT ["/app/harmony_composer"]
ENTRYPOINT ["/app/harmony_composer"]

View File

@@ -1,7 +1,5 @@
#!/bin/sh
set -e
cargo check --all-targets --all-features --keep-going
cargo fmt --check
cargo clippy
cargo test

View File

@@ -1,11 +1,17 @@
use std::{path::PathBuf, str::FromStr, sync::Arc};
use std::{path::PathBuf, sync::Arc};
use harmony::{
data::Id,
inventory::Inventory,
maestro::Maestro,
modules::{
application::{ApplicationScore, RustWebFramework, RustWebapp, features::Monitoring},
monitoring::alert_channel::webhook_receiver::WebhookReceiver,
application::{
ApplicationScore, RustWebFramework, RustWebapp,
features::{ContinuousDelivery, Monitoring},
},
monitoring::alert_channel::{
discord_alert_channel::DiscordWebhook, webhook_receiver::WebhookReceiver,
},
tenant::TenantScore,
},
topology::{K8sAnywhereTopology, Url, tenant::TenantConfig},
@@ -19,7 +25,7 @@ async fn main() {
//the TenantConfig.name must match
let tenant = TenantScore {
config: TenantConfig {
id: Id::from_str("test-tenant-id").unwrap(),
id: Id::from_str("test-tenant-id"),
name: "example-monitoring".to_string(),
..Default::default()
},

View File

@@ -125,47 +125,40 @@ spec:
name: nginx"#,
)
.unwrap();
deployment
return deployment;
}
fn nginx_deployment_2() -> Deployment {
let pod_template = PodTemplateSpec {
metadata: Some(ObjectMeta {
labels: Some(BTreeMap::from([(
"app".to_string(),
"nginx-test".to_string(),
)])),
let mut pod_template = PodTemplateSpec::default();
pod_template.metadata = Some(ObjectMeta {
labels: Some(BTreeMap::from([(
"app".to_string(),
"nginx-test".to_string(),
)])),
..Default::default()
});
pod_template.spec = Some(PodSpec {
containers: vec![Container {
name: "nginx".to_string(),
image: Some("nginx".to_string()),
..Default::default()
}),
spec: Some(PodSpec {
containers: vec![Container {
name: "nginx".to_string(),
image: Some("nginx".to_string()),
..Default::default()
}],
..Default::default()
}),
}],
..Default::default()
});
let mut spec = DeploymentSpec::default();
spec.template = pod_template;
spec.selector = LabelSelector {
match_expressions: None,
match_labels: Some(BTreeMap::from([(
"app".to_string(),
"nginx-test".to_string(),
)])),
};
let spec = DeploymentSpec {
template: pod_template,
selector: LabelSelector {
match_expressions: None,
match_labels: Some(BTreeMap::from([(
"app".to_string(),
"nginx-test".to_string(),
)])),
},
..Default::default()
};
let mut deployment = Deployment::default();
deployment.spec = Some(spec);
deployment.metadata.name = Some("nginx-test".to_string());
Deployment {
spec: Some(spec),
metadata: ObjectMeta {
name: Some("nginx-test".to_string()),
..Default::default()
},
..Default::default()
}
deployment
}
fn nginx_deployment() -> Deployment {

View File

@@ -23,7 +23,7 @@ async fn main() {
// This config can be extended as needed for more complicated configurations
config: LAMPConfig {
project_root: "./php".into(),
database_size: "4Gi".to_string().into(),
database_size: format!("4Gi").into(),
..Default::default()
},
};

View File

@@ -1,4 +1,4 @@
use std::{collections::HashMap, str::FromStr};
use std::collections::HashMap;
use harmony::{
data::Id,
@@ -28,7 +28,7 @@ use harmony::{
async fn main() {
let tenant = TenantScore {
config: TenantConfig {
id: Id::from_str("1234").unwrap(),
id: Id::from_string("1234".to_string()),
name: "test-tenant".to_string(),
resource_limits: ResourceLimits {
cpu_request_cores: 6.0,

View File

@@ -8,6 +8,7 @@ use harmony::{
hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
infra::opnsense::OPNSenseManagementInterface,
inventory::Inventory,
maestro::Maestro,
modules::{
http::StaticFilesHttpScore,
ipxe::IpxeScore,
@@ -129,21 +130,16 @@ async fn main() {
"./data/watchguard/pxe-http-files".to_string(),
));
let ipxe_score = IpxeScore::new();
harmony_tui::run(
inventory,
topology,
vec![
Box::new(dns_score),
Box::new(bootstrap_dhcp_score),
Box::new(bootstrap_load_balancer_score),
Box::new(load_balancer_score),
Box::new(tftp_score),
Box::new(http_score),
Box::new(ipxe_score),
Box::new(dhcp_score),
],
)
.await
.unwrap();
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
maestro.register_all(vec![
Box::new(dns_score),
Box::new(bootstrap_dhcp_score),
Box::new(bootstrap_load_balancer_score),
Box::new(load_balancer_score),
Box::new(tftp_score),
Box::new(http_score),
Box::new(ipxe_score),
Box::new(dhcp_score),
]);
harmony_tui::init(maestro).await.unwrap();
}

View File

@@ -8,6 +8,7 @@ use harmony::{
hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
infra::opnsense::OPNSenseManagementInterface,
inventory::Inventory,
maestro::Maestro,
modules::{
dummy::{ErrorScore, PanicScore, SuccessScore},
http::StaticFilesHttpScore,
@@ -83,25 +84,20 @@ async fn main() {
let http_score = StaticFilesHttpScore::new(Url::LocalFolder(
"./data/watchguard/pxe-http-files".to_string(),
));
harmony_tui::run(
inventory,
topology,
vec![
Box::new(dns_score),
Box::new(dhcp_score),
Box::new(load_balancer_score),
Box::new(tftp_score),
Box::new(http_score),
Box::new(OPNsenseShellCommandScore {
opnsense: opnsense.get_opnsense_config(),
command: "touch /tmp/helloharmonytouching".to_string(),
}),
Box::new(SuccessScore {}),
Box::new(ErrorScore {}),
Box::new(PanicScore {}),
],
)
.await
.unwrap();
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
maestro.register_all(vec![
Box::new(dns_score),
Box::new(dhcp_score),
Box::new(load_balancer_score),
Box::new(tftp_score),
Box::new(http_score),
Box::new(OPNsenseShellCommandScore {
opnsense: opnsense.get_opnsense_config(),
command: "touch /tmp/helloharmonytouching".to_string(),
}),
Box::new(SuccessScore {}),
Box::new(ErrorScore {}),
Box::new(PanicScore {}),
]);
harmony_tui::init(maestro).await.unwrap();
}

View File

@@ -1,5 +1,3 @@
use std::str::FromStr;
use harmony::{
data::Id,
inventory::Inventory,
@@ -11,7 +9,7 @@ use harmony::{
async fn main() {
let tenant = TenantScore {
config: TenantConfig {
id: Id::from_str("test-tenant-id").unwrap(),
id: Id::from_str("test-tenant-id"),
name: "testtenant".to_string(),
..Default::default()
},

View File

@@ -2,6 +2,7 @@ use std::net::{SocketAddr, SocketAddrV4};
use harmony::{
inventory::Inventory,
maestro::Maestro,
modules::{
dns::DnsScore,
dummy::{ErrorScore, PanicScore, SuccessScore},
@@ -15,19 +16,18 @@ use harmony_macros::ipv4;
#[tokio::main]
async fn main() {
harmony_tui::run(
Inventory::autoload(),
DummyInfra {},
vec![
Box::new(SuccessScore {}),
Box::new(ErrorScore {}),
Box::new(PanicScore {}),
Box::new(DnsScore::new(vec![], None)),
Box::new(build_large_score()),
],
)
.await
.unwrap();
let inventory = Inventory::autoload();
let topology = DummyInfra {};
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
maestro.register_all(vec![
Box::new(SuccessScore {}),
Box::new(ErrorScore {}),
Box::new(PanicScore {}),
Box::new(DnsScore::new(vec![], None)),
Box::new(build_large_score()),
]);
harmony_tui::init(maestro).await.unwrap();
}
fn build_large_score() -> LoadBalancerScore {

View File

@@ -5,9 +5,6 @@ version.workspace = true
readme.workspace = true
license.workspace = true
[features]
testing = []
[dependencies]
rand = "0.9"
hex = "0.4"
@@ -16,8 +13,8 @@ reqwest = { version = "0.11", features = ["blocking", "json"] }
russh = "0.45.0"
rust-ipmi = "0.1.1"
semver = "1.0.23"
serde.workspace = true
serde_json.workspace = true
serde = { version = "1.0.209", features = ["derive", "rc"] }
serde_json = "1.0.127"
tokio.workspace = true
derive-new.workspace = true
log.workspace = true
@@ -38,8 +35,8 @@ serde-value.workspace = true
helm-wrapper-rs = "0.4.0"
non-blank-string-rs = "1.0.4"
k3d-rs = { path = "../k3d" }
directories.workspace = true
lazy_static.workspace = true
directories = "6.0.0"
lazy_static = "1.5.0"
dockerfile_builder = "0.1.5"
temp-file = "0.1.9"
convert_case.workspace = true
@@ -59,7 +56,7 @@ similar.workspace = true
futures-util = "0.3.31"
tokio-util = "0.7.15"
strum = { version = "0.27.1", features = ["derive"] }
tempfile.workspace = true
tempfile = "3.20.0"
serde_with = "3.14.0"
schemars = "0.8.22"
kube-derive = "1.1.0"
@@ -67,7 +64,6 @@ bollard.workspace = true
tar.workspace = true
base64.workspace = true
once_cell = "1.21.3"
harmony-secret-derive = { version = "0.1.0", path = "../harmony_secret_derive" }
[dev-dependencies]
pretty_assertions.workspace = true

Binary file not shown.

View File

@@ -11,5 +11,5 @@ lazy_static! {
pub static ref REGISTRY_PROJECT: String =
std::env::var("HARMONY_REGISTRY_PROJECT").unwrap_or_else(|_| "harmony".to_string());
pub static ref DRY_RUN: bool =
std::env::var("HARMONY_DRY_RUN").is_ok_and(|value| value.parse().unwrap_or(false));
std::env::var("HARMONY_DRY_RUN").map_or(true, |value| value.parse().unwrap_or(true));
}
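
The two expressions in this hunk default differently: the is_ok_and form leaves dry-run off when HARMONY_DRY_RUN is unset or unparsable, while the map_or form turns it on. A standalone comparison, assuming var_value stands in for the std::env::var result (not code from the repository):

use std::env::VarError;

// Unset or unparsable value => false: dry-run is opt-in.
fn dry_run_is_ok_and(var_value: Result<String, VarError>) -> bool {
    var_value.is_ok_and(|value| value.parse().unwrap_or(false))
}

// Unset or unparsable value => true: dry-run is opt-out.
fn dry_run_map_or(var_value: Result<String, VarError>) -> bool {
    var_value.map_or(true, |value| value.parse().unwrap_or(true))
}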

View File

@@ -1,6 +1,5 @@
use rand::distr::Alphanumeric;
use rand::distr::SampleString;
use std::str::FromStr;
use std::time::SystemTime;
use std::time::UNIX_EPOCH;
@@ -24,13 +23,13 @@ pub struct Id {
value: String,
}
impl FromStr for Id {
type Err = ();
impl Id {
pub fn from_string(value: String) -> Self {
Self { value }
}
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(Id {
value: s.to_string(),
})
pub fn from_str(value: &str) -> Self {
Self::from_string(value.to_string())
}
}

View File

@@ -47,7 +47,7 @@ impl serde::Serialize for Version {
impl std::fmt::Display for Version {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.value.fmt(f)
return self.value.fmt(f);
}
}

View File

@@ -35,9 +35,10 @@ impl PhysicalHost {
pub fn cluster_mac(&self) -> MacAddress {
self.network
.first()
.get(0)
.expect("Cluster physical host should have a network interface")
.mac_address
.clone()
}
pub fn cpu(mut self, cpu_count: Option<u64>) -> Self {

View File

@@ -2,42 +2,28 @@ use log::debug;
use once_cell::sync::Lazy;
use tokio::sync::broadcast;
use crate::modules::application::ApplicationFeatureStatus;
use super::{
interpret::{InterpretError, Outcome},
topology::TopologyStatus,
};
use super::interpret::{InterpretError, Outcome};
#[derive(Debug, Clone)]
pub enum HarmonyEvent {
HarmonyStarted,
HarmonyFinished,
PrepareTopologyStarted {
topology: String,
},
TopologyPrepared {
topology: String,
outcome: Outcome,
},
InterpretExecutionStarted {
execution_id: String,
topology: String,
interpret: String,
score: String,
message: String,
},
InterpretExecutionFinished {
execution_id: String,
topology: String,
interpret: String,
score: String,
outcome: Result<Outcome, InterpretError>,
},
TopologyStateChanged {
topology: String,
status: TopologyStatus,
message: Option<String>,
},
ApplicationFeatureStateChanged {
topology: String,
application: String,
feature: String,
status: ApplicationFeatureStatus,
},
}
static HARMONY_EVENT_BUS: Lazy<broadcast::Sender<HarmonyEvent>> = Lazy::new(|| {
@@ -47,14 +33,9 @@ static HARMONY_EVENT_BUS: Lazy<broadcast::Sender<HarmonyEvent>> = Lazy::new(|| {
});
pub fn instrument(event: HarmonyEvent) -> Result<(), &'static str> {
if cfg!(any(test, feature = "testing")) {
let _ = event; // Suppress the "unused variable" warning for `event`
Ok(())
} else {
match HARMONY_EVENT_BUS.send(event) {
Ok(_) => Ok(()),
Err(_) => Err("send error: no subscribers"),
}
match HARMONY_EVENT_BUS.send(event) {
Ok(_) => Ok(()),
Err(_) => Err("send error: no subscribers"),
}
}

View File

@@ -7,7 +7,6 @@ use super::{
data::{Id, Version},
executors::ExecutorError,
inventory::Inventory,
topology::PreparationError,
};
pub enum InterpretName {
@@ -24,14 +23,6 @@ pub enum InterpretName {
TenantInterpret,
Application,
ArgoCD,
Alerting,
Ntfy,
HelmChart,
HelmCommand,
K8sResource,
Lamp,
ApplicationMonitoring,
K8sPrometheusCrdAlerting,
}
impl std::fmt::Display for InterpretName {
@@ -50,14 +41,6 @@ impl std::fmt::Display for InterpretName {
InterpretName::TenantInterpret => f.write_str("Tenant"),
InterpretName::Application => f.write_str("Application"),
InterpretName::ArgoCD => f.write_str("ArgoCD"),
InterpretName::Alerting => f.write_str("Alerting"),
InterpretName::Ntfy => f.write_str("Ntfy"),
InterpretName::HelmChart => f.write_str("HelmChart"),
InterpretName::HelmCommand => f.write_str("HelmCommand"),
InterpretName::K8sResource => f.write_str("K8sResource"),
InterpretName::Lamp => f.write_str("LAMP"),
InterpretName::ApplicationMonitoring => f.write_str("ApplicationMonitoring"),
InterpretName::K8sPrometheusCrdAlerting => f.write_str("K8sPrometheusCrdAlerting"),
}
}
}
@@ -130,14 +113,6 @@ impl std::fmt::Display for InterpretError {
}
impl Error for InterpretError {}
impl From<PreparationError> for InterpretError {
fn from(value: PreparationError) -> Self {
Self {
msg: format!("InterpretError : {value}"),
}
}
}
impl From<ExecutorError> for InterpretError {
fn from(value: ExecutorError) -> Self {
Self {

View File

@@ -1,14 +1,14 @@
use std::sync::{Arc, RwLock};
use std::sync::{Arc, Mutex, RwLock};
use log::{debug, warn};
use crate::topology::TopologyStatus;
use crate::instrumentation::{self, HarmonyEvent};
use super::{
interpret::{InterpretError, Outcome},
interpret::{InterpretError, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::{PreparationError, PreparationOutcome, Topology, TopologyState},
topology::Topology,
};
type ScoreVec<T> = Vec<Box<dyn Score<T>>>;
@@ -17,7 +17,7 @@ pub struct Maestro<T: Topology> {
inventory: Inventory,
topology: T,
scores: Arc<RwLock<ScoreVec<T>>>,
topology_state: TopologyState,
topology_preparation_result: Mutex<Option<Outcome>>,
}
impl<T: Topology> Maestro<T> {
@@ -25,46 +25,41 @@ impl<T: Topology> Maestro<T> {
///
/// This should rarely be used. Most of the time Maestro::initialize should be used instead.
pub fn new_without_initialization(inventory: Inventory, topology: T) -> Self {
let topology_name = topology.name().to_string();
Self {
inventory,
topology,
scores: Arc::new(RwLock::new(Vec::new())),
topology_state: TopologyState::new(topology_name),
topology_preparation_result: None.into(),
}
}
pub async fn initialize(inventory: Inventory, topology: T) -> Result<Self, PreparationError> {
let mut instance = Self::new_without_initialization(inventory, topology);
pub async fn initialize(inventory: Inventory, topology: T) -> Result<Self, InterpretError> {
let instance = Self::new_without_initialization(inventory, topology);
instance.prepare_topology().await?;
Ok(instance)
}
/// Ensures the associated Topology is ready for operations.
/// Delegates the readiness check and potential setup actions to the Topology.
async fn prepare_topology(&mut self) -> Result<PreparationOutcome, PreparationError> {
self.topology_state.prepare();
pub async fn prepare_topology(&self) -> Result<Outcome, InterpretError> {
instrumentation::instrument(HarmonyEvent::PrepareTopologyStarted {
topology: self.topology.name().to_string(),
})
.unwrap();
let result = self.topology.ensure_ready().await;
let outcome = self.topology.ensure_ready().await?;
match result {
Ok(outcome) => {
match outcome.clone() {
PreparationOutcome::Success { details } => {
self.topology_state.success(details);
}
PreparationOutcome::Noop => {
self.topology_state.noop();
}
};
Ok(outcome)
}
Err(err) => {
self.topology_state.error(err.to_string());
Err(err)
}
}
instrumentation::instrument(HarmonyEvent::TopologyPrepared {
topology: self.topology.name().to_string(),
outcome: outcome.clone(),
})
.unwrap();
self.topology_preparation_result
.lock()
.unwrap()
.replace(outcome.clone());
Ok(outcome)
}
pub fn register_all(&mut self, mut scores: ScoreVec<T>) {
@@ -73,7 +68,15 @@ impl<T: Topology> Maestro<T> {
}
fn is_topology_initialized(&self) -> bool {
self.topology_state.status == TopologyStatus::Success
let result = self.topology_preparation_result.lock().unwrap();
if let Some(outcome) = result.as_ref() {
match outcome.status {
InterpretStatus::SUCCESS => return true,
_ => return false,
}
} else {
false
}
}
pub async fn interpret(&self, score: Box<dyn Score<T>>) -> Result<Outcome, InterpretError> {
@@ -84,8 +87,10 @@ impl<T: Topology> Maestro<T> {
self.topology.name(),
);
}
debug!("Interpreting score {score:?}");
let result = score.interpret(&self.inventory, &self.topology).await;
debug!("Running score {score:?}");
let interpret = score.create_interpret();
debug!("Launching interpret {interpret:?}");
let result = interpret.execute(&self.inventory, &self.topology).await;
debug!("Got result {result:?}");
result
}

View File

@@ -1,62 +1,22 @@
use std::collections::BTreeMap;
use async_trait::async_trait;
use serde::Serialize;
use serde_value::Value;
use super::{
data::Id,
instrumentation::{self, HarmonyEvent},
interpret::{Interpret, InterpretError, Outcome},
inventory::Inventory,
topology::Topology,
};
use super::{interpret::Interpret, topology::Topology};
#[async_trait]
pub trait Score<T: Topology>:
std::fmt::Debug + ScoreToString<T> + Send + Sync + CloneBoxScore<T> + SerializeScore<T>
{
async fn interpret(
&self,
inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
let id = Id::default();
let interpret = self.create_interpret();
instrumentation::instrument(HarmonyEvent::InterpretExecutionStarted {
execution_id: id.clone().to_string(),
topology: topology.name().into(),
interpret: interpret.get_name().to_string(),
score: self.name(),
message: format!("{} running...", interpret.get_name()),
})
.unwrap();
let result = interpret.execute(inventory, topology).await;
instrumentation::instrument(HarmonyEvent::InterpretExecutionFinished {
execution_id: id.clone().to_string(),
topology: topology.name().into(),
interpret: interpret.get_name().to_string(),
score: self.name(),
outcome: result.clone(),
})
.unwrap();
result
}
fn name(&self) -> String;
#[doc(hidden)]
fn create_interpret(&self) -> Box<dyn Interpret<T>>;
fn name(&self) -> String;
}
pub trait SerializeScore<T: Topology> {
fn serialize(&self) -> Value;
}
impl<S, T> SerializeScore<T> for S
impl<'de, S, T> SerializeScore<T> for S
where
T: Topology,
S: Score<T> + Serialize,
@@ -64,7 +24,7 @@ where
fn serialize(&self) -> Value {
// TODO not sure if this is the right place to handle the error or it should bubble
// up?
serde_value::to_value(self).expect("Score should serialize successfully")
serde_value::to_value(&self).expect("Score should serialize successfully")
}
}

View File

@@ -4,6 +4,8 @@ use harmony_types::net::MacAddress;
use log::info;
use crate::executors::ExecutorError;
use crate::interpret::InterpretError;
use crate::interpret::Outcome;
use super::DHCPStaticEntry;
use super::DhcpServer;
@@ -17,8 +19,6 @@ use super::K8sclient;
use super::LoadBalancer;
use super::LoadBalancerService;
use super::LogicalHost;
use super::PreparationError;
use super::PreparationOutcome;
use super::Router;
use super::TftpServer;
@@ -48,7 +48,7 @@ impl Topology for HAClusterTopology {
fn name(&self) -> &str {
"HAClusterTopology"
}
async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
async fn ensure_ready(&self) -> Result<Outcome, InterpretError> {
todo!(
"ensure_ready, not entirely sure what it should do here, probably something like verify that the hosts are reachable and all services are up and ready."
)
@@ -241,15 +241,13 @@ pub struct DummyInfra;
#[async_trait]
impl Topology for DummyInfra {
fn name(&self) -> &str {
"DummyInfra"
todo!()
}
async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
async fn ensure_ready(&self) -> Result<Outcome, InterpretError> {
let dummy_msg = "This is a dummy infrastructure that does nothing";
info!("{dummy_msg}");
Ok(PreparationOutcome::Success {
details: dummy_msg.into(),
})
Ok(Outcome::success(dummy_msg.to_string()))
}
}

View File

@@ -1,11 +1,12 @@
use derive_new::new;
use futures_util::StreamExt;
use k8s_openapi::{
ClusterResourceScope, NamespaceResourceScope,
api::{apps::v1::Deployment, core::v1::Pod},
};
use kube::{
Client, Config, Error, Resource,
api::{Api, AttachParams, DeleteParams, ListParams, Patch, PatchParams, ResourceExt},
api::{Api, AttachParams, ListParams, Patch, PatchParams, ResourceExt},
config::{KubeConfigOptions, Kubeconfig},
core::ErrorResponse,
runtime::reflector::Lookup,
@@ -17,9 +18,7 @@ use kube::{
};
use log::{debug, error, trace};
use serde::{Serialize, de::DeserializeOwned};
use serde_json::json;
use similar::TextDiff;
use tokio::io::AsyncReadExt;
use similar::{DiffableStr, TextDiff};
#[derive(new, Clone)]
pub struct K8sClient {
@@ -53,66 +52,6 @@ impl K8sClient {
})
}
pub async fn get_deployment(
&self,
name: &str,
namespace: Option<&str>,
) -> Result<Option<Deployment>, Error> {
let deps: Api<Deployment> = if let Some(ns) = namespace {
Api::namespaced(self.client.clone(), ns)
} else {
Api::default_namespaced(self.client.clone())
};
Ok(deps.get_opt(name).await?)
}
pub async fn get_pod(&self, name: &str, namespace: Option<&str>) -> Result<Option<Pod>, Error> {
let pods: Api<Pod> = if let Some(ns) = namespace {
Api::namespaced(self.client.clone(), ns)
} else {
Api::default_namespaced(self.client.clone())
};
Ok(pods.get_opt(name).await?)
}
pub async fn scale_deployment(
&self,
name: &str,
namespace: Option<&str>,
replicas: u32,
) -> Result<(), Error> {
let deployments: Api<Deployment> = if let Some(ns) = namespace {
Api::namespaced(self.client.clone(), ns)
} else {
Api::default_namespaced(self.client.clone())
};
let patch = json!({
"spec": {
"replicas": replicas
}
});
let pp = PatchParams::default();
let scale = Patch::Apply(&patch);
deployments.patch_scale(name, &pp, &scale).await?;
Ok(())
}
pub async fn delete_deployment(
&self,
name: &str,
namespace: Option<&str>,
) -> Result<(), Error> {
let deployments: Api<Deployment> = if let Some(ns) = namespace {
Api::namespaced(self.client.clone(), ns)
} else {
Api::default_namespaced(self.client.clone())
};
let delete_params = DeleteParams::default();
deployments.delete(name, &delete_params).await?;
Ok(())
}
pub async fn wait_until_deployment_ready(
&self,
name: String,
@@ -128,75 +67,13 @@ impl K8sClient {
}
let establish = await_condition(api, name.as_str(), conditions::is_deployment_completed());
let t = timeout.unwrap_or(300);
let t = if let Some(t) = timeout { t } else { 300 };
let res = tokio::time::timeout(std::time::Duration::from_secs(t), establish).await;
if res.is_ok() {
Ok(())
if let Ok(r) = res {
return Ok(());
} else {
Err("timed out while waiting for deployment".to_string())
}
}
/// Will execute a commond in the first pod found that matches the specified label
/// '{label}={name}'
pub async fn exec_app_capture_output(
&self,
name: String,
label: String,
namespace: Option<&str>,
command: Vec<&str>,
) -> Result<String, String> {
let api: Api<Pod>;
if let Some(ns) = namespace {
api = Api::namespaced(self.client.clone(), ns);
} else {
api = Api::default_namespaced(self.client.clone());
}
let pod_list = api
.list(&ListParams::default().labels(format!("{label}={name}").as_str()))
.await
.expect("couldn't get list of pods");
let res = api
.exec(
pod_list
.items
.first()
.expect("couldn't get pod")
.name()
.expect("couldn't get pod name")
.into_owned()
.as_str(),
command,
&AttachParams::default().stdout(true).stderr(true),
)
.await;
match res {
Err(e) => Err(e.to_string()),
Ok(mut process) => {
let status = process
.take_status()
.expect("Couldn't get status")
.await
.expect("Couldn't unwrap status");
if let Some(s) = status.status {
let mut stdout_buf = String::new();
if let Some(mut stdout) = process.stdout().take() {
stdout.read_to_string(&mut stdout_buf).await;
}
debug!("Status: {} - {:?}", s, status.details);
if s == "Success" {
Ok(stdout_buf)
} else {
Err(s)
}
} else {
Err("Couldn't get inner status of pod exec".to_string())
}
}
return Err("timed out while waiting for deployment".to_string());
}
}
@@ -235,7 +112,7 @@ impl K8sClient {
.await;
match res {
Err(e) => Err(e.to_string()),
Err(e) => return Err(e.to_string()),
Ok(mut process) => {
let status = process
.take_status()
@@ -244,10 +121,14 @@ impl K8sClient {
.expect("Couldn't unwrap status");
if let Some(s) = status.status {
debug!("Status: {} - {:?}", s, status.details);
if s == "Success" { Ok(()) } else { Err(s) }
debug!("Status: {}", s);
if s == "Success" {
return Ok(());
} else {
return Err(s);
}
} else {
Err("Couldn't get inner status of pod exec".to_string())
return Err("Couldn't get inner status of pod exec".to_string());
}
}
}
@@ -288,9 +169,8 @@ impl K8sClient {
trace!("Received current value {current:#?}");
// The resource exists, so we calculate and display a diff.
println!("\nPerforming dry-run for resource: '{}'", name);
let mut current_yaml = serde_yaml::to_value(&current).unwrap_or_else(|_| {
panic!("Could not serialize current value : {current:#?}")
});
let mut current_yaml = serde_yaml::to_value(&current)
.expect(&format!("Could not serialize current value : {current:#?}"));
if current_yaml.is_mapping() && current_yaml.get("status").is_some() {
let map = current_yaml.as_mapping_mut().unwrap();
let removed = map.remove_entry("status");
@@ -357,7 +237,7 @@ impl K8sClient {
}
}
pub async fn apply_many<K>(&self, resource: &[K], ns: Option<&str>) -> Result<Vec<K>, Error>
pub async fn apply_many<K>(&self, resource: &Vec<K>, ns: Option<&str>) -> Result<Vec<K>, Error>
where
K: Resource + Clone + std::fmt::Debug + DeserializeOwned + serde::Serialize,
<K as Resource>::Scope: ApplyStrategy<K>,
@@ -373,7 +253,7 @@ impl K8sClient {
pub async fn apply_yaml_many(
&self,
#[allow(clippy::ptr_arg)] yaml: &Vec<serde_yaml::Value>,
yaml: &Vec<serde_yaml::Value>,
ns: Option<&str>,
) -> Result<(), Error> {
for y in yaml.iter() {

View File

@@ -7,7 +7,7 @@ use tokio::sync::OnceCell;
use crate::{
executors::ExecutorError,
interpret::InterpretStatus,
interpret::{InterpretError, InterpretStatus, Outcome},
inventory::Inventory,
modules::{
k3d::K3DInstallationScore,
@@ -24,17 +24,10 @@ use crate::{
};
use super::{
DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, PreparationError,
PreparationOutcome, Topology,
DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology,
k8s::K8sClient,
oberservability::monitoring::AlertReceiver,
tenant::{
TenantConfig, TenantManager,
k8s::K8sTenantManager,
network_policy::{
K3dNetworkPolicyStrategy, NetworkPolicyStrategy, NoopNetworkPolicyStrategy,
},
},
tenant::{TenantConfig, TenantManager, k8s::K8sTenantManager},
};
#[derive(Clone, Debug)]
@@ -81,30 +74,20 @@ impl PrometheusApplicationMonitoring<CRDPrometheus> for K8sAnywhereTopology {
sender: &CRDPrometheus,
inventory: &Inventory,
receivers: Option<Vec<Box<dyn AlertReceiver<CRDPrometheus>>>>,
) -> Result<PreparationOutcome, PreparationError> {
) -> Result<Outcome, InterpretError> {
let po_result = self.ensure_prometheus_operator(sender).await?;
if po_result == PreparationOutcome::Noop {
if po_result.status == InterpretStatus::NOOP {
debug!("Skipping Prometheus CR installation due to missing operator.");
return Ok(po_result);
return Ok(Outcome::noop());
}
let result = self
.get_k8s_prometheus_application_score(sender.clone(), receivers)
self.get_k8s_prometheus_application_score(sender.clone(), receivers)
.await
.interpret(inventory, self)
.await;
.create_interpret()
.execute(inventory, self)
.await?;
match result {
Ok(outcome) => match outcome.status {
InterpretStatus::SUCCESS => Ok(PreparationOutcome::Success {
details: outcome.message,
}),
InterpretStatus::NOOP => Ok(PreparationOutcome::Noop),
_ => Err(PreparationError::new(outcome.message)),
},
Err(err) => Err(PreparationError::new(err.to_string())),
}
Ok(Outcome::success(format!("No action, working on cluster ")))
}
}
@@ -141,7 +124,7 @@ impl K8sAnywhereTopology {
) -> K8sPrometheusCRDAlertingScore {
K8sPrometheusCRDAlertingScore {
sender,
receivers: receivers.unwrap_or_default(),
receivers: receivers.unwrap_or_else(Vec::new),
service_monitors: vec![],
prometheus_rules: vec![],
}
@@ -175,23 +158,15 @@ impl K8sAnywhereTopology {
K3DInstallationScore::default()
}
async fn try_install_k3d(&self) -> Result<(), PreparationError> {
let result = self
.get_k3d_installation_score()
.interpret(&Inventory::empty(), self)
.await;
match result {
Ok(outcome) => match outcome.status {
InterpretStatus::SUCCESS => Ok(()),
InterpretStatus::NOOP => Ok(()),
_ => Err(PreparationError::new(outcome.message)),
},
Err(err) => Err(PreparationError::new(err.to_string())),
}
async fn try_install_k3d(&self) -> Result<(), InterpretError> {
self.get_k3d_installation_score()
.create_interpret()
.execute(&Inventory::empty(), self)
.await?;
Ok(())
}
async fn try_get_or_install_k8s_client(&self) -> Result<Option<K8sState>, PreparationError> {
async fn try_get_or_install_k8s_client(&self) -> Result<Option<K8sState>, InterpretError> {
let k8s_anywhere_config = &self.config;
// TODO this deserves some refactoring, it is becoming a bit hard to figure out
@@ -201,7 +176,7 @@ impl K8sAnywhereTopology {
} else {
if let Some(kubeconfig) = &k8s_anywhere_config.kubeconfig {
debug!("Loading kubeconfig {kubeconfig}");
match self.try_load_kubeconfig(kubeconfig).await {
match self.try_load_kubeconfig(&kubeconfig).await {
Some(client) => {
return Ok(Some(K8sState {
client: Arc::new(client),
@@ -210,7 +185,7 @@ impl K8sAnywhereTopology {
}));
}
None => {
return Err(PreparationError::new(format!(
return Err(InterpretError::new(format!(
"Failed to load kubeconfig from {kubeconfig}"
)));
}
@@ -256,21 +231,16 @@ impl K8sAnywhereTopology {
Ok(Some(state))
}
async fn ensure_k8s_tenant_manager(&self, k8s_state: &K8sState) -> Result<(), String> {
if self.tenant_manager.get().is_some() {
async fn ensure_k8s_tenant_manager(&self) -> Result<(), String> {
if let Some(_) = self.tenant_manager.get() {
return Ok(());
}
self.tenant_manager
.get_or_try_init(async || -> Result<K8sTenantManager, String> {
// TOOD: checker si K8s ou K3d/s tenant manager (ref. issue https://git.nationtech.io/NationTech/harmony/issues/94)
let k8s_client = self.k8s_client().await?;
let network_policy_strategy: Box<dyn NetworkPolicyStrategy> = match k8s_state.source
{
K8sSource::LocalK3d => Box::new(K3dNetworkPolicyStrategy::new()),
K8sSource::Kubeconfig => Box::new(NoopNetworkPolicyStrategy::new()),
};
Ok(K8sTenantManager::new(k8s_client, network_policy_strategy))
Ok(K8sTenantManager::new(k8s_client))
})
.await?;
@@ -289,11 +259,11 @@ impl K8sAnywhereTopology {
async fn ensure_prometheus_operator(
&self,
sender: &CRDPrometheus,
) -> Result<PreparationOutcome, PreparationError> {
) -> Result<Outcome, InterpretError> {
let status = Command::new("sh")
.args(["-c", "kubectl get crd -A | grep -i prometheuses"])
.status()
.map_err(|e| PreparationError::new(format!("could not connect to cluster: {}", e)))?;
.map_err(|e| InterpretError::new(format!("could not connect to cluster: {}", e)))?;
if !status.success() {
if let Some(Some(k8s_state)) = self.k8s_state.get() {
@@ -302,37 +272,30 @@ impl K8sAnywhereTopology {
debug!("installing prometheus operator");
let op_score =
prometheus_operator_helm_chart_score(sender.namespace.clone());
let result = op_score.interpret(&Inventory::empty(), self).await;
return match result {
Ok(outcome) => match outcome.status {
InterpretStatus::SUCCESS => Ok(PreparationOutcome::Success {
details: "installed prometheus operator".into(),
}),
InterpretStatus::NOOP => Ok(PreparationOutcome::Noop),
_ => Err(PreparationError::new(
"failed to install prometheus operator (unknown error)".into(),
)),
},
Err(err) => Err(PreparationError::new(err.to_string())),
};
op_score
.create_interpret()
.execute(&Inventory::empty(), self)
.await?;
return Ok(Outcome::success(
"installed prometheus operator".to_string(),
));
}
K8sSource::Kubeconfig => {
debug!("unable to install prometheus operator, contact cluster admin");
return Ok(PreparationOutcome::Noop);
return Ok(Outcome::noop());
}
}
} else {
warn!("Unable to detect k8s_state. Skipping Prometheus Operator install.");
return Ok(PreparationOutcome::Noop);
return Ok(Outcome::noop());
}
}
debug!("Prometheus operator is already present, skipping install");
Ok(PreparationOutcome::Success {
details: "prometheus operator present in cluster".into(),
})
Ok(Outcome::success(
"prometheus operator present in cluster".to_string(),
))
}
}
@@ -391,25 +354,26 @@ impl Topology for K8sAnywhereTopology {
"K8sAnywhereTopology"
}
async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
async fn ensure_ready(&self) -> Result<Outcome, InterpretError> {
let k8s_state = self
.k8s_state
.get_or_try_init(|| self.try_get_or_install_k8s_client())
.await?;
let k8s_state: &K8sState = k8s_state.as_ref().ok_or(PreparationError::new(
"no K8s client could be found or installed".to_string(),
let k8s_state: &K8sState = k8s_state.as_ref().ok_or(InterpretError::new(
"No K8s client could be found or installed".to_string(),
))?;
self.ensure_k8s_tenant_manager(k8s_state)
self.ensure_k8s_tenant_manager()
.await
.map_err(PreparationError::new)?;
.map_err(|e| InterpretError::new(e))?;
match self.is_helm_available() {
Ok(()) => Ok(PreparationOutcome::Success {
details: format!("{} + helm available", k8s_state.message.clone()),
}),
Err(e) => Err(PreparationError::new(format!("helm unavailable: {}", e))),
Ok(()) => Ok(Outcome::success(format!(
"{} + helm available",
k8s_state.message.clone()
))),
Err(e) => Err(InterpretError::new(format!("helm unavailable: {}", e))),
}
}
}

View File

@@ -1,7 +1,9 @@
use async_trait::async_trait;
use derive_new::new;
use super::{HelmCommand, PreparationError, PreparationOutcome, Topology};
use crate::interpret::{InterpretError, Outcome};
use super::{HelmCommand, Topology};
#[derive(new)]
pub struct LocalhostTopology;
@@ -12,10 +14,10 @@ impl Topology for LocalhostTopology {
"LocalHostTopology"
}
async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
Ok(PreparationOutcome::Success {
details: "Localhost is Chuck Norris, always ready.".into(),
})
async fn ensure_ready(&self) -> Result<Outcome, InterpretError> {
Ok(Outcome::success(
"Localhost is Chuck Norris, always ready.".to_string(),
))
}
}

View File

@@ -6,7 +6,6 @@ mod k8s_anywhere;
mod localhost;
pub mod oberservability;
pub mod tenant;
use derive_new::new;
pub use k8s_anywhere::*;
pub use localhost::*;
pub mod k8s;
@@ -27,13 +26,10 @@ pub use tftp::*;
mod helm_command;
pub use helm_command::*;
use super::{
executors::ExecutorError,
instrumentation::{self, HarmonyEvent},
};
use std::error::Error;
use std::net::IpAddr;
use super::interpret::{InterpretError, Outcome};
/// Represents a logical view of an infrastructure environment providing specific capabilities.
///
/// A Topology acts as a self-contained "package" responsible for managing access
@@ -61,128 +57,9 @@ pub trait Topology: Send + Sync {
/// * **Internal Orchestration:** For complex topologies, this method might manage dependencies on other sub-topologies, ensuring *their* `ensure_ready` is called first. Using nested `Maestros` to run setup `Scores` against these sub-topologies is the recommended pattern for non-trivial bootstrapping, allowing reuse of Harmony's core orchestration logic.
///
/// # Returns
/// - `Ok(PreparationOutcome)`: Indicates the topology is now ready. The `Outcome` status might be `SUCCESS` if actions were taken, or `NOOP` if it was already ready. The message should provide context.
/// - `Err(PreparationError)`: Indicates the topology could not reach a ready state due to configuration issues, discovery failures, bootstrap errors, or unsupported environments.
async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError>;
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum PreparationOutcome {
Success { details: String },
Noop,
}
#[derive(Debug, Clone, new)]
pub struct PreparationError {
msg: String,
}
impl std::fmt::Display for PreparationError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(&self.msg)
}
}
impl Error for PreparationError {}
impl From<ExecutorError> for PreparationError {
fn from(value: ExecutorError) -> Self {
Self {
msg: format!("InterpretError : {value}"),
}
}
}
impl From<kube::Error> for PreparationError {
fn from(value: kube::Error) -> Self {
Self {
msg: format!("PreparationError : {value}"),
}
}
}
impl From<String> for PreparationError {
fn from(value: String) -> Self {
Self {
msg: format!("PreparationError : {value}"),
}
}
}
#[derive(Clone, Debug, PartialEq)]
pub enum TopologyStatus {
Queued,
Preparing,
Success,
Noop,
Error,
}
pub struct TopologyState {
pub topology: String,
pub status: TopologyStatus,
}
impl TopologyState {
pub fn new(topology: String) -> Self {
let instance = Self {
topology,
status: TopologyStatus::Queued,
};
instrumentation::instrument(HarmonyEvent::TopologyStateChanged {
topology: instance.topology.clone(),
status: instance.status.clone(),
message: None,
})
.unwrap();
instance
}
pub fn prepare(&mut self) {
self.status = TopologyStatus::Preparing;
instrumentation::instrument(HarmonyEvent::TopologyStateChanged {
topology: self.topology.clone(),
status: self.status.clone(),
message: None,
})
.unwrap();
}
pub fn success(&mut self, message: String) {
self.status = TopologyStatus::Success;
instrumentation::instrument(HarmonyEvent::TopologyStateChanged {
topology: self.topology.clone(),
status: self.status.clone(),
message: Some(message),
})
.unwrap();
}
pub fn noop(&mut self) {
self.status = TopologyStatus::Noop;
instrumentation::instrument(HarmonyEvent::TopologyStateChanged {
topology: self.topology.clone(),
status: self.status.clone(),
message: None,
})
.unwrap();
}
pub fn error(&mut self, message: String) {
self.status = TopologyStatus::Error;
instrumentation::instrument(HarmonyEvent::TopologyStateChanged {
topology: self.topology.clone(),
status: self.status.clone(),
message: Some(message),
})
.unwrap();
}
/// - `Ok(Outcome)`: Indicates the topology is now ready. The `Outcome` status might be `SUCCESS` if actions were taken, or `NOOP` if it was already ready. The message should provide context.
/// - `Err(TopologyError)`: Indicates the topology could not reach a ready state due to configuration issues, discovery failures, bootstrap errors, or unsupported environments.
async fn ensure_ready(&self) -> Result<Outcome, InterpretError>;
}
#[derive(Debug)]
@@ -211,7 +88,7 @@ impl Serialize for Url {
{
match self {
Url::LocalFolder(path) => serializer.serialize_str(path),
Url::Url(url) => serializer.serialize_str(url.as_str()),
Url::Url(url) => serializer.serialize_str(&url.as_str()),
}
}
}

View File

@@ -45,7 +45,7 @@ impl<S: AlertSender + Installable<T>, T: Topology> Interpret<T> for AlertingInte
}
fn get_name(&self) -> InterpretName {
InterpretName::Alerting
todo!()
}
fn get_version(&self) -> Version {

View File

@@ -27,11 +27,11 @@ pub struct UnmanagedRouter {
impl Router for UnmanagedRouter {
fn get_gateway(&self) -> IpAddress {
self.gateway
self.gateway.clone()
}
fn get_cidr(&self) -> Ipv4Cidr {
self.cidr
self.cidr.clone()
}
fn get_host(&self) -> LogicalHost {

View File

@@ -15,38 +15,36 @@ use k8s_openapi::{
apimachinery::pkg::util::intstr::IntOrString,
};
use kube::Resource;
use log::debug;
use log::{debug, info, warn};
use serde::de::DeserializeOwned;
use serde_json::json;
use tokio::sync::OnceCell;
use super::{TenantConfig, TenantManager, network_policy::NetworkPolicyStrategy};
use super::{TenantConfig, TenantManager};
#[derive(Debug)]
#[derive(Clone, Debug)]
pub struct K8sTenantManager {
k8s_client: Arc<K8sClient>,
k8s_tenant_config: Arc<OnceCell<TenantConfig>>,
network_policy_strategy: Box<dyn NetworkPolicyStrategy>,
}
impl K8sTenantManager {
pub fn new(
client: Arc<K8sClient>,
network_policy_strategy: Box<dyn NetworkPolicyStrategy>,
) -> Self {
pub fn new(client: Arc<K8sClient>) -> Self {
Self {
k8s_client: client,
k8s_tenant_config: Arc::new(OnceCell::new()),
network_policy_strategy,
}
}
}
impl K8sTenantManager {
fn get_namespace_name(&self, config: &TenantConfig) -> String {
config.name.clone()
}
fn ensure_constraints(&self, _namespace: &Namespace) -> Result<(), ExecutorError> {
// TODO: Ensure constraints are applied to namespace (https://git.nationtech.io/NationTech/harmony/issues/98)
warn!("Validate that when tenant already exists (by id) that name has not changed");
warn!("Make sure other Tenant constraints are respected by this k8s implementation");
Ok(())
}
@@ -221,6 +219,29 @@ impl K8sTenantManager {
}
]
},
{
"to": [
{
"ipBlock": {
"cidr": "10.43.0.1/32",
}
}
]
},
{
"to": [
{
//TODO this ip is from the docker network that k3d is running on
//since k3d does not deploy kube-api-server as a pod it needs to have the ip
//address opened up
//need to find a way to automatically detect the ip address from the docker
//network
"ipBlock": {
"cidr": "172.24.0.0/16",
}
}
]
},
{
"to": [
{
@@ -288,19 +309,19 @@ impl K8sTenantManager {
let ports: Option<Vec<NetworkPolicyPort>> =
c.1.as_ref().map(|spec| match &spec.data {
super::PortSpecData::SinglePort(port) => vec![NetworkPolicyPort {
port: Some(IntOrString::Int((*port).into())),
port: Some(IntOrString::Int(port.clone().into())),
..Default::default()
}],
super::PortSpecData::PortRange(start, end) => vec![NetworkPolicyPort {
port: Some(IntOrString::Int((*start).into())),
end_port: Some((*end).into()),
port: Some(IntOrString::Int(start.clone().into())),
end_port: Some(end.clone().into()),
protocol: None, // Not currently supported by Harmony
}],
super::PortSpecData::ListOfPorts(items) => items
.iter()
.map(|i| NetworkPolicyPort {
port: Some(IntOrString::Int((*i).into())),
port: Some(IntOrString::Int(i.clone().into())),
..Default::default()
})
.collect(),
@@ -345,19 +366,19 @@ impl K8sTenantManager {
let ports: Option<Vec<NetworkPolicyPort>> =
c.1.as_ref().map(|spec| match &spec.data {
super::PortSpecData::SinglePort(port) => vec![NetworkPolicyPort {
port: Some(IntOrString::Int((*port).into())),
port: Some(IntOrString::Int(port.clone().into())),
..Default::default()
}],
super::PortSpecData::PortRange(start, end) => vec![NetworkPolicyPort {
port: Some(IntOrString::Int((*start).into())),
end_port: Some((*end).into()),
port: Some(IntOrString::Int(start.clone().into())),
end_port: Some(end.clone().into()),
protocol: None, // Not currently supported by Harmony
}],
super::PortSpecData::ListOfPorts(items) => items
.iter()
.map(|i| NetworkPolicyPort {
port: Some(IntOrString::Int((*i).into())),
port: Some(IntOrString::Int(i.clone().into())),
..Default::default()
})
.collect(),
@@ -390,27 +411,12 @@ impl K8sTenantManager {
}
}
impl Clone for K8sTenantManager {
fn clone(&self) -> Self {
Self {
k8s_client: self.k8s_client.clone(),
k8s_tenant_config: self.k8s_tenant_config.clone(),
network_policy_strategy: self.network_policy_strategy.clone_box(),
}
}
}
#[async_trait]
impl TenantManager for K8sTenantManager {
async fn provision_tenant(&self, config: &TenantConfig) -> Result<(), ExecutorError> {
let namespace = self.build_namespace(config)?;
let resource_quota = self.build_resource_quota(config)?;
let network_policy = self.build_network_policy(config)?;
let network_policy = self
.network_policy_strategy
.adjust_policy(network_policy, config);
let resource_limit_range = self.build_limit_range(config)?;
self.ensure_constraints(&namespace)?;
@@ -427,14 +433,13 @@ impl TenantManager for K8sTenantManager {
debug!("Creating network_policy for tenant {}", config.name);
self.apply_resource(network_policy, config).await?;
debug!(
info!(
"Success provisionning K8s tenant id {} name {}",
config.id, config.name
);
self.store_config(config);
Ok(())
}
async fn get_tenant_config(&self) -> Option<TenantConfig> {
self.k8s_tenant_config.get().cloned()
}

View File

@@ -1,11 +1,11 @@
pub mod k8s;
mod manager;
pub mod network_policy;
use std::str::FromStr;
use crate::data::Id;
pub use manager::*;
use serde::{Deserialize, Serialize};
use std::str::FromStr;
use crate::data::Id;
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] // Assuming serde for Scores
pub struct TenantConfig {

View File

@@ -1,120 +0,0 @@
use k8s_openapi::api::networking::v1::{
IPBlock, NetworkPolicy, NetworkPolicyEgressRule, NetworkPolicyPeer, NetworkPolicySpec,
};
use super::TenantConfig;
pub trait NetworkPolicyStrategy: Send + Sync + std::fmt::Debug {
fn clone_box(&self) -> Box<dyn NetworkPolicyStrategy>;
fn adjust_policy(&self, policy: NetworkPolicy, config: &TenantConfig) -> NetworkPolicy;
}
#[derive(Clone, Debug)]
pub struct NoopNetworkPolicyStrategy {}
impl NoopNetworkPolicyStrategy {
pub fn new() -> Self {
Self {}
}
}
impl Default for NoopNetworkPolicyStrategy {
fn default() -> Self {
Self::new()
}
}
impl NetworkPolicyStrategy for NoopNetworkPolicyStrategy {
fn clone_box(&self) -> Box<dyn NetworkPolicyStrategy> {
Box::new(self.clone())
}
fn adjust_policy(&self, policy: NetworkPolicy, _config: &TenantConfig) -> NetworkPolicy {
policy
}
}
#[derive(Clone, Debug)]
pub struct K3dNetworkPolicyStrategy {}
impl K3dNetworkPolicyStrategy {
pub fn new() -> Self {
Self {}
}
}
impl Default for K3dNetworkPolicyStrategy {
fn default() -> Self {
Self::new()
}
}
impl NetworkPolicyStrategy for K3dNetworkPolicyStrategy {
fn clone_box(&self) -> Box<dyn NetworkPolicyStrategy> {
Box::new(self.clone())
}
fn adjust_policy(&self, policy: NetworkPolicy, _config: &TenantConfig) -> NetworkPolicy {
let mut egress = policy
.spec
.clone()
.unwrap_or_default()
.egress
.clone()
.unwrap_or_default();
egress.push(NetworkPolicyEgressRule {
to: Some(vec![NetworkPolicyPeer {
ip_block: Some(IPBlock {
cidr: "172.18.0.0/16".into(), // TODO: query the IP range https://git.nationtech.io/NationTech/harmony/issues/108
..Default::default()
}),
..Default::default()
}]),
..Default::default()
});
NetworkPolicy {
spec: Some(NetworkPolicySpec {
egress: Some(egress),
..policy.spec.unwrap_or_default()
}),
..policy
}
}
}
#[cfg(test)]
mod tests {
use k8s_openapi::api::networking::v1::{
IPBlock, NetworkPolicy, NetworkPolicyEgressRule, NetworkPolicyPeer, NetworkPolicySpec,
};
use super::{K3dNetworkPolicyStrategy, NetworkPolicyStrategy};
#[test]
pub fn should_add_ip_block_for_k3d_harmony_server() {
let strategy = K3dNetworkPolicyStrategy::new();
let policy =
strategy.adjust_policy(NetworkPolicy::default(), &super::TenantConfig::default());
let expected_policy = NetworkPolicy {
spec: Some(NetworkPolicySpec {
egress: Some(vec![NetworkPolicyEgressRule {
to: Some(vec![NetworkPolicyPeer {
ip_block: Some(IPBlock {
cidr: "172.18.0.0/16".into(),
..Default::default()
}),
..Default::default()
}]),
..Default::default()
}]),
..Default::default()
}),
..Default::default()
};
assert_eq!(expected_policy, policy);
}
}

View File

@@ -60,7 +60,7 @@ impl DnsServer for OPNSenseFirewall {
}
fn get_ip(&self) -> IpAddress {
OPNSenseFirewall::get_ip(self)
OPNSenseFirewall::get_ip(&self)
}
fn get_host(&self) -> LogicalHost {

View File

@@ -48,7 +48,7 @@ impl HttpServer for OPNSenseFirewall {
async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
let mut config = self.opnsense_config.write().await;
let caddy = config.caddy();
if caddy.get_full_config().is_none() {
if let None = caddy.get_full_config() {
info!("Http config not available in opnsense config, installing package");
config.install_package("os-caddy").await.map_err(|e| {
ExecutorError::UnexpectedError(format!(

View File

@@ -121,12 +121,10 @@ pub(crate) fn haproxy_xml_config_to_harmony_loadbalancer(
LoadBalancerService {
backend_servers,
listening_port: frontend.bind.parse().unwrap_or_else(|_| {
panic!(
"HAProxy frontend address should be a valid SocketAddr, got {}",
frontend.bind
)
}),
listening_port: frontend.bind.parse().expect(&format!(
"HAProxy frontend address should be a valid SocketAddr, got {}",
frontend.bind
)),
health_check,
}
})
@@ -169,28 +167,28 @@ pub(crate) fn get_health_check_for_backend(
None => return None,
};
let haproxy_health_check = haproxy
let haproxy_health_check = match haproxy
.healthchecks
.healthchecks
.iter()
.find(|h| &h.uuid == health_check_uuid)?;
.find(|h| &h.uuid == health_check_uuid)
{
Some(health_check) => health_check,
None => return None,
};
let binding = haproxy_health_check.health_check_type.to_uppercase();
let uppercase = binding.as_str();
match uppercase {
"TCP" => {
if let Some(checkport) = haproxy_health_check.checkport.content.as_ref() {
if !checkport.is_empty() {
return Some(HealthCheck::TCP(Some(checkport.parse().unwrap_or_else(
|_| {
panic!(
"HAProxy check port should be a valid port number, got {checkport}"
)
},
))));
if checkport.len() > 0 {
return Some(HealthCheck::TCP(Some(checkport.parse().expect(&format!(
"HAProxy check port should be a valid port number, got {checkport}"
)))));
}
}
Some(HealthCheck::TCP(None))
return Some(HealthCheck::TCP(None));
}
"HTTP" => {
let path: String = haproxy_health_check
@@ -357,13 +355,16 @@ mod tests {
// Create an HAProxy instance with servers
let mut haproxy = HAProxy::default();
let server = HAProxyServer {
uuid: "server1".to_string(),
address: "192.168.1.1".to_string(),
port: 80,
..Default::default()
};
let mut server = HAProxyServer::default();
server.uuid = "server1".to_string();
server.address = "192.168.1.1".to_string();
server.port = 80;
haproxy.servers.servers.push(server);
let mut server = HAProxyServer::default();
server.uuid = "server3".to_string();
server.address = "192.168.1.3".to_string();
server.port = 8080;
// Call the function
let result = get_servers_for_backend(&backend, &haproxy);
@@ -383,12 +384,10 @@ mod tests {
let backend = HAProxyBackend::default();
// Create an HAProxy instance with servers
let mut haproxy = HAProxy::default();
let server = HAProxyServer {
uuid: "server1".to_string(),
address: "192.168.1.1".to_string(),
port: 80,
..Default::default()
};
let mut server = HAProxyServer::default();
server.uuid = "server1".to_string();
server.address = "192.168.1.1".to_string();
server.port = 80;
haproxy.servers.servers.push(server);
// Call the function
let result = get_servers_for_backend(&backend, &haproxy);
@@ -403,12 +402,10 @@ mod tests {
backend.linked_servers.content = Some("server4,server5".to_string());
// Create an HAProxy instance with servers
let mut haproxy = HAProxy::default();
let server = HAProxyServer {
uuid: "server1".to_string(),
address: "192.168.1.1".to_string(),
port: 80,
..Default::default()
};
let mut server = HAProxyServer::default();
server.uuid = "server1".to_string();
server.address = "192.168.1.1".to_string();
server.port = 80;
haproxy.servers.servers.push(server);
// Call the function
let result = get_servers_for_backend(&backend, &haproxy);
@@ -419,28 +416,20 @@ mod tests {
#[test]
fn test_get_servers_for_backend_multiple_linked_servers() {
// Create a backend with multiple linked servers
#[allow(clippy::field_reassign_with_default)]
let mut backend = HAProxyBackend::default();
backend.linked_servers.content = Some("server1,server2".to_string());
//
// Create an HAProxy instance with matching servers
let mut haproxy = HAProxy::default();
let server = HAProxyServer {
uuid: "server1".to_string(),
address: "some-hostname.test.mcd".to_string(),
port: 80,
..Default::default()
};
let mut server = HAProxyServer::default();
server.uuid = "server1".to_string();
server.address = "some-hostname.test.mcd".to_string();
server.port = 80;
haproxy.servers.servers.push(server);
let server = HAProxyServer {
uuid: "server2".to_string(),
address: "192.168.1.2".to_string(),
port: 8080,
..Default::default()
};
let mut server = HAProxyServer::default();
server.uuid = "server2".to_string();
server.address = "192.168.1.2".to_string();
server.port = 8080;
haproxy.servers.servers.push(server);
// Call the function
let result = get_servers_for_backend(&backend, &haproxy);
// Check the result

View File

@@ -58,7 +58,7 @@ impl TftpServer for OPNSenseFirewall {
async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
let mut config = self.opnsense_config.write().await;
let tftp = config.tftp();
if tftp.get_full_config().is_none() {
if let None = tftp.get_full_config() {
info!("Tftp config not available in opnsense config, installing package");
config.install_package("os-tftp").await.map_err(|e| {
ExecutorError::UnexpectedError(format!(

View File

@@ -13,7 +13,7 @@ pub trait ApplicationFeature<T: Topology>:
fn name(&self) -> String;
}
pub trait ApplicationFeatureClone<T: Topology> {
trait ApplicationFeatureClone<T: Topology> {
fn clone_box(&self) -> Box<dyn ApplicationFeature<T>>;
}
@@ -27,7 +27,7 @@ where
}
impl<T: Topology> Serialize for Box<dyn ApplicationFeature<T>> {
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{

View File

@@ -184,11 +184,12 @@ impl ArgoApplication {
pub fn to_yaml(&self) -> serde_yaml::Value {
let name = &self.name;
let namespace = if let Some(ns) = self.namespace.as_ref() {
ns
&ns
} else {
"argocd"
};
let project = &self.project;
let source = &self.source;
let yaml_str = format!(
r#"
@@ -227,7 +228,7 @@ spec:
serde_yaml::to_value(&self.source).expect("couldn't serialize source to value");
let sync_policy = serde_yaml::to_value(&self.sync_policy)
.expect("couldn't serialize sync_policy to value");
let revision_history_limit = serde_yaml::to_value(self.revision_history_limit)
let revision_history_limit = serde_yaml::to_value(&self.revision_history_limit)
.expect("couldn't serialize revision_history_limit to value");
spec.insert(

View File

@@ -1,7 +1,7 @@
use std::{io::Write, process::Command, sync::Arc};
use async_trait::async_trait;
use log::info;
use log::{debug, error};
use serde_yaml::Value;
use tempfile::NamedTempFile;
@@ -10,7 +10,7 @@ use crate::{
data::Version,
inventory::Inventory,
modules::application::{
ApplicationFeature, HelmPackage, OCICompliant,
Application, ApplicationFeature, HelmPackage, OCICompliant,
features::{ArgoApplication, ArgoHelmScore},
},
score::Score,
@@ -56,11 +56,14 @@ impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
chart_url: String,
image_name: String,
) -> Result<(), String> {
// TODO: This works only with local k3d installations, which is fine only for current demo purposes. We assume usage of K8sAnywhereTopology
// https://git.nationtech.io/NationTech/harmony/issues/106
error!(
"FIXME This works only with local k3d installations, which is fine only for current demo purposes. We assume usage of K8sAnywhereTopology"
);
error!("TODO hardcoded k3d bin path is wrong");
let k3d_bin_path = (*HARMONY_DATA_DIR).join("k3d").join("k3d");
// --- 1. Import the container image into the k3d cluster ---
info!(
debug!(
"Importing image '{}' into k3d cluster 'harmony'",
image_name
);
@@ -77,7 +80,7 @@ impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
}
// --- 2. Get the kubeconfig for the k3d cluster and write it to a temp file ---
info!("Retrieving kubeconfig for k3d cluster 'harmony'");
debug!("Retrieving kubeconfig for k3d cluster 'harmony'");
let kubeconfig_output = Command::new(&k3d_bin_path)
.args(["kubeconfig", "get", "harmony"])
.output()
@@ -98,7 +101,7 @@ impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
let kubeconfig_path = temp_kubeconfig.path().to_str().unwrap();
// --- 3. Install or upgrade the Helm chart in the cluster ---
info!(
debug!(
"Deploying Helm chart '{}' to namespace '{}'",
chart_url, app_name
);
@@ -128,7 +131,7 @@ impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
));
}
info!("Successfully deployed '{}' to local k3d cluster.", app_name);
debug!("Successfully deployed '{}' to local k3d cluster.", app_name);
Ok(())
}
}
@@ -148,12 +151,14 @@ impl<
// Or ask for it when unknown
let helm_chart = self.application.build_push_helm_package(&image).await?;
debug!("Pushed new helm chart {helm_chart}");
// TODO: Make building image configurable/skippable if image already exists (prompt)
// https://git.nationtech.io/NationTech/harmony/issues/104
error!("TODO Make building image configurable/skippable if image already exists (prompt)");
let image = self.application.build_push_oci_image().await?;
debug!("Pushed new docker image {image}");
// TODO: this is a temporary hack for demo purposes, the deployment target should be driven
debug!("Installing ContinuousDelivery feature");
// TODO this is a temporary hack for demo purposes, the deployment target should be driven
// by the topology only and we should not have to know how to perform tasks like this for
// which the topology should be responsible.
//
@@ -166,20 +171,17 @@ impl<
// access it. This forces every Topology to understand the concept of targets though... So
// instead I'll create a new Capability which is MultiTargetTopology and we'll see how it
// goes. It still does not feel right though.
//
// https://git.nationtech.io/NationTech/harmony/issues/106
match topology.current_target() {
DeploymentTarget::LocalDev => {
info!("Deploying {} locally...", self.application.name());
self.deploy_to_local_k3d(self.application.name(), helm_chart, image)
.await?;
}
target => {
info!("Deploying {} to target {target:?}", self.application.name());
debug!("Deploying to target {target:?}");
let score = ArgoHelmScore {
namespace: "harmony-example-rust-webapp".to_string(),
openshift: true,
domain: "argo.harmonydemo.apps.ncd0.harmony.mcd".to_string(),
namespace: "harmonydemo-staging".to_string(),
openshift: false,
domain: "argo.harmonydemo.apps.st.mcd".to_string(),
argo_apps: vec![ArgoApplication::from(CDApplicationConfig {
// helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0
version: Version::from("0.1.0").unwrap(),
@@ -187,11 +189,12 @@ impl<
helm_chart_name: "harmony-example-rust-webapp-chart".to_string(),
values_overrides: None,
name: "harmony-demo-rust-webapp".to_string(),
namespace: "harmony-example-rust-webapp".to_string(),
namespace: "harmonydemo-staging".to_string(),
})],
};
score
.interpret(&Inventory::empty(), topology)
.create_interpret()
.execute(&Inventory::empty(), topology)
.await
.unwrap();
}
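The comment above leans on the MultiTargetTopology capability and its current_target() method; a minimal sketch of their assumed shape (the real definitions live in the crate's topology module and may differ):

use crate::topology::Topology;

// Deployment targets matched on above (LocalDev vs. other targets).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DeploymentTarget {
    LocalDev,
    Staging,
    Production,
}

// Capability trait: a topology that knows which target it is currently deploying to,
// so a feature like ContinuousDelivery never hard-codes k3d or ArgoCD specifics.
pub trait MultiTargetTopology: Topology {
    fn current_target(&self) -> DeploymentTarget;
}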

View File

@@ -1,4 +1,5 @@
use async_trait::async_trait;
use log::error;
use non_blank_string_rs::NonBlankString;
use serde::Serialize;
use std::str::FromStr;
@@ -49,21 +50,20 @@ impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for ArgoInterpret {
inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
self.score.interpret(inventory, topology).await?;
error!("Uncomment below, only disabled for debugging");
self.score
.create_interpret()
.execute(inventory, topology)
.await?;
let k8s_client = topology.k8s_client().await?;
k8s_client
.apply_yaml_many(&self.argo_apps.iter().map(|a| a.to_yaml()).collect(), None)
.await
.unwrap();
Ok(Outcome::success(format!(
"ArgoCD installed with {} {}",
self.argo_apps.len(),
match self.argo_apps.len() {
1 => "application",
_ => "applications",
}
"Successfully installed ArgoCD and {} Applications",
self.argo_apps.len()
)))
}
@@ -986,7 +986,7 @@ commitServer:
);
HelmChartScore {
namespace: Some(NonBlankString::from_str(namespace).unwrap()),
namespace: Some(NonBlankString::from_str(&namespace).unwrap()),
release_name: NonBlankString::from_str("argo-cd").unwrap(),
chart_name: NonBlankString::from_str("argo/argo-cd").unwrap(),
chart_version: Some(NonBlankString::from_str("8.1.2").unwrap()),

View File

@@ -4,7 +4,6 @@ use crate::modules::application::{Application, ApplicationFeature};
use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus;
use crate::topology::MultiTargetTopology;
use crate::{
inventory::Inventory,
modules::monitoring::{
@@ -34,7 +33,6 @@ impl<
+ 'static
+ TenantManager
+ K8sclient
+ MultiTargetTopology
+ std::fmt::Debug
+ PrometheusApplicationMonitoring<CRDPrometheus>,
> ApplicationFeature<T> for Monitoring
@@ -57,11 +55,12 @@ impl<
};
let ntfy = NtfyScore {
namespace: namespace.clone(),
host: "ntfy.harmonydemo.apps.ncd0.harmony.mcd".to_string(),
host: "localhost".to_string(),
};
ntfy.interpret(&Inventory::empty(), topology)
ntfy.create_interpret()
.execute(&Inventory::empty(), topology)
.await
.map_err(|e| e.to_string())?;
.expect("couldn't create interpret for ntfy");
let ntfy_default_auth_username = "harmony";
let ntfy_default_auth_password = "harmony";
@@ -96,9 +95,10 @@ impl<
alerting_score.receivers.push(Box::new(ntfy_receiver));
alerting_score
.interpret(&Inventory::empty(), topology)
.create_interpret()
.execute(&Inventory::empty(), topology)
.await
.map_err(|e| e.to_string())?;
.unwrap();
Ok(())
}
fn name(&self) -> String {

View File

@@ -14,19 +14,11 @@ use serde::Serialize;
use crate::{
data::{Id, Version},
instrumentation::{self, HarmonyEvent},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
topology::Topology,
};
#[derive(Clone, Debug)]
pub enum ApplicationFeatureStatus {
Installing,
Installed,
Failed { details: String },
}
pub trait Application: std::fmt::Debug + Send + Sync {
fn name(&self) -> String;
}
@@ -55,41 +47,20 @@ impl<A: Application, T: Topology + std::fmt::Debug> Interpret<T> for Application
.join(", ")
);
for feature in self.features.iter() {
instrumentation::instrument(HarmonyEvent::ApplicationFeatureStateChanged {
topology: topology.name().into(),
application: self.application.name(),
feature: feature.name(),
status: ApplicationFeatureStatus::Installing,
})
.unwrap();
debug!(
"Installing feature {} for application {app_name}",
feature.name()
);
let _ = match feature.ensure_installed(topology).await {
Ok(()) => {
instrumentation::instrument(HarmonyEvent::ApplicationFeatureStateChanged {
topology: topology.name().into(),
application: self.application.name(),
feature: feature.name(),
status: ApplicationFeatureStatus::Installed,
})
.unwrap();
}
Ok(()) => (),
Err(msg) => {
instrumentation::instrument(HarmonyEvent::ApplicationFeatureStateChanged {
topology: topology.name().into(),
application: self.application.name(),
feature: feature.name(),
status: ApplicationFeatureStatus::Failed {
details: msg.clone(),
},
})
.unwrap();
return Err(InterpretError::new(format!(
"Application Interpret failed to install feature : {msg}"
)));
}
};
}
Ok(Outcome::success("Application created".to_string()))
Ok(Outcome::success("successfully created app".to_string()))
}
fn get_name(&self) -> InterpretName {
@@ -110,7 +81,7 @@ impl<A: Application, T: Topology + std::fmt::Debug> Interpret<T> for Application
}
impl Serialize for dyn Application {
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{

View File

@@ -1,5 +1,5 @@
use std::fs;
use std::path::{Path, PathBuf};
use std::path::PathBuf;
use std::process;
use std::sync::Arc;
@@ -10,7 +10,7 @@ use dockerfile_builder::Dockerfile;
use dockerfile_builder::instruction::{CMD, COPY, ENV, EXPOSE, FROM, RUN, USER, WORKDIR};
use dockerfile_builder::instruction_builder::CopyBuilder;
use futures_util::StreamExt;
use log::{debug, info, log_enabled};
use log::{debug, error, log_enabled};
use serde::Serialize;
use tar::Archive;
@@ -46,7 +46,7 @@ where
}
fn name(&self) -> String {
format!("{} [ApplicationScore]", self.application.name())
format!("Application: {}", self.application.name())
}
}
@@ -73,19 +73,19 @@ impl Application for RustWebapp {
#[async_trait]
impl HelmPackage for RustWebapp {
async fn build_push_helm_package(&self, image_url: &str) -> Result<String, String> {
info!("Starting Helm chart build and push for '{}'", self.name);
debug!("Starting Helm chart build and push for '{}'", self.name);
// 1. Create the Helm chart files on disk.
let chart_dir = self
.create_helm_chart_files(image_url)
.map_err(|e| format!("Failed to create Helm chart files: {}", e))?;
info!("Successfully created Helm chart files in {:?}", chart_dir);
debug!("Successfully created Helm chart files in {:?}", chart_dir);
// 2. Package the chart into a .tgz archive.
let packaged_chart_path = self
.package_helm_chart(&chart_dir)
.map_err(|e| format!("Failed to package Helm chart: {}", e))?;
info!(
debug!(
"Successfully packaged Helm chart: {}",
packaged_chart_path.to_string_lossy()
);
@@ -94,7 +94,7 @@ impl HelmPackage for RustWebapp {
let oci_chart_url = self
.push_helm_chart(&packaged_chart_path)
.map_err(|e| format!("Failed to push Helm chart: {}", e))?;
info!("Successfully pushed Helm chart to: {}", oci_chart_url);
debug!("Successfully pushed Helm chart to: {}", oci_chart_url);
Ok(oci_chart_url)
}
@@ -107,20 +107,20 @@ impl OCICompliant for RustWebapp {
async fn build_push_oci_image(&self) -> Result<String, String> {
// This function orchestrates the build and push process.
// It's async to match the trait definition, though the underlying docker commands are blocking.
info!("Starting OCI image build and push for '{}'", self.name);
debug!("Starting OCI image build and push for '{}'", self.name);
// 1. Build the image by calling the synchronous helper function.
let image_tag = self.image_name();
self.build_docker_image(&image_tag)
.await
.map_err(|e| format!("Failed to build Docker image: {}", e))?;
info!("Successfully built Docker image: {}", image_tag);
debug!("Successfully built Docker image: {}", image_tag);
// 2. Push the image to the registry.
self.push_docker_image(&image_tag)
.await
.map_err(|e| format!("Failed to push Docker image: {}", e))?;
info!("Successfully pushed Docker image to: {}", image_tag);
debug!("Successfully pushed Docker image to: {}", image_tag);
Ok(image_tag)
}
@@ -174,7 +174,7 @@ impl RustWebapp {
.platform("linux/x86_64");
let mut temp_tar_builder = tar::Builder::new(Vec::new());
temp_tar_builder
let _ = temp_tar_builder
.append_dir_all("", self.project_root.clone())
.unwrap();
let archive = temp_tar_builder
@@ -195,7 +195,7 @@ impl RustWebapp {
);
while let Some(msg) = image_build_stream.next().await {
debug!("Message: {msg:?}");
println!("Message: {msg:?}");
}
Ok(image_name.to_string())
@@ -219,7 +219,7 @@ impl RustWebapp {
);
while let Some(msg) = push_image_stream.next().await {
debug!("Message: {msg:?}");
println!("Message: {msg:?}");
}
Ok(image_tag.to_string())
@@ -288,8 +288,9 @@ impl RustWebapp {
.unwrap(),
);
// Copy the compiled binary from the builder stage.
// TODO: Should not be using score name here, instead should use name from Cargo.toml
// https://git.nationtech.io/NationTech/harmony/issues/105
error!(
"FIXME Should not be using score name here, instead should use name from Cargo.toml"
);
let binary_path_in_builder = format!("/app/target/release/{}", self.name);
let binary_path_in_final = format!("/home/appuser/{}", self.name);
dockerfile.push(
@@ -327,8 +328,9 @@ impl RustWebapp {
));
// Copy only the compiled binary from the builder stage.
// TODO: Should not be using score name here, instead should use name from Cargo.toml
// https://git.nationtech.io/NationTech/harmony/issues/105
error!(
"FIXME Should not be using score name here, instead should use name from Cargo.toml"
);
let binary_path_in_builder = format!("/app/target/release/{}", self.name);
let binary_path_in_final = format!("/usr/local/bin/{}", self.name);
dockerfile.push(
@@ -528,7 +530,10 @@ spec:
}
/// Packages a Helm chart directory into a .tgz file.
fn package_helm_chart(&self, chart_dir: &Path) -> Result<PathBuf, Box<dyn std::error::Error>> {
fn package_helm_chart(
&self,
chart_dir: &PathBuf,
) -> Result<PathBuf, Box<dyn std::error::Error>> {
let chart_dirname = chart_dir.file_name().expect("Should find a chart dirname");
debug!(
"Launching `helm package {}` cli with CWD {}",
@@ -541,13 +546,14 @@ spec:
);
let output = process::Command::new("helm")
.args(["package", chart_dirname.to_str().unwrap()])
.current_dir(self.project_root.join(".harmony_generated").join("helm")) // Run package from the parent dir
.current_dir(&self.project_root.join(".harmony_generated").join("helm")) // Run package from the parent dir
.output()?;
self.check_output(&output, "Failed to package Helm chart")?;
// Helm prints the path of the created chart to stdout.
let tgz_name = String::from_utf8(output.stdout)?
.trim()
.split_whitespace()
.last()
.unwrap_or_default()
@@ -567,7 +573,7 @@ spec:
/// Pushes a packaged Helm chart to an OCI registry.
fn push_helm_chart(
&self,
packaged_chart_path: &Path,
packaged_chart_path: &PathBuf,
) -> Result<String, Box<dyn std::error::Error>> {
// The chart name is the file stem of the .tgz file
let chart_file_name = packaged_chart_path.file_stem().unwrap().to_str().unwrap();
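Both FIXME notes above concern deriving the binary name from Cargo.toml instead of the score name; a rough sketch of such a lookup, assuming a TOML parser like the toml crate (not necessarily a dependency of this project):

use std::fs;
use std::path::Path;

fn binary_name_from_cargo_toml(project_root: &Path) -> Result<String, Box<dyn std::error::Error>> {
    let manifest = fs::read_to_string(project_root.join("Cargo.toml"))?;
    let value: toml::Value = manifest.parse()?;
    // Uses [package].name; a [[bin]] entry with a custom name would need extra handling.
    let name = value
        .get("package")
        .and_then(|pkg| pkg.get("name"))
        .and_then(|name| name.as_str())
        .ok_or("no [package].name found in Cargo.toml")?;
    Ok(name.to_string())
}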

View File

@@ -41,6 +41,6 @@ impl<T: Topology + HelmCommand> Score<T> for CertManagerHelmScore {
}
fn name(&self) -> String {
"CertManagerHelmScore".to_string()
format!("CertManagerHelmScore")
}
}

View File

@@ -111,7 +111,7 @@ impl DhcpInterpret {
let boot_filename_outcome = match &self.score.boot_filename {
Some(boot_filename) => {
dhcp_server.set_boot_filename(boot_filename).await?;
dhcp_server.set_boot_filename(&boot_filename).await?;
Outcome::new(
InterpretStatus::SUCCESS,
format!("Dhcp Interpret Set boot filename to {boot_filename}"),
@@ -122,7 +122,7 @@ impl DhcpInterpret {
let filename_outcome = match &self.score.filename {
Some(filename) => {
dhcp_server.set_filename(filename).await?;
dhcp_server.set_filename(&filename).await?;
Outcome::new(
InterpretStatus::SUCCESS,
format!("Dhcp Interpret Set filename to {filename}"),
@@ -133,7 +133,7 @@ impl DhcpInterpret {
let filename64_outcome = match &self.score.filename64 {
Some(filename64) => {
dhcp_server.set_filename64(filename64).await?;
dhcp_server.set_filename64(&filename64).await?;
Outcome::new(
InterpretStatus::SUCCESS,
format!("Dhcp Interpret Set filename64 to {filename64}"),
@@ -144,7 +144,7 @@ impl DhcpInterpret {
let filenameipxe_outcome = match &self.score.filenameipxe {
Some(filenameipxe) => {
dhcp_server.set_filenameipxe(filenameipxe).await?;
dhcp_server.set_filenameipxe(&filenameipxe).await?;
Outcome::new(
InterpretStatus::SUCCESS,
format!("Dhcp Interpret Set filenameipxe to {filenameipxe}"),
@@ -209,7 +209,7 @@ impl<T: DhcpServer> Interpret<T> for DhcpInterpret {
Ok(Outcome::new(
InterpretStatus::SUCCESS,
"Dhcp Interpret execution successful".to_string(),
format!("Dhcp Interpret execution successful"),
))
}
}

View File

@@ -112,7 +112,7 @@ impl<T: Topology + DnsServer> Interpret<T> for DnsInterpret {
Ok(Outcome::new(
InterpretStatus::SUCCESS,
"Dns Interpret execution successful".to_string(),
format!("Dns Interpret execution successful"),
))
}
}

View File

@@ -55,7 +55,7 @@ impl<T: Topology + HelmCommand> Score<T> for HelmChartScore {
}
fn name(&self) -> String {
format!("{} [HelmChartScore]", self.release_name)
format!("{} {} HelmChartScore", self.release_name, self.chart_name)
}
}
@@ -90,10 +90,14 @@ impl HelmChartInterpret {
);
match add_output.status.success() {
true => Ok(()),
false => Err(InterpretError::new(format!(
"Failed to add helm repository!\n{full_output}"
))),
true => {
return Ok(());
}
false => {
return Err(InterpretError::new(format!(
"Failed to add helm repository!\n{full_output}"
)));
}
}
}
}
@@ -208,7 +212,7 @@ impl<T: Topology + HelmCommand> Interpret<T> for HelmChartInterpret {
}
let res = helm_executor.install_or_upgrade(
ns,
&ns,
&self.score.release_name,
&self.score.chart_name,
self.score.chart_version.as_ref(),
@@ -225,27 +229,24 @@ impl<T: Topology + HelmCommand> Interpret<T> for HelmChartInterpret {
match status {
helm_wrapper_rs::HelmDeployStatus::Deployed => Ok(Outcome::new(
InterpretStatus::SUCCESS,
format!("Helm Chart {} deployed", self.score.release_name),
"Helm Chart deployed".to_string(),
)),
helm_wrapper_rs::HelmDeployStatus::PendingInstall => Ok(Outcome::new(
InterpretStatus::RUNNING,
format!("Helm Chart {} pending install...", self.score.release_name),
"Helm Chart Pending install".to_string(),
)),
helm_wrapper_rs::HelmDeployStatus::PendingUpgrade => Ok(Outcome::new(
InterpretStatus::RUNNING,
format!("Helm Chart {} pending upgrade...", self.score.release_name),
"Helm Chart pending upgrade".to_string(),
)),
helm_wrapper_rs::HelmDeployStatus::Failed => Err(InterpretError::new(
"Failed to install helm chart".to_string(),
)),
helm_wrapper_rs::HelmDeployStatus::Failed => Err(InterpretError::new(format!(
"Helm Chart {} installation failed",
self.score.release_name
))),
}
}
fn get_name(&self) -> InterpretName {
InterpretName::HelmChart
todo!()
}
fn get_version(&self) -> Version {
todo!()
}

View File

@@ -77,11 +77,14 @@ impl HelmCommandExecutor {
)?;
}
let out = self.clone().run_command(
let out = match self.clone().run_command(
self.chart
.clone()
.helm_args(self.globals.chart_home.clone().unwrap()),
)?;
) {
Ok(out) => out,
Err(e) => return Err(e),
};
// TODO: don't use unwrap here
let s = String::from_utf8(out.stdout).unwrap();
@@ -95,11 +98,14 @@ impl HelmCommandExecutor {
}
pub fn version(self) -> Result<String, std::io::Error> {
let out = self.run_command(vec![
let out = match self.run_command(vec![
"version".to_string(),
"-c".to_string(),
"--short".to_string(),
])?;
]) {
Ok(out) => out,
Err(e) => return Err(e),
};
// TODO: don't use unwrap
Ok(String::from_utf8(out.stdout).unwrap())
@@ -123,11 +129,15 @@ impl HelmCommandExecutor {
None => PathBuf::from(TempDir::new()?.path()),
};
if let Some(yaml_str) = self.chart.values_inline {
let tf: TempFile = temp_file::with_contents(yaml_str.as_bytes());
self.chart
.additional_values_files
.push(PathBuf::from(tf.path()));
match self.chart.values_inline {
Some(yaml_str) => {
let tf: TempFile;
tf = temp_file::with_contents(yaml_str.as_bytes());
self.chart
.additional_values_files
.push(PathBuf::from(tf.path()));
}
None => (),
};
self.env.insert(
@@ -170,9 +180,9 @@ impl HelmChart {
match self.repo {
Some(r) => {
if r.starts_with("oci://") {
args.push(
args.push(String::from(
r.trim_end_matches("/").to_string() + "/" + self.name.clone().as_str(),
);
));
} else {
args.push("--repo".to_string());
args.push(r.to_string());
@@ -183,9 +193,12 @@ impl HelmChart {
None => args.push(self.name),
};
if let Some(v) = self.version {
args.push("--version".to_string());
args.push(v.to_string());
match self.version {
Some(v) => {
args.push("--version".to_string());
args.push(v.to_string());
}
None => (),
}
args
@@ -349,7 +362,7 @@ impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for HelmChartInterpretV
}
fn get_name(&self) -> InterpretName {
InterpretName::HelmCommand
todo!()
}
fn get_version(&self) -> Version {
todo!()

View File

@@ -1,12 +1,13 @@
use std::path::PathBuf;
use async_trait::async_trait;
use log::debug;
use log::{debug, info};
use serde::Serialize;
use crate::{
config::HARMONY_DATA_DIR,
data::{Id, Version},
instrumentation::{self, HarmonyEvent},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
@@ -29,14 +30,14 @@ impl Default for K3DInstallationScore {
}
impl<T: Topology> Score<T> for K3DInstallationScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
Box::new(K3dInstallationInterpret {
score: self.clone(),
})
}
fn name(&self) -> String {
"K3dInstallationScore".into()
todo!()
}
}
@@ -50,14 +51,20 @@ impl<T: Topology> Interpret<T> for K3dInstallationInterpret {
async fn execute(
&self,
_inventory: &Inventory,
_topology: &T,
topology: &T,
) -> Result<Outcome, InterpretError> {
instrumentation::instrument(HarmonyEvent::InterpretExecutionStarted {
topology: topology.name().into(),
interpret: "k3d-installation".into(),
message: "installing k3d...".into(),
})
.unwrap();
let k3d = k3d_rs::K3d::new(
self.score.installation_path.clone(),
Some(self.score.cluster_name.clone()),
);
match k3d.ensure_installed().await {
let outcome = match k3d.ensure_installed().await {
Ok(_client) => {
let msg = format!("k3d cluster '{}' installed ", self.score.cluster_name);
debug!("{msg}");
@@ -66,7 +73,16 @@ impl<T: Topology> Interpret<T> for K3dInstallationInterpret {
Err(msg) => Err(InterpretError::new(format!(
"failed to ensure k3d is installed : {msg}"
))),
}
};
instrumentation::instrument(HarmonyEvent::InterpretExecutionFinished {
topology: topology.name().into(),
interpret: "k3d-installation".into(),
outcome: outcome.clone(),
})
.unwrap();
outcome
}
fn get_name(&self) -> InterpretName {
InterpretName::K3dInstallation

View File

@@ -89,7 +89,7 @@ where
))
}
fn get_name(&self) -> InterpretName {
InterpretName::K8sResource
todo!()
}
fn get_version(&self) -> Version {
todo!()

View File

@@ -128,12 +128,13 @@ impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for LAMPInterpret {
info!("Deploying score {deployment_score:#?}");
deployment_score.interpret(inventory, topology).await?;
deployment_score
.create_interpret()
.execute(inventory, topology)
.await?;
info!("LAMP deployment_score {deployment_score:?}");
let ingress_path = ingress_path!("/");
let lamp_ingress = K8sIngressScore {
name: fqdn!("lamp-ingress"),
host: fqdn!("test"),
@@ -143,14 +144,17 @@ impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for LAMPInterpret {
.as_str()
),
port: 8080,
path: Some(ingress_path),
path: Some(ingress_path!("/")),
path_type: None,
namespace: self
.get_namespace()
.map(|nbs| fqdn!(nbs.to_string().as_str())),
};
lamp_ingress.interpret(inventory, topology).await?;
lamp_ingress
.create_interpret()
.execute(inventory, topology)
.await?;
info!("LAMP lamp_ingress {lamp_ingress:?}");
@@ -160,7 +164,7 @@ impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for LAMPInterpret {
}
fn get_name(&self) -> InterpretName {
InterpretName::Lamp
todo!()
}
fn get_version(&self) -> Version {
@@ -209,7 +213,7 @@ impl LAMPInterpret {
repository: None,
};
score.interpret(inventory, topology).await
score.create_interpret().execute(inventory, topology).await
}
fn build_dockerfile(&self, score: &LAMPScore) -> Result<PathBuf, Box<dyn std::error::Error>> {
let mut dockerfile = Dockerfile::new();

View File

@@ -14,6 +14,5 @@ pub mod monitoring;
pub mod okd;
pub mod opnsense;
pub mod prometheus;
pub mod storage;
pub mod tenant;
pub mod tftp;

View File

@@ -18,7 +18,7 @@ use crate::{
#[async_trait]
impl AlertRule<KubePrometheus> for AlertManagerRuleGroup {
async fn install(&self, sender: &KubePrometheus) -> Result<Outcome, InterpretError> {
sender.install_rule(self).await
sender.install_rule(&self).await
}
fn clone_box(&self) -> Box<dyn AlertRule<KubePrometheus>> {
Box::new(self.clone())
@@ -28,7 +28,7 @@ impl AlertRule<KubePrometheus> for AlertManagerRuleGroup {
#[async_trait]
impl AlertRule<Prometheus> for AlertManagerRuleGroup {
async fn install(&self, sender: &Prometheus) -> Result<Outcome, InterpretError> {
sender.install_rule(self).await
sender.install_rule(&self).await
}
fn clone_box(&self) -> Box<dyn AlertRule<Prometheus>> {
Box::new(self.clone())

View File

@@ -13,7 +13,7 @@ use crate::{
prometheus::prometheus::PrometheusApplicationMonitoring,
},
score::Score,
topology::{PreparationOutcome, Topology, oberservability::monitoring::AlertReceiver},
topology::{Topology, oberservability::monitoring::AlertReceiver},
};
#[derive(Debug, Clone, Serialize)]
@@ -33,10 +33,7 @@ impl<T: Topology + PrometheusApplicationMonitoring<CRDPrometheus>> Score<T>
}
fn name(&self) -> String {
format!(
"{} monitoring [ApplicationMonitoringScore]",
self.application.name()
)
"ApplicationMonitoringScore".to_string()
}
}
@@ -54,27 +51,17 @@ impl<T: Topology + PrometheusApplicationMonitoring<CRDPrometheus>> Interpret<T>
inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
let result = topology
topology
.install_prometheus(
&self.score.sender,
inventory,
Some(self.score.receivers.clone()),
)
.await;
match result {
Ok(outcome) => match outcome {
PreparationOutcome::Success { details: _ } => {
Ok(Outcome::success("Prometheus installed".into()))
}
PreparationOutcome::Noop => Ok(Outcome::noop()),
},
Err(err) => Err(InterpretError::from(err)),
}
.await
}
fn get_name(&self) -> InterpretName {
InterpretName::ApplicationMonitoring
todo!()
}
fn get_version(&self) -> Version {

View File

@@ -4,14 +4,15 @@ use std::str::FromStr;
use crate::modules::helm::chart::HelmChartScore;
pub fn grafana_helm_chart_score(ns: &str) -> HelmChartScore {
let values = r#"
let values = format!(
r#"
rbac:
namespaced: true
sidecar:
dashboards:
enabled: true
"#
.to_string();
);
HelmChartScore {
namespace: Some(NonBlankString::from_str(ns).unwrap()),

View File

@@ -1,6 +1,7 @@
use kube::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;
use super::crd_prometheuses::LabelSelector;

View File

@@ -1,8 +1,13 @@
use crate::modules::prometheus::alerts::k8s::{
deployment::alert_deployment_unavailable,
pod::{alert_container_restarting, alert_pod_not_ready, pod_failed},
pvc::high_pvc_fill_rate_over_two_days,
service::alert_service_down,
use std::collections::BTreeMap;
use crate::modules::{
monitoring::alert_rule::prometheus_alert_rule::PrometheusAlertRule,
prometheus::alerts::k8s::{
deployment::alert_deployment_unavailable,
pod::{alert_container_restarting, alert_pod_not_ready, pod_failed},
pvc::high_pvc_fill_rate_over_two_days,
service::alert_service_down,
},
};
use super::crd_prometheus_rules::Rule;

View File

@@ -6,6 +6,8 @@ use serde::{Deserialize, Serialize};
use crate::modules::monitoring::alert_rule::prometheus_alert_rule::PrometheusAlertRule;
use super::crd_default_rules::build_default_application_rules;
#[derive(CustomResource, Debug, Serialize, Deserialize, Clone, JsonSchema)]
#[kube(
group = "monitoring.coreos.com",

View File

@@ -1,9 +1,11 @@
use std::collections::HashMap;
use std::collections::{BTreeMap, HashMap};
use kube::CustomResource;
use kube::{CustomResource, Resource, api::ObjectMeta};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::interpret::InterpretError;
use crate::modules::monitoring::kube_prometheus::types::{
HTTPScheme, MatchExpression, NamespaceSelector, Operator, Selector,
ServiceMonitor as KubeServiceMonitor, ServiceMonitorEndpoint,
@@ -48,7 +50,7 @@ pub struct ServiceMonitorSpec {
impl Default for ServiceMonitorSpec {
fn default() -> Self {
let labels = HashMap::new();
let mut labels = HashMap::new();
Self {
selector: Selector {
match_labels: { labels },

View File

@@ -27,12 +27,6 @@ pub struct KubePrometheusConfig {
pub alert_rules: Vec<AlertManagerAdditionalPromRules>,
pub additional_service_monitors: Vec<ServiceMonitor>,
}
impl Default for KubePrometheusConfig {
fn default() -> Self {
Self::new()
}
}
impl KubePrometheusConfig {
pub fn new() -> Self {
Self {

View File

@@ -35,7 +35,7 @@ pub fn kube_prometheus_helm_chart_score(
let kube_proxy = config.kube_proxy.to_string();
let kube_state_metrics = config.kube_state_metrics.to_string();
let node_exporter = config.node_exporter.to_string();
let _prometheus_operator = config.prometheus_operator.to_string();
let prometheus_operator = config.prometheus_operator.to_string();
let prometheus = config.prometheus.to_string();
let resource_limit = Resources {
limits: Limits {
@@ -64,7 +64,7 @@ pub fn kube_prometheus_helm_chart_score(
indent_lines(&yaml, indent_level + 2)
)
}
let _resource_section = resource_block(&resource_limit, 2);
let resource_section = resource_block(&resource_limit, 2);
let mut values = format!(
r#"

View File

@@ -55,12 +55,6 @@ pub struct KubePrometheus {
pub config: Arc<Mutex<KubePrometheusConfig>>,
}
impl Default for KubePrometheus {
fn default() -> Self {
Self::new()
}
}
impl KubePrometheus {
pub fn new() -> Self {
Self {
@@ -119,7 +113,8 @@ impl KubePrometheus {
topology: &T,
) -> Result<Outcome, InterpretError> {
kube_prometheus_helm_chart_score(self.config.clone())
.interpret(inventory, topology)
.create_interpret()
.execute(inventory, topology)
.await
}
}

View File

@@ -1,25 +1,9 @@
use non_blank_string_rs::NonBlankString;
use std::str::FromStr;
use crate::{modules::helm::chart::HelmChartScore, topology::DeploymentTarget};
pub fn ntfy_helm_chart_score(
namespace: String,
host: String,
target: DeploymentTarget,
) -> HelmChartScore {
// TODO not actually the correct logic, this should be fixed by using an ingress which is the
// correct k8s standard.
//
// Another option is to delegate to the topology the ingress technology it wants to use Route,
// Ingress or other
let route_enabled = match target {
DeploymentTarget::LocalDev => false,
DeploymentTarget::Staging => true,
DeploymentTarget::Production => true,
};
let ingress_enabled = !route_enabled;
use crate::modules::helm::chart::{HelmChartScore, HelmRepository};
pub fn ntfy_helm_chart_score(namespace: String, host: String) -> HelmChartScore {
let values = format!(
r#"
replicaCount: 1
@@ -41,14 +25,23 @@ serviceAccount:
service:
type: ClusterIP
port: 8080
port: 80
ingress:
enabled: {ingress_enabled}
enabled: true
# annotations:
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: {host}
paths:
- path: /
pathType: ImplementationSpecific
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
route:
enabled: {route_enabled}
host: {host}
autoscaling:
enabled: false
@@ -56,7 +49,7 @@ autoscaling:
config:
enabled: true
data:
base-url: "https://{host}"
# base-url: "https://ntfy.something.com"
auth-file: "/var/cache/ntfy/user.db"
auth-default-access: "deny-all"
cache-file: "/var/cache/ntfy/cache.db"
@@ -66,7 +59,6 @@ config:
enable-signup: false
enable-login: "true"
enable-metrics: "true"
listen-http: ":8080"
persistence:
enabled: true
@@ -77,12 +69,16 @@ persistence:
HelmChartScore {
namespace: Some(NonBlankString::from_str(&namespace).unwrap()),
release_name: NonBlankString::from_str("ntfy").unwrap(),
chart_name: NonBlankString::from_str("oci://hub.nationtech.io/harmony/ntfy").unwrap(),
chart_version: Some(NonBlankString::from_str("0.1.7-nationtech.1").unwrap()),
chart_name: NonBlankString::from_str("sarab97/ntfy").unwrap(),
chart_version: Some(NonBlankString::from_str("0.1.7").unwrap()),
values_overrides: None,
values_yaml: Some(values.to_string()),
create_namespace: true,
install_only: false,
repository: None,
repository: Some(HelmRepository::new(
"sarab97".to_string(),
url::Url::parse("https://charts.sarabsingh.com").unwrap(),
true,
)),
}
}
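The comment above suggests delegating the exposure mechanism (OpenShift Route vs. Kubernetes Ingress) to the topology rather than matching on DeploymentTarget inside this helper; one possible shape, with purely illustrative names:

// Hypothetical capability: the topology decides how services get exposed.
pub enum ExposureKind {
    Ingress,
    OpenShiftRoute,
}

pub trait ExposureStrategy {
    fn exposure_kind(&self) -> ExposureKind;
}

// ntfy_helm_chart_score could then derive its flags without knowing about deployment targets:
fn exposure_flags(topology: &impl ExposureStrategy) -> (bool, bool) {
    // returns (ingress_enabled, route_enabled)
    match topology.exposure_kind() {
        ExposureKind::Ingress => (true, false),
        ExposureKind::OpenShiftRoute => (false, true),
    }
}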

View File

@@ -1,3 +1,2 @@
pub mod helm;
#[allow(clippy::module_inception)]
pub mod ntfy;

View File

@@ -1,7 +1,7 @@
use std::sync::Arc;
use async_trait::async_trait;
use log::info;
use log::debug;
use serde::Serialize;
use strum::{Display, EnumString};
@@ -11,7 +11,7 @@ use crate::{
inventory::Inventory,
modules::monitoring::ntfy::helm::ntfy_helm_chart::ntfy_helm_chart_score,
score::Score,
topology::{HelmCommand, K8sclient, MultiTargetTopology, Topology, k8s::K8sClient},
topology::{HelmCommand, K8sclient, Topology, k8s::K8sClient},
};
#[derive(Debug, Clone, Serialize)]
@@ -20,7 +20,7 @@ pub struct NtfyScore {
pub host: String,
}
impl<T: Topology + HelmCommand + K8sclient + MultiTargetTopology> Score<T> for NtfyScore {
impl<T: Topology + HelmCommand + K8sclient> Score<T> for NtfyScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(NtfyInterpret {
score: self.clone(),
@@ -28,7 +28,7 @@ impl<T: Topology + HelmCommand + K8sclient + MultiTargetTopology> Score<T> for N
}
fn name(&self) -> String {
"alert receiver [NtfyScore]".into()
format!("Ntfy")
}
}
@@ -39,21 +39,31 @@ pub struct NtfyInterpret {
#[derive(Debug, EnumString, Display)]
enum NtfyAccessMode {
#[strum(serialize = "read-write", serialize = "rw")]
#[strum(serialize = "read-write", serialize = "rw", to_string = "read-write")]
ReadWrite,
#[strum(serialize = "read-only", serialize = "ro", serialize = "read")]
#[strum(
serialize = "read-only",
serialize = "ro",
serialize = "read",
to_string = "read-only"
)]
ReadOnly,
#[strum(serialize = "write-only", serialize = "wo", serialize = "write")]
#[strum(
serialize = "write-only",
serialize = "wo",
serialize = "write",
to_string = "write-only"
)]
WriteOnly,
#[strum(serialize = "deny", serialize = "none")]
#[strum(serialize = "none", to_string = "deny")]
Deny,
}
#[derive(Debug, EnumString, Display)]
enum NtfyRole {
#[strum(serialize = "user")]
#[strum(serialize = "user", to_string = "user")]
User,
#[strum(serialize = "admin")]
#[strum(serialize = "admin", to_string = "admin")]
Admin,
}
@@ -77,7 +87,7 @@ impl NtfyInterpret {
vec![
"sh",
"-c",
format!("NTFY_PASSWORD={password} ntfy user add --role={role} --ignore-exists {username}")
format!("NTFY_PASSWORD={password} ntfy user add --role={role} {username}")
.as_str(),
],
)
@@ -85,52 +95,69 @@ impl NtfyInterpret {
Ok(())
}
async fn set_access(
&self,
k8s_client: Arc<K8sClient>,
username: &str,
topic: &str,
mode: NtfyAccessMode,
) -> Result<(), String> {
k8s_client
.exec_app(
"ntfy".to_string(),
Some(&self.score.namespace),
vec![
"sh",
"-c",
format!("ntfy access {username} {topic} {mode}").as_str(),
],
)
.await?;
Ok(())
}
}
/// We need a ntfy interpret to wrap the HelmChartScore in order to run the score, and then bootstrap the config inside ntfy
#[async_trait]
impl<T: Topology + HelmCommand + K8sclient + MultiTargetTopology> Interpret<T> for NtfyInterpret {
impl<T: Topology + HelmCommand + K8sclient> Interpret<T> for NtfyInterpret {
async fn execute(
&self,
inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
ntfy_helm_chart_score(
self.score.namespace.clone(),
self.score.host.clone(),
topology.current_target(),
)
.interpret(inventory, topology)
.await?;
ntfy_helm_chart_score(self.score.namespace.clone(), self.score.host.clone())
.create_interpret()
.execute(inventory, topology)
.await?;
info!("installed ntfy helm chart");
debug!("installed ntfy helm chart");
let client = topology
.k8s_client()
.await
.expect("couldn't get k8s client");
info!("deploying ntfy...");
client
.wait_until_deployment_ready(
"ntfy".to_string(),
Some(self.score.namespace.as_str()),
Some(&self.score.namespace.as_str()),
None,
)
.await?;
info!("ntfy deployed");
debug!("created k8s client");
info!("adding user harmony");
self.add_user(client, "harmony", "harmony", Some(NtfyRole::Admin))
.await?;
info!("user added");
Ok(Outcome::success("Ntfy installed".to_string()))
debug!("exec into pod done");
Ok(Outcome::success("installed ntfy".to_string()))
}
fn get_name(&self) -> InterpretName {
InterpretName::Ntfy
todo!()
}
fn get_version(&self) -> Version {
todo!()
}

View File

@@ -1,4 +1,3 @@
pub mod helm;
#[allow(clippy::module_inception)]
pub mod prometheus;
pub mod prometheus_config;

View File

@@ -37,12 +37,6 @@ impl AlertSender for Prometheus {
}
}
impl Default for Prometheus {
fn default() -> Self {
Self::new()
}
}
impl Prometheus {
pub fn new() -> Self {
Self {
@@ -100,7 +94,8 @@ impl Prometheus {
topology: &T,
) -> Result<Outcome, InterpretError> {
prometheus_helm_chart_score(self.config.clone())
.interpret(inventory, topology)
.create_interpret()
.execute(inventory, topology)
.await
}
pub async fn install_grafana<T: Topology + HelmCommand + Send + Sync>(
@@ -115,12 +110,13 @@ impl Prometheus {
if let Some(ns) = namespace.as_deref() {
grafana_helm_chart_score(ns)
.interpret(inventory, topology)
.create_interpret()
.execute(inventory, topology)
.await
} else {
Err(InterpretError::new(
"could not install grafana, missing namespace".to_string(),
))
Err(InterpretError::new(format!(
"could not install grafana, missing namespace",
)))
}
}
}

View File

@@ -16,12 +16,6 @@ pub struct PrometheusConfig {
pub additional_service_monitors: Vec<ServiceMonitor>,
}
impl Default for PrometheusConfig {
fn default() -> Self {
Self::new()
}
}
impl PrometheusConfig {
pub fn new() -> Self {
Self {

View File

@@ -32,7 +32,7 @@ impl OKDBootstrapDhcpScore {
logical_host: topology.bootstrap_host.clone(),
physical_host: inventory
.worker_host
.first()
.get(0)
.expect("Should have at least one worker to be used as bootstrap node")
.clone(),
});

View File

@@ -6,12 +6,6 @@ pub struct OKDUpgradeScore {
_target_version: Version,
}
impl Default for OKDUpgradeScore {
fn default() -> Self {
Self::new()
}
}
impl OKDUpgradeScore {
pub fn new() -> Self {
Self {

View File

@@ -61,7 +61,7 @@ impl<T: Topology + K8sclient + PrometheusApplicationMonitoring<CRDPrometheus>> S
}
fn name(&self) -> String {
"prometheus alerting [CRDAlertingScore]".into()
"CRDApplicationAlertingScore".into()
}
}
@@ -93,13 +93,13 @@ impl<T: Topology + K8sclient + PrometheusApplicationMonitoring<CRDPrometheus>> I
self.install_rules(&self.prometheus_rules, &client).await?;
self.install_monitors(self.service_monitors.clone(), &client)
.await?;
Ok(Outcome::success(
"K8s monitoring components installed".to_string(),
))
Ok(Outcome::success(format!(
"deployed application monitoring composants"
)))
}
fn get_name(&self) -> InterpretName {
InterpretName::K8sPrometheusCrdAlerting
todo!()
}
fn get_version(&self) -> Version {
@@ -118,7 +118,7 @@ impl<T: Topology + K8sclient + PrometheusApplicationMonitoring<CRDPrometheus>> I
impl K8sPrometheusCRDAlertingInterpret {
async fn crd_exists(&self, crd: &str) -> bool {
let status = Command::new("sh")
.args(["-c", &format!("kubectl get crd -A | grep -i {crd}")])
.args(["-c", "kubectl get crd -A | grep -i", crd])
.status()
.map_err(|e| InterpretError::new(format!("could not connect to cluster: {}", e)))
.unwrap();
@@ -166,8 +166,7 @@ impl K8sPrometheusCRDAlertingInterpret {
let install_output = Command::new("helm")
.args([
"upgrade",
"--install",
"install",
&chart_name,
tgz_path.to_str().unwrap(),
"--namespace",
@@ -416,7 +415,7 @@ impl K8sPrometheusCRDAlertingInterpret {
async fn install_rules(
&self,
#[allow(clippy::ptr_arg)] rules: &Vec<RuleGroup>,
rules: &Vec<RuleGroup>,
client: &Arc<K8sClient>,
) -> Result<Outcome, InterpretError> {
let mut prom_rule_spec = PrometheusRuleSpec {
@@ -424,7 +423,7 @@ impl K8sPrometheusCRDAlertingInterpret {
};
let default_rules_group = RuleGroup {
name: "default-rules".to_string(),
name: format!("default-rules"),
rules: build_default_application_rules(),
};
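The crd_exists helper above shells out to kubectl and grep; an in-process alternative is sketched below, assuming the kube crate already used elsewhere in this codebase plus k8s-openapi for the CRD type. Note it expects the full CRD name (e.g. alertmanagerconfigs.monitoring.coreos.com) rather than a grep substring:

use k8s_openapi::apiextensions_apiserver::pkg::apis::apiextensions::v1::CustomResourceDefinition;
use kube::{Api, Client};

async fn crd_exists(client: Client, crd_name: &str) -> Result<bool, kube::Error> {
    // CRDs are cluster-scoped; get_opt returns Ok(None) when the CRD is absent
    // instead of surfacing a 404 as an error.
    let crds: Api<CustomResourceDefinition> = Api::all(client);
    Ok(crds.get_opt(crd_name).await?.is_some())
}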

View File

@@ -1,4 +1,3 @@
pub mod alerts;
pub mod k8s_prometheus_alerting_score;
#[allow(clippy::module_inception)]
pub mod prometheus;

View File

@@ -1,11 +1,9 @@
use async_trait::async_trait;
use crate::{
interpret::{InterpretError, Outcome},
inventory::Inventory,
topology::{
PreparationError, PreparationOutcome,
oberservability::monitoring::{AlertReceiver, AlertSender},
},
topology::oberservability::monitoring::{AlertReceiver, AlertSender},
};
#[async_trait]
@@ -15,5 +13,5 @@ pub trait PrometheusApplicationMonitoring<S: AlertSender> {
sender: &S,
inventory: &Inventory,
receivers: Option<Vec<Box<dyn AlertReceiver<S>>>>,
) -> Result<PreparationOutcome, PreparationError>;
) -> Result<Outcome, InterpretError>;
}

View File

@@ -1,419 +0,0 @@
use std::{
process::Command,
sync::Arc,
time::{Duration, Instant},
};
use async_trait::async_trait;
use log::{info, warn};
use serde::{Deserialize, Serialize};
use tokio::time::sleep;
use crate::{
data::{Id, Version},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::{K8sclient, Topology, k8s::K8sClient},
};
#[derive(Debug, Clone, Serialize)]
pub struct CephRemoveOsd {
osd_deployment_name: String,
rook_ceph_namespace: String,
}
impl<T: Topology + K8sclient> Score<T> for CephRemoveOsd {
fn name(&self) -> String {
format!("CephRemoveOsdScore")
}
#[doc(hidden)]
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(CephRemoveOsdInterpret {
score: self.clone(),
})
}
}
#[derive(Debug, Clone)]
pub struct CephRemoveOsdInterpret {
score: CephRemoveOsd,
}
#[async_trait]
impl<T: Topology + K8sclient> Interpret<T> for CephRemoveOsdInterpret {
async fn execute(
&self,
_inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
let client = topology.k8s_client().await.unwrap();
self.verify_ceph_toolbox_exists(client.clone()).await?;
self.scale_deployment(client.clone()).await?;
self.verify_deployment_scaled(client.clone()).await?;
self.delete_deployment(client.clone()).await?;
self.verify_deployment_deleted(client.clone()).await?;
let osd_id_full = self.get_ceph_osd_id().unwrap();
self.purge_ceph_osd(client.clone(), &osd_id_full).await?;
self.verify_ceph_osd_removal(client.clone(), &osd_id_full)
.await?;
Ok(Outcome::success(format!(
"Successfully removed OSD {} from rook-ceph cluster by deleting deployment {}",
osd_id_full, self.score.osd_deployment_name
)))
}
fn get_name(&self) -> InterpretName {
todo!()
}
fn get_version(&self) -> Version {
todo!()
}
fn get_status(&self) -> InterpretStatus {
todo!()
}
fn get_children(&self) -> Vec<Id> {
todo!()
}
}
impl CephRemoveOsdInterpret {
pub fn get_ceph_osd_id(&self) -> Result<String, InterpretError> {
let osd_id_numeric = self
.score
.osd_deployment_name
.split('-')
.nth(3)
.ok_or_else(|| {
InterpretError::new(format!(
"Could not parse OSD id from deployment name {}",
self.score.osd_deployment_name
))
})?;
let osd_id_full = format!("osd.{}", osd_id_numeric);
info!(
"Targeting Ceph OSD: {} (parsed from deployment {})",
osd_id_full, self.score.osd_deployment_name
);
Ok(osd_id_full)
}
pub async fn verify_ceph_toolbox_exists(
&self,
client: Arc<K8sClient>,
) -> Result<Outcome, InterpretError> {
let toolbox_dep = "rook-ceph-tools".to_string();
match client
.get_deployment(&toolbox_dep, Some(&self.score.rook_ceph_namespace))
.await
{
Ok(Some(deployment)) => {
if let Some(status) = deployment.status {
let ready_count = status.ready_replicas.unwrap_or(0);
if ready_count >= 1 {
return Ok(Outcome::success(format!(
"'{}' is ready with {} replica(s).",
&toolbox_dep, ready_count
)));
} else {
return Err(InterpretError::new(
"ceph-tool-box not ready in cluster".to_string(),
));
}
} else {
Err(InterpretError::new(format!(
"failed to get deployment status {}",
&toolbox_dep
)))
}
}
Ok(None) => Err(InterpretError::new(format!(
"Deployment '{}' not found in namespace '{}'.",
&toolbox_dep, self.score.rook_ceph_namespace
))),
Err(e) => Err(InterpretError::new(format!(
"Failed to query for deployment '{}': {}",
&toolbox_dep, e
))),
}
}
pub async fn scale_deployment(
&self,
client: Arc<K8sClient>,
) -> Result<Outcome, InterpretError> {
info!(
"Scaling down OSD deployment: {}",
self.score.osd_deployment_name
);
client
.scale_deployment(
&self.score.osd_deployment_name,
Some(&self.score.rook_ceph_namespace),
0,
)
.await?;
Ok(Outcome::success(format!(
"Scaled down deployment {}",
self.score.osd_deployment_name
)))
}
pub async fn verify_deployment_scaled(
&self,
client: Arc<K8sClient>,
) -> Result<Outcome, InterpretError> {
let (timeout, interval, start) = self.build_timer();
info!("Waiting for OSD deployment to scale down to 0 replicas");
loop {
let dep = client
.get_deployment(
&self.score.osd_deployment_name,
Some(&self.score.rook_ceph_namespace),
)
.await?;
if let Some(deployment) = dep {
if let Some(status) = deployment.status {
if status.replicas.unwrap_or(1) == 0 && status.ready_replicas.unwrap_or(1) == 0
{
return Ok(Outcome::success(
"Deployment successfully scaled down.".to_string(),
));
}
}
}
if start.elapsed() > timeout {
return Err(InterpretError::new(format!(
"Timed out waiting for deployment {} to scale down",
self.score.osd_deployment_name
)));
}
sleep(interval).await;
}
}
fn build_timer(&self) -> (Duration, Duration, Instant) {
let timeout = Duration::from_secs(120);
let interval = Duration::from_secs(5);
let start = Instant::now();
(timeout, interval, start)
}
pub async fn delete_deployment(
&self,
client: Arc<K8sClient>,
) -> Result<Outcome, InterpretError> {
info!(
"Deleting OSD deployment: {}",
self.score.osd_deployment_name
);
client
.delete_deployment(
&self.score.osd_deployment_name,
Some(&self.score.rook_ceph_namespace),
)
.await?;
Ok(Outcome::success(format!(
"deployment {} deleted",
self.score.osd_deployment_name
)))
}
pub async fn verify_deployment_deleted(
&self,
client: Arc<K8sClient>,
) -> Result<Outcome, InterpretError> {
let (timeout, interval, start) = self.build_timer();
info!("Waiting for OSD deployment to scale down to 0 replicas");
loop {
let dep = client
.get_deployment(
&self.score.osd_deployment_name,
Some(&self.score.rook_ceph_namespace),
)
.await?;
if dep.is_none() {
info!(
"Deployment {} successfully deleted.",
self.score.osd_deployment_name
);
return Ok(Outcome::success(format!(
"Deployment {} deleted.",
self.score.osd_deployment_name
)));
}
if start.elapsed() > timeout {
return Err(InterpretError::new(format!(
"Timed out waiting for deployment {} to be deleted",
self.score.osd_deployment_name
)));
}
sleep(interval).await;
}
}
fn get_osd_tree(&self, json: serde_json::Value) -> Result<CephOsdTree, InterpretError> {
let nodes = json.get("nodes").ok_or_else(|| {
InterpretError::new("Missing 'nodes' field in ceph osd tree JSON".to_string())
})?;
let tree: CephOsdTree = CephOsdTree {
nodes: serde_json::from_value(nodes.clone()).map_err(|e| {
InterpretError::new(format!("Failed to parse ceph osd tree JSON: {}", e))
})?,
};
Ok(tree)
}
pub async fn purge_ceph_osd(
&self,
client: Arc<K8sClient>,
osd_id_full: &str,
) -> Result<Outcome, InterpretError> {
info!(
"Purging OSD {} from Ceph cluster and removing its auth key",
osd_id_full
);
client
.exec_app_capture_output(
"rook-ceph-tools".to_string(),
"app".to_string(),
Some(&self.score.rook_ceph_namespace),
vec![
format!("ceph osd purge {osd_id_full} --yes-i-really-mean-it").as_str(),
format!("ceph auth del osd.{osd_id_full}").as_str(),
],
)
.await?;
Ok(Outcome::success(format!(
"osd id {} removed from osd tree",
osd_id_full
)))
}
pub async fn verify_ceph_osd_removal(
&self,
client: Arc<K8sClient>,
osd_id_full: &str,
) -> Result<Outcome, InterpretError> {
let (timeout, interval, start) = self.build_timer();
info!(
"Verifying OSD {} has been removed from the Ceph tree...",
osd_id_full
);
loop {
let output = client
.exec_app_capture_output(
"rook-ceph-tools".to_string(),
"app".to_string(),
Some(&self.score.rook_ceph_namespace),
vec!["ceph osd tree -f json"],
)
.await?;
let tree =
self.get_osd_tree(serde_json::from_str(&output).expect("could not extract json"));
let osd_found = tree
.unwrap()
.nodes
.iter()
.any(|node| node.name == osd_id_full);
if !osd_found {
return Ok(Outcome::success(format!(
"Successfully verified that OSD {} is removed from the Ceph cluster.",
osd_id_full,
)));
}
if start.elapsed() > timeout {
return Err(InterpretError::new(format!(
"Timed out waiting for OSD {} to be removed from Ceph tree",
osd_id_full
)));
}
warn!(
"OSD {} still found in Ceph tree, retrying in {:?}...",
osd_id_full, interval
);
sleep(interval).await;
}
}
}
#[derive(Debug, Deserialize, PartialEq)]
pub struct CephOsdTree {
pub nodes: Vec<CephNode>,
}
#[derive(Debug, Deserialize, PartialEq)]
pub struct CephNode {
pub id: i32,
pub name: String,
#[serde(rename = "type")]
pub node_type: String,
pub type_id: Option<i32>,
pub children: Option<Vec<i32>>,
pub exists: Option<i32>,
pub status: Option<String>,
}
#[cfg(test)]
mod tests {
use serde_json::json;
use super::*;
#[test]
fn test_get_osd_tree() {
let json_data = json!({
"nodes": [
{"id": 1, "name": "osd.1", "type": "osd", "primary_affinity":"1"},
{"id": 2, "name": "osd.2", "type": "osd", "crush_weight": 1.22344}
]
});
let interpret = CephRemoveOsdInterpret {
score: CephRemoveOsd {
osd_deployment_name: "osd-1".to_string(),
rook_ceph_namespace: "dummy_ns".to_string(),
},
};
let json = interpret.get_osd_tree(json_data).unwrap();
let expected = CephOsdTree {
nodes: vec![
CephNode {
id: 1,
name: "osd.1".to_string(),
node_type: "osd".to_string(),
type_id: None,
children: None,
exists: None,
status: None,
},
CephNode {
id: 2,
name: "osd.2".to_string(),
node_type: "osd".to_string(),
type_id: None,
children: None,
exists: None,
status: None,
},
],
};
assert_eq!(json, expected);
}
}

View File

@@ -1,4 +0,0 @@
pub mod ceph_remove_osd_score;
pub mod rook_ceph_helm_chart_score;
pub mod rook_ceph_cluster_helm_chart_score;
pub mod rook_ceph_install_score;

View File

@@ -1,44 +0,0 @@
use std::str::FromStr;
use non_blank_string_rs::NonBlankString;
use crate::modules::helm::chart::HelmChartScore;
pub fn rook_ceph_cluster_helm_chart(ns: &str) -> HelmChartScore {
let values = r#"
monitoring:
enabled: true
createPrometheusRules: true
cephClusterSpec:
placement:
all:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: storage-node
operator: In
values:
- "true"
dashboard:
ssl: false
prometheusEndpoint: http://prometheus-operated:9090
prometheusEndpointSSLVerify: false
toolbox:
enabled: true
"#
.to_string();
HelmChartScore {
namespace: Some(NonBlankString::from_str(ns).unwrap()),
release_name: NonBlankString::from_str("rook-ceph").unwrap(),
chart_name: NonBlankString::from_str("https://charts.rook.io/release/rook-release/rook-ceph-cluster").unwrap(),
chart_version: todo!(),
values_overrides: todo!(),
values_yaml: Some(values.to_string()),
create_namespace: todo!(),
install_only: todo!(),
repository: todo!(),
}
}

View File

@@ -1,24 +0,0 @@
use std::str::FromStr;
use non_blank_string_rs::NonBlankString;
use crate::modules::helm::chart::HelmChartScore;
pub fn rook_ceph_helm_chart(ns: &str) -> HelmChartScore {
let values = r#"
monitoring:
enabled: true
"#
.to_string();
HelmChartScore {
namespace: Some(NonBlankString::from_str(ns).unwrap()),
release_name: NonBlankString::from_str("rook-ceph").unwrap(),
chart_name: NonBlankString::from_str("https://charts.rook.io/release/rook-release/rook-ceph").unwrap(),
chart_version: todo!(),
values_overrides: todo!(),
values_yaml: Some(values.to_string()),
create_namespace: todo!(),
install_only: todo!(),
repository: todo!(),
}
}

View File

@@ -1,81 +0,0 @@
use async_trait::async_trait;
use serde::Serialize;
use crate::{
data::{Id, Version},
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::{HelmCommand, Topology},
};
#[derive(Debug, Clone, Serialize)]
pub struct RookCephInstall {
namespace: String,
}
impl<T: Topology + HelmCommand> Score<T> for RookCephInstall {
fn name(&self) -> String {
"RookCephInstall".to_string()
}
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(RookCephInstallInterpret {
score: self.clone(),
})
}
}
#[derive(Debug, Clone)]
pub struct RookCephInstallInterpret {
score: RookCephInstall,
}
#[async_trait]
impl<T: Topology + HelmCommand> Interpret<T> for RookCephInstallInterpret {
async fn execute(
&self,
inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
self.label_nodes();
self.install_rook_helm_chart(self.score.namespace.clone());
self.install_rook_cluster_helm_chart(self.score.namespace.clone());
//TODO I think we will need to add a capability OCClient to interact with the okd
//cli tool
self.add_oc_adm_policy(self.score.namespace.clone());
Ok(Outcome::success("Rook Ceph installation steps completed".to_string()))
}
fn get_name(&self) -> InterpretName {
todo!()
}
fn get_version(&self) -> Version {
todo!()
}
fn get_status(&self) -> InterpretStatus {
todo!()
}
fn get_children(&self) -> Vec<Id> {
todo!()
}
}
impl RookCephInstallInterpret {
fn label_nodes(&self) {
todo!()
}
fn install_rook_helm_chart(&self, _namespace: String) {
todo!()
}
fn install_rook_cluster_helm_chart(&self, _namespace: String) {
todo!()
}
fn add_oc_adm_policy(&self, _namespace: String) {
todo!()
}
}

View File

@@ -1 +0,0 @@
pub mod ceph;

View File

@@ -17,7 +17,7 @@ impl<T: Topology + TenantCredentialManager> Score<T> for TenantCredentialScore {
}
fn name(&self) -> String {
"TenantCredentialScore".into()
todo!()
}
}

View File

@@ -28,7 +28,7 @@ impl<T: Topology + TenantManager> Score<T> for TenantScore {
}
fn name(&self) -> String {
format!("{} [TenantScore]", self.config.name)
format!("{} TenantScore", self.config.name)
}
}
@@ -47,8 +47,8 @@ impl<T: Topology + TenantManager> Interpret<T> for TenantInterpret {
topology.provision_tenant(&self.tenant_config).await?;
Ok(Outcome::success(format!(
"Tenant provisioned with id '{}'",
self.tenant_config.id
"Successfully provisioned tenant {} with id {}",
self.tenant_config.name, self.tenant_config.id
)))
}

View File

@@ -5,10 +5,6 @@ version.workspace = true
readme.workspace = true
license.workspace = true
[features]
default = ["tui"]
tui = ["dep:harmony_tui"]
[dependencies]
assert_cmd = "2.0.17"
clap = { version = "4.5.35", features = ["derive"] }
@@ -22,7 +18,8 @@ indicatif = "0.18.0"
lazy_static = "1.5.0"
log.workspace = true
indicatif-log-bridge = "0.2.3"
chrono.workspace = true
[dev-dependencies]
harmony = { path = "../harmony", features = ["testing"] }
[features]
default = ["tui"]
tui = ["dep:harmony_tui"]

View File

@@ -1,13 +1,12 @@
use chrono::Local;
use console::style;
use harmony::{
instrumentation::{self, HarmonyEvent},
modules::application::ApplicationFeatureStatus,
topology::TopologyStatus,
use harmony::instrumentation::{self, HarmonyEvent};
use indicatif::{MultiProgress, ProgressBar};
use indicatif_log_bridge::LogWrapper;
use std::{
collections::{HashMap, hash_map},
sync::{Arc, Mutex},
};
use log::{error, info, log_enabled};
use std::io::Write;
use std::sync::{Arc, Mutex};
use crate::progress;
pub fn init() -> tokio::task::JoinHandle<()> {
configure_logger();
@@ -23,178 +22,91 @@ pub fn init() -> tokio::task::JoinHandle<()> {
}
fn configure_logger() {
env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info"))
.format(|buf, record| {
let debug_mode = log_enabled!(log::Level::Debug);
let timestamp = Local::now().format("%Y-%m-%d %H:%M:%S");
let level = match record.level() {
log::Level::Error => style("ERROR").red(),
log::Level::Warn => style("WARN").yellow(),
log::Level::Info => style("INFO").green(),
log::Level::Debug => style("DEBUG").blue(),
log::Level::Trace => style("TRACE").magenta(),
};
if let Some(status) = record.key_values().get(log::kv::Key::from("status")) {
let status = status.to_borrowed_str().unwrap();
let emoji = match status {
"finished" => style(crate::theme::EMOJI_SUCCESS.to_string()).green(),
"skipped" => style(crate::theme::EMOJI_SKIP.to_string()).yellow(),
"failed" => style(crate::theme::EMOJI_ERROR.to_string()).red(),
_ => style("".into()),
};
if debug_mode {
writeln!(
buf,
"[{} {:<5} {}] {} {}",
timestamp,
level,
record.target(),
emoji,
record.args()
)
} else {
writeln!(buf, "[{:<5}] {} {}", level, emoji, record.args())
}
} else if let Some(emoji) = record.key_values().get(log::kv::Key::from("emoji")) {
if debug_mode {
writeln!(
buf,
"[{} {:<5} {}] {} {}",
timestamp,
level,
record.target(),
emoji,
record.args()
)
} else {
writeln!(buf, "[{:<5}] {} {}", level, emoji, record.args())
}
} else if debug_mode {
writeln!(
buf,
"[{} {:<5} {}] {}",
timestamp,
level,
record.target(),
record.args()
)
} else {
writeln!(buf, "[{:<5}] {}", level, record.args())
}
})
.init();
let logger =
env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).build();
let level = logger.filter();
let multi = MultiProgress::new();
LogWrapper::new(multi.clone(), logger).try_init().unwrap();
log::set_max_level(level);
}
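The setup above is the usual indicatif-log-bridge pattern: build env_logger without installing it, hand it to `LogWrapper` together with the `MultiProgress`, then re-apply the filter level globally so log lines and progress bars share the terminal cleanly. A condensed, self-contained sketch of the same three steps:

use indicatif::MultiProgress;
use indicatif_log_bridge::LogWrapper;
// Sketch only: mirrors `configure_logger` above.
fn init_bridged_logger() -> MultiProgress {
    let logger = env_logger::Builder::from_env(
        env_logger::Env::default().default_filter_or("info"),
    )
    .build(); // build, do not init: LogWrapper becomes the global logger instead
    let level = logger.filter();
    let multi = MultiProgress::new();
    LogWrapper::new(multi.clone(), logger).try_init().unwrap();
    // The wrapper does not carry over env_logger's filter, so restore it explicitly.
    log::set_max_level(level);
    multi
}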
async fn handle_events() {
let preparing_topology = Arc::new(Mutex::new(false));
let current_score: Arc<Mutex<Option<String>>> = Arc::new(Mutex::new(None));
instrumentation::subscribe("Harmony CLI Logger", {
let sections: Arc<Mutex<HashMap<String, MultiProgress>>> =
Arc::new(Mutex::new(HashMap::new()));
let progress_bars: Arc<Mutex<HashMap<String, ProgressBar>>> =
Arc::new(Mutex::new(HashMap::new()));
move |event| {
let preparing_topology = Arc::clone(&preparing_topology);
let current_score = Arc::clone(&current_score);
let sections_clone = Arc::clone(&sections);
let progress_bars_clone = Arc::clone(&progress_bars);
async move {
let mut preparing_topology = preparing_topology.lock().unwrap();
let mut current_score = current_score.lock().unwrap();
let mut sections = sections_clone.lock().unwrap();
let mut progress_bars = progress_bars_clone.lock().unwrap();
match event {
HarmonyEvent::HarmonyStarted => {}
HarmonyEvent::HarmonyFinished => {
let emoji = crate::theme::EMOJI_HARMONY.to_string();
info!(emoji = emoji.as_str(); "Harmony completed");
return false;
HarmonyEvent::PrepareTopologyStarted { topology: name } => {
let section = progress::new_section(format!(
"{} Preparing environment: {name}...",
crate::theme::EMOJI_TOPOLOGY,
));
(*sections).insert(name, section);
}
HarmonyEvent::TopologyStateChanged {
topology,
status,
message,
} => match status {
TopologyStatus::Queued => {}
TopologyStatus::Preparing => {
let emoji = format!("{}", style(crate::theme::EMOJI_TOPOLOGY.to_string()).yellow());
info!(emoji = emoji.as_str(); "Preparing environment: {topology}...");
(*preparing_topology) = true;
}
TopologyStatus::Success => {
(*preparing_topology) = false;
if let Some(message) = message {
info!(status = "finished"; "{message}");
}
}
TopologyStatus::Noop => {
(*preparing_topology) = false;
if let Some(message) = message {
info!(status = "skipped"; "{message}");
}
}
TopologyStatus::Error => {
(*preparing_topology) = false;
if let Some(message) = message {
error!(status = "failed"; "{message}");
}
}
},
HarmonyEvent::InterpretExecutionStarted {
execution_id: _,
topology: _,
interpret: _,
score,
message,
} => {
if *preparing_topology || current_score.is_some() {
info!("{message}");
} else {
(*current_score) = Some(score.clone());
let emoji = format!("{}", style(crate::theme::EMOJI_SCORE).blue());
info!(emoji = emoji.as_str(); "Interpreting score: {score}...");
}
}
HarmonyEvent::InterpretExecutionFinished {
execution_id: _,
topology: _,
interpret: _,
score,
HarmonyEvent::TopologyPrepared {
topology: name,
outcome,
} => {
if current_score.is_some() && current_score.clone().unwrap() == score {
(*current_score) = None;
}
let section = (*sections).get(&name).unwrap();
let progress = progress::add_spinner(section, "".into());
match outcome {
Ok(outcome) => match outcome.status {
harmony::interpret::InterpretStatus::SUCCESS => {
info!(status = "finished"; "{}", outcome.message);
}
harmony::interpret::InterpretStatus::NOOP => {
info!(status = "skipped"; "{}", outcome.message);
}
_ => {
error!(status = "failed"; "{}", outcome.message);
}
},
Err(err) => {
error!(status = "failed"; "{}", err);
match outcome.status {
harmony::interpret::InterpretStatus::SUCCESS => {
progress::success(section, Some(progress), outcome.message);
}
harmony::interpret::InterpretStatus::FAILURE => {
progress::error(section, Some(progress), outcome.message);
}
harmony::interpret::InterpretStatus::RUNNING => todo!(),
harmony::interpret::InterpretStatus::QUEUED => todo!(),
harmony::interpret::InterpretStatus::BLOCKED => todo!(),
harmony::interpret::InterpretStatus::NOOP => {
progress::skip(section, Some(progress), outcome.message);
}
}
}
HarmonyEvent::ApplicationFeatureStateChanged {
topology: _,
application,
feature,
status,
} => match status {
ApplicationFeatureStatus::Installing => {
info!("Installing feature '{}' for '{}'...", feature, application);
HarmonyEvent::InterpretExecutionStarted {
interpret: name,
topology,
message,
} => {
let section = (*sections).get(&topology).unwrap();
let progress_bar = progress::add_spinner(section, message);
(*progress_bars).insert(name, progress_bar);
}
HarmonyEvent::InterpretExecutionFinished {
topology,
interpret: name,
outcome,
} => {
let section = (*sections).get(&topology).unwrap();
let progress_bar = (*progress_bars).get(&name).cloned();
let _ = section.clear();
match outcome {
Ok(outcome) => {
progress::success(section, progress_bar, outcome.message);
}
Err(err) => {
progress::error(section, progress_bar, err.to_string());
}
}
ApplicationFeatureStatus::Installed => {
info!(status = "finished"; "Feature '{}' installed", feature);
}
ApplicationFeatureStatus::Failed { details } => {
error!(status = "failed"; "Feature '{}' installation failed: {}", feature, details);
}
},
(*progress_bars).remove(&name);
}
}
true
}

View File

@@ -1,6 +1,5 @@
use clap::Parser;
use clap::builder::ArgPredicate;
use harmony::instrumentation;
use harmony::inventory::Inventory;
use harmony::maestro::Maestro;
use harmony::{score::Score, topology::Topology};
@@ -12,6 +11,8 @@ pub mod progress;
pub mod theme;
#[cfg(feature = "tui")]
use harmony_tui;
#[derive(Parser, Debug)]
#[command(version, about, long_about = None)]
pub struct Args {
@@ -72,7 +73,7 @@ fn maestro_scores_filter<T: Topology>(
}
};
scores_vec
return scores_vec;
}
// TODO: consider adding doctest for this function
@@ -82,7 +83,7 @@ fn list_scores_with_index<T: Topology>(scores_vec: &Vec<Box<dyn Score<T>>>) -> S
let name = s.name();
display_str.push_str(&format!("\n{i}: {name}"));
}
display_str
return display_str;
}
pub async fn run<T: Topology + Send + Sync + 'static>(
@@ -90,60 +91,49 @@ pub async fn run<T: Topology + Send + Sync + 'static>(
topology: T,
scores: Vec<Box<dyn Score<T>>>,
args_struct: Option<Args>,
) -> Result<(), Box<dyn std::error::Error>> {
let args = match args_struct {
Some(args) => args,
None => Args::parse(),
};
#[cfg(not(feature = "tui"))]
if args.interactive {
return Err("Not compiled with interactive support".into());
}
#[cfg(feature = "tui")]
if args.interactive {
return harmony_tui::run(inventory, topology, scores).await;
}
run_cli(inventory, topology, scores, args).await
}
pub async fn run_cli<T: Topology + Send + Sync + 'static>(
inventory: Inventory,
topology: T,
scores: Vec<Box<dyn Score<T>>>,
args: Args,
) -> Result<(), Box<dyn std::error::Error>> {
let cli_logger_handle = cli_logger::init();
let mut maestro = Maestro::initialize(inventory, topology).await.unwrap();
maestro.register_all(scores);
let result = init(maestro, args).await;
let result = init(maestro, args_struct).await;
instrumentation::instrument(instrumentation::HarmonyEvent::HarmonyFinished).unwrap();
let _ = tokio::try_join!(cli_logger_handle);
result
}
async fn init<T: Topology + Send + Sync + 'static>(
maestro: harmony::maestro::Maestro<T>,
args: Args,
args_struct: Option<Args>,
) -> Result<(), Box<dyn std::error::Error>> {
let args = match args_struct {
Some(args) => args,
None => Args::parse(),
};
#[cfg(feature = "tui")]
if args.interactive {
return harmony_tui::init(maestro).await;
}
#[cfg(not(feature = "tui"))]
if args.interactive {
return Err("Not compiled with interactive support".into());
}
let _ = env_logger::builder().try_init();
let scores_vec = maestro_scores_filter(&maestro, args.all, args.filter, args.number);
if scores_vec.is_empty() {
if scores_vec.len() == 0 {
return Err("No score found".into());
}
// if list option is specified, print filtered list and exit
if args.list {
let num_scores = scores_vec.len();
println!("Available scores {num_scores}:");
println!("{}\n\n", list_scores_with_index(&scores_vec));
println!("Available scores:");
println!("{}", list_scores_with_index(&scores_vec));
return Ok(());
}
@@ -175,7 +165,7 @@ async fn init<T: Topology + Send + Sync + 'static>(
}
#[cfg(test)]
mod tests {
mod test {
use harmony::{
inventory::Inventory,
maestro::Maestro,
@@ -202,14 +192,14 @@ mod tests {
let maestro = init_test_maestro();
let res = crate::init(
maestro,
crate::Args {
Some(crate::Args {
yes: true,
filter: Some("SuccessScore".to_owned()),
interactive: false,
all: true,
number: 0,
list: false,
},
}),
)
.await;
@@ -222,14 +212,14 @@ mod tests {
let res = crate::init(
maestro,
crate::Args {
Some(crate::Args {
yes: true,
filter: Some("ErrorScore".to_owned()),
interactive: false,
all: true,
number: 0,
list: false,
},
}),
)
.await;
@@ -242,14 +232,14 @@ mod tests {
let res = crate::init(
maestro,
crate::Args {
Some(crate::Args {
yes: true,
filter: None,
interactive: false,
all: false,
number: 0,
list: false,
},
}),
)
.await;
@@ -275,7 +265,7 @@ mod tests {
assert!(
maestro
.interpret(res.first().unwrap().clone_box())
.interpret(res.get(0).unwrap().clone_box())
.await
.is_ok()
);
@@ -291,7 +281,7 @@ mod tests {
assert!(
maestro
.interpret(res.first().unwrap().clone_box())
.interpret(res.get(0).unwrap().clone_box())
.await
.is_err()
);
@@ -307,7 +297,7 @@ mod tests {
assert!(
maestro
.interpret(res.first().unwrap().clone_box())
.interpret(res.get(0).unwrap().clone_box())
.await
.is_ok()
);
@@ -329,7 +319,7 @@ mod tests {
assert!(
maestro
.interpret(res.first().unwrap().clone_box())
.interpret(res.get(0).unwrap().clone_box())
.await
.is_ok()
);
@@ -341,6 +331,6 @@ mod tests {
let res = crate::maestro_scores_filter(&maestro, false, None, 11);
assert!(res.is_empty());
assert!(res.len() == 0);
}
}

View File

@@ -1,147 +1,50 @@
use indicatif::{MultiProgress, ProgressBar};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::time::Duration;
pub trait ProgressTracker: Send + Sync {
fn contains_section(&self, id: &str) -> bool;
fn add_section(&self, id: &str, message: &str);
fn add_task(&self, section_id: &str, task_id: &str, message: &str);
fn finish_task(&self, id: &str, message: &str);
fn fail_task(&self, id: &str, message: &str);
fn skip_task(&self, id: &str, message: &str);
fn clear(&self);
use indicatif::{MultiProgress, ProgressBar};
pub fn new_section(title: String) -> MultiProgress {
let multi_progress = MultiProgress::new();
let _ = multi_progress.println(title);
multi_progress
}
struct Section {
header_index: usize,
task_count: usize,
pb: ProgressBar,
pub fn add_spinner(multi_progress: &MultiProgress, message: String) -> ProgressBar {
let progress = multi_progress.add(ProgressBar::new_spinner());
progress.set_style(crate::theme::SPINNER_STYLE.clone());
progress.set_message(message);
progress.enable_steady_tick(Duration::from_millis(100));
progress
}
struct IndicatifProgressTrackerState {
sections: HashMap<String, Section>,
tasks: HashMap<String, ProgressBar>,
pb_count: usize,
pub fn success(multi_progress: &MultiProgress, progress: Option<ProgressBar>, message: String) {
if let Some(progress) = progress {
multi_progress.remove(&progress)
}
let progress = multi_progress.add(ProgressBar::new_spinner());
progress.set_style(crate::theme::SUCCESS_SPINNER_STYLE.clone());
progress.finish_with_message(message);
}
#[derive(Clone)]
pub struct IndicatifProgressTracker {
mp: MultiProgress,
state: Arc<Mutex<IndicatifProgressTrackerState>>,
pub fn error(multi_progress: &MultiProgress, progress: Option<ProgressBar>, message: String) {
if let Some(progress) = progress {
multi_progress.remove(&progress)
}
let progress = multi_progress.add(ProgressBar::new_spinner());
progress.set_style(crate::theme::ERROR_SPINNER_STYLE.clone());
progress.finish_with_message(message);
}
impl IndicatifProgressTracker {
pub fn new(base: MultiProgress) -> Self {
let sections = HashMap::new();
let tasks = HashMap::new();
let state = Arc::new(Mutex::new(IndicatifProgressTrackerState {
sections,
tasks,
pb_count: 0,
}));
Self { mp: base, state }
}
}
impl ProgressTracker for IndicatifProgressTracker {
fn add_section(&self, id: &str, message: &str) {
let mut state = self.state.lock().unwrap();
let header_pb = self
.mp
.add(ProgressBar::new(1).with_style(crate::theme::SECTION_STYLE.clone()));
header_pb.finish_with_message(message.to_string());
let header_index = state.pb_count;
state.pb_count += 1;
state.sections.insert(
id.to_string(),
Section {
header_index,
task_count: 0,
pb: header_pb,
},
);
}
fn add_task(&self, section_id: &str, task_id: &str, message: &str) {
let mut state = self.state.lock().unwrap();
let insertion_index = {
let current_section = state
.sections
.get(section_id)
.expect("Section ID not found");
current_section.header_index + current_section.task_count + 1 // +1 to insert after header
};
let pb = self.mp.insert(insertion_index, ProgressBar::new_spinner());
pb.set_style(crate::theme::SPINNER_STYLE.clone());
pb.set_prefix(" ");
pb.set_message(message.to_string());
pb.enable_steady_tick(Duration::from_millis(80));
state.pb_count += 1;
let section = state
.sections
.get_mut(section_id)
.expect("Section ID not found");
section.task_count += 1;
// We inserted a new progress bar, so we must update the header_index
// for all subsequent sections.
for (id, s) in state.sections.iter_mut() {
if id != section_id && s.header_index >= insertion_index {
s.header_index += 1;
}
}
state.tasks.insert(task_id.to_string(), pb);
}
fn finish_task(&self, id: &str, message: &str) {
let state = self.state.lock().unwrap();
if let Some(pb) = state.tasks.get(id) {
pb.set_style(crate::theme::SUCCESS_SPINNER_STYLE.clone());
pb.finish_with_message(message.to_string());
}
}
fn fail_task(&self, id: &str, message: &str) {
let state = self.state.lock().unwrap();
if let Some(pb) = state.tasks.get(id) {
pb.set_style(crate::theme::ERROR_SPINNER_STYLE.clone());
pb.finish_with_message(message.to_string());
}
}
fn skip_task(&self, id: &str, message: &str) {
let state = self.state.lock().unwrap();
if let Some(pb) = state.tasks.get(id) {
pb.set_style(crate::theme::SKIP_SPINNER_STYLE.clone());
pb.finish_with_message(message.to_string());
}
}
fn contains_section(&self, id: &str) -> bool {
let state = self.state.lock().unwrap();
state.sections.contains_key(id)
}
fn clear(&self) {
let mut state = self.state.lock().unwrap();
state.tasks.values().for_each(|p| self.mp.remove(p));
state.tasks.clear();
state.sections.values().for_each(|s| self.mp.remove(&s.pb));
state.sections.clear();
state.pb_count = 0;
let _ = self.mp.clear();
pub fn skip(multi_progress: &MultiProgress, progress: Option<ProgressBar>, message: String) {
if let Some(progress) = progress {
multi_progress.remove(&progress)
}
let progress = multi_progress.add(ProgressBar::new_spinner());
progress.set_style(crate::theme::SKIP_SPINNER_STYLE.clone());
progress.finish_with_message(message);
}
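A short usage sketch showing how these helpers chain from a caller (the paths mirror the `harmony_cli::progress` usage that appears later in this changeset): open a section, attach a spinner, then resolve it as success, error, or skip.

// Sketch only.
fn report_example() {
    let section = harmony_cli::progress::new_section(format!(
        "{} Preparing environment...",
        harmony_cli::theme::EMOJI_TOPOLOGY,
    ));
    let spinner = harmony_cli::progress::add_spinner(&section, "applying score".to_string());
    // ... do the actual work here ...
    harmony_cli::progress::success(&section, Some(spinner), "score applied".to_string());
}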

View File

@@ -8,27 +8,19 @@ pub static EMOJI_SKIP: Emoji<'_, '_> = Emoji("⏭️", "");
pub static EMOJI_ERROR: Emoji<'_, '_> = Emoji("⚠️", "");
pub static EMOJI_DEPLOY: Emoji<'_, '_> = Emoji("🚀", "");
pub static EMOJI_TOPOLOGY: Emoji<'_, '_> = Emoji("📦", "");
pub static EMOJI_SCORE: Emoji<'_, '_> = Emoji("🎶", "");
lazy_static! {
pub static ref SECTION_STYLE: ProgressStyle = ProgressStyle::default_spinner()
.template("{wide_msg:.bold}")
.unwrap();
pub static ref SPINNER_STYLE: ProgressStyle = ProgressStyle::default_spinner()
.template(" {spinner:.green} {wide_msg}")
.template(" {spinner:.green} {msg}")
.unwrap()
.tick_strings(&["", "", "", "", "", "", "", "", "", ""]);
pub static ref SUCCESS_SPINNER_STYLE: ProgressStyle = SPINNER_STYLE
.clone()
.tick_strings(&[format!("{}", EMOJI_SUCCESS).as_str()]);
pub static ref SKIP_SPINNER_STYLE: ProgressStyle = ProgressStyle::default_spinner()
.template(" {spinner:.orange} {wide_msg}")
.unwrap()
pub static ref SKIP_SPINNER_STYLE: ProgressStyle = SPINNER_STYLE
.clone()
.tick_strings(&[format!("{}", EMOJI_SKIP).as_str()]);
pub static ref ERROR_SPINNER_STYLE: ProgressStyle = ProgressStyle::default_spinner()
.template(" {spinner:.red} {wide_msg}")
.unwrap()
pub static ref ERROR_SPINNER_STYLE: ProgressStyle = SPINNER_STYLE
.clone()
.tick_strings(&[format!("{}", EMOJI_ERROR).as_str()]);
}

View File

@@ -1,6 +1,10 @@
use harmony_cli::progress::{IndicatifProgressTracker, ProgressTracker};
use indicatif::MultiProgress;
use std::sync::Arc;
use indicatif::{MultiProgress, ProgressBar};
use indicatif_log_bridge::LogWrapper;
use log::error;
use std::{
collections::HashMap,
sync::{Arc, Mutex},
};
use crate::instrumentation::{self, HarmonyComposerEvent};
@@ -18,59 +22,85 @@ pub fn init() -> tokio::task::JoinHandle<()> {
}
fn configure_logger() {
env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).build();
let logger =
env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).build();
let level = logger.filter();
let multi = MultiProgress::new();
LogWrapper::new(multi.clone(), logger).try_init().unwrap();
log::set_max_level(level);
}
pub async fn handle_events() {
let progress_tracker = Arc::new(IndicatifProgressTracker::new(MultiProgress::new()));
const SETUP_SECTION: &str = "project-initialization";
const COMPILTATION_TASK: &str = "compilation";
const PROGRESS_SETUP: &str = "project-initialization";
const PROGRESS_DEPLOYMENT: &str = "deployment";
instrumentation::subscribe("Harmony Composer Logger", {
let progresses: Arc<Mutex<HashMap<String, MultiProgress>>> =
Arc::new(Mutex::new(HashMap::new()));
let compilation_progress = Arc::new(Mutex::new(None::<ProgressBar>));
move |event| {
let progress_tracker = Arc::clone(&progress_tracker);
let progresses_clone = Arc::clone(&progresses);
let compilation_progress_clone = Arc::clone(&compilation_progress);
async move {
let mut progresses_guard = progresses_clone.lock().unwrap();
let mut compilation_progress_guard = compilation_progress_clone.lock().unwrap();
match event {
HarmonyComposerEvent::HarmonyComposerStarted => {}
HarmonyComposerEvent::ProjectInitializationStarted => {
progress_tracker.add_section(
SETUP_SECTION,
&format!(
"{} Initializing Harmony project...",
harmony_cli::theme::EMOJI_HARMONY,
),
);
let multi_progress = harmony_cli::progress::new_section(format!(
"{} Initializing Harmony project...",
harmony_cli::theme::EMOJI_HARMONY,
));
(*progresses_guard).insert(PROGRESS_SETUP.to_string(), multi_progress);
}
HarmonyComposerEvent::ProjectInitialized => {}
HarmonyComposerEvent::ProjectInitialized => println!("\n"),
HarmonyComposerEvent::ProjectCompilationStarted { details } => {
progress_tracker.add_task(SETUP_SECTION, COMPILTATION_TASK, &details);
let initialization_progress =
(*progresses_guard).get(PROGRESS_SETUP).unwrap();
let _ = initialization_progress.clear();
let progress =
harmony_cli::progress::add_spinner(initialization_progress, details);
*compilation_progress_guard = Some(progress);
}
HarmonyComposerEvent::ProjectCompiled => {
progress_tracker.finish_task(COMPILTATION_TASK, "project compiled");
}
HarmonyComposerEvent::ProjectCompilationFailed { details } => {
progress_tracker.fail_task(COMPILTATION_TASK, &format!("failed to compile project:\n{details}"));
}
HarmonyComposerEvent::DeploymentStarted { target, profile } => {
progress_tracker.add_section(
PROGRESS_DEPLOYMENT,
&format!(
"\n{} Deploying project on target '{target}' with profile '{profile}'...\n",
harmony_cli::theme::EMOJI_DEPLOY,
),
let initialization_progress =
(*progresses_guard).get(PROGRESS_SETUP).unwrap();
harmony_cli::progress::success(
initialization_progress,
(*compilation_progress_guard).take(),
"project compiled".to_string(),
);
}
HarmonyComposerEvent::DeploymentCompleted => {
progress_tracker.clear();
HarmonyComposerEvent::ProjectCompilationFailed { details } => {
let initialization_progress =
(*progresses_guard).get(PROGRESS_SETUP).unwrap();
harmony_cli::progress::error(
initialization_progress,
(*compilation_progress_guard).take(),
"failed to compile project".to_string(),
);
error!("{details}");
}
HarmonyComposerEvent::DeploymentFailed { details } => {
progress_tracker.add_task(PROGRESS_DEPLOYMENT, "deployment-failed", "");
progress_tracker.fail_task("deployment-failed", &details);
},
HarmonyComposerEvent::DeploymentStarted { target } => {
let multi_progress = harmony_cli::progress::new_section(format!(
"{} Starting deployment to {target}...\n\n",
harmony_cli::theme::EMOJI_DEPLOY
));
(*progresses_guard).insert(PROGRESS_DEPLOYMENT.to_string(), multi_progress);
}
HarmonyComposerEvent::DeploymentCompleted { details } => println!("\n"),
HarmonyComposerEvent::Shutdown => {
for (_, progresses) in (*progresses_guard).iter() {
progresses.clear().unwrap();
}
return false;
}
}

View File

@@ -2,28 +2,16 @@ use log::debug;
use once_cell::sync::Lazy;
use tokio::sync::broadcast;
use crate::{HarmonyProfile, HarmonyTarget};
#[derive(Debug, Clone)]
pub enum HarmonyComposerEvent {
HarmonyComposerStarted,
ProjectInitializationStarted,
ProjectInitialized,
ProjectCompilationStarted {
details: String,
},
ProjectCompilationStarted { details: String },
ProjectCompiled,
ProjectCompilationFailed {
details: String,
},
DeploymentStarted {
target: HarmonyTarget,
profile: HarmonyProfile,
},
DeploymentCompleted,
DeploymentFailed {
details: String,
},
ProjectCompilationFailed { details: String },
DeploymentStarted { target: String },
DeploymentCompleted { details: String },
Shutdown,
}
@@ -35,18 +23,9 @@ static HARMONY_COMPOSER_EVENT_BUS: Lazy<broadcast::Sender<HarmonyComposerEvent>>
});
pub fn instrument(event: HarmonyComposerEvent) -> Result<(), &'static str> {
#[cfg(not(test))]
{
match HARMONY_COMPOSER_EVENT_BUS.send(event) {
Ok(_) => Ok(()),
Err(_) => Err("send error: no subscribers"),
}
}
#[cfg(test)]
{
let _ = event; // Suppress the "unused variable" warning for `event`
Ok(())
match HARMONY_COMPOSER_EVENT_BUS.send(event) {
Ok(_) => Ok(()),
Err(_) => Err("send error: no subscribers"),
}
}
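A minimal emission sketch for this bus: fire an event and treat the single error case (no active subscribers) as non-fatal, since instrumentation should never abort the main flow.

// Sketch only: assumes the caller sits inside harmony_composer, where the
// `instrumentation` module and `HarmonyComposerEvent` are already in scope.
fn notify_compiled() {
    if let Err(reason) = instrument(HarmonyComposerEvent::ProjectCompiled) {
        log::debug!("instrumentation event dropped: {reason}");
    }
}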

View File

@@ -20,7 +20,7 @@ mod instrumentation;
#[derive(Parser)]
#[command(version, about, long_about = None, flatten_help = true, propagate_version = true)]
struct GlobalArgs {
#[arg(long, default_value = ".")]
#[arg(long, default_value = "harmony")]
harmony_path: String,
#[arg(long)]
@@ -49,11 +49,14 @@ struct CheckArgs {
#[derive(Args, Clone, Debug)]
struct DeployArgs {
#[arg(long = "target", short = 't', default_value = "local")]
harmony_target: HarmonyTarget,
#[arg(long, default_value_t = false)]
staging: bool,
#[arg(long = "profile", short = 'p', default_value = "dev")]
harmony_profile: HarmonyProfile,
#[arg(long, default_value_t = false)]
prod: bool,
#[arg(long, default_value_t = false)]
smoke_test: bool,
}
#[derive(Args, Clone, Debug)]
@@ -65,38 +68,6 @@ struct AllArgs {
deploy: DeployArgs,
}
#[derive(Clone, Debug, clap::ValueEnum)]
enum HarmonyTarget {
Local,
Remote,
}
impl std::fmt::Display for HarmonyTarget {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
HarmonyTarget::Local => f.write_str("local"),
HarmonyTarget::Remote => f.write_str("remote"),
}
}
}
#[derive(Clone, Debug, clap::ValueEnum)]
enum HarmonyProfile {
Dev,
Staging,
Production,
}
impl std::fmt::Display for HarmonyProfile {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
HarmonyProfile::Dev => f.write_str("dev"),
HarmonyProfile::Staging => f.write_str("staging"),
HarmonyProfile::Production => f.write_str("production"),
}
}
}
#[tokio::main]
async fn main() {
let hc_logger_handle = harmony_composer_logger::init();
@@ -109,13 +80,14 @@ async fn main() {
instrumentation::instrument(HarmonyComposerEvent::ProjectInitializationStarted).unwrap();
let harmony_bin_path: PathBuf = match harmony_path {
true => compile_harmony(
cli_args.compile_method,
cli_args.compile_platform,
cli_args.harmony_path.clone(),
)
.await
.expect("couldn't compile harmony"),
true => {
compile_harmony(
cli_args.compile_method,
cli_args.compile_platform,
cli_args.harmony_path.clone(),
)
.await
}
false => todo!("implement autodetect code"),
};
@@ -151,44 +123,32 @@ async fn main() {
);
}
Commands::Deploy(args) => {
instrumentation::instrument(HarmonyComposerEvent::DeploymentStarted {
target: args.harmony_target.clone(),
profile: args.harmony_profile.clone(),
})
.unwrap();
if matches!(args.harmony_profile, HarmonyProfile::Dev)
&& !matches!(args.harmony_target, HarmonyTarget::Local)
{
instrumentation::instrument(HarmonyComposerEvent::DeploymentFailed {
details: format!(
"Cannot run profile '{}' on target '{}'. Profile '{}' can run locally only.",
args.harmony_profile, args.harmony_target, args.harmony_profile
),
}).unwrap();
return;
let deploy = if args.staging {
instrumentation::instrument(HarmonyComposerEvent::DeploymentStarted {
target: "staging".to_string(),
})
.unwrap();
todo!("implement staging deployment")
} else if args.prod {
instrumentation::instrument(HarmonyComposerEvent::DeploymentStarted {
target: "prod".to_string(),
})
.unwrap();
todo!("implement prod deployment")
} else {
instrumentation::instrument(HarmonyComposerEvent::DeploymentStarted {
target: "dev".to_string(),
})
.unwrap();
Command::new(harmony_bin_path).arg("-y").arg("-a").spawn()
}
let use_local_k3d = match args.harmony_target {
HarmonyTarget::Local => true,
HarmonyTarget::Remote => false,
};
let mut command = Command::new(harmony_bin_path);
command
.env("HARMONY_USE_LOCAL_K3D", format!("{use_local_k3d}"))
.env("HARMONY_PROFILE", format!("{}", args.harmony_profile))
.arg("-y")
.arg("-a");
info!("{:?}", command);
let deploy = command.spawn().expect("failed to run harmony deploy");
.expect("failed to run harmony deploy");
let deploy_output = deploy.wait_with_output().unwrap();
debug!("{}", String::from_utf8(deploy_output.stdout).unwrap());
instrumentation::instrument(HarmonyComposerEvent::DeploymentCompleted).unwrap();
instrumentation::instrument(HarmonyComposerEvent::DeploymentCompleted {
details: String::from_utf8(deploy_output.stdout).unwrap(),
})
.unwrap();
}
Commands::All(_args) => todo!(
"take all previous match arms and turn them into separate functions, and call them all one after the other"
@@ -213,7 +173,7 @@ async fn compile_harmony(
method: Option<CompileMethod>,
platform: Option<String>,
harmony_location: String,
) -> Result<PathBuf, String> {
) -> PathBuf {
let platform = match platform {
Some(p) => p,
None => current_platform::CURRENT_PLATFORM.to_string(),
@@ -243,7 +203,6 @@ async fn compile_harmony(
details: "compiling project with cargo".to_string(),
})
.unwrap();
compile_cargo(platform, harmony_location).await
}
CompileMethod::Docker => {
@@ -251,28 +210,16 @@ async fn compile_harmony(
details: "compiling project with docker".to_string(),
})
.unwrap();
compile_docker(platform, harmony_location).await
}
};
match path {
Ok(path) => {
instrumentation::instrument(HarmonyComposerEvent::ProjectCompiled).unwrap();
Ok(path)
}
Err(err) => {
instrumentation::instrument(HarmonyComposerEvent::ProjectCompilationFailed {
details: err.clone(),
})
.unwrap();
Err(err)
}
}
instrumentation::instrument(HarmonyComposerEvent::ProjectCompiled).unwrap();
path
}
// TODO: make sure this works with cargo workspaces
async fn compile_cargo(platform: String, harmony_location: String) -> Result<PathBuf, String> {
async fn compile_cargo(platform: String, harmony_location: String) -> PathBuf {
let metadata = MetadataCommand::new()
.manifest_path(format!("{}/Cargo.toml", harmony_location))
.exec()
@@ -321,10 +268,7 @@ async fn compile_cargo(platform: String, harmony_location: String) -> Result<Pat
}
}
let res = cargo_build.wait(); //.expect("run cargo command failed");
if res.is_err() {
return Err("cargo build failed".into());
}
cargo_build.wait().expect("run cargo command failed");
let bin = artifacts
.last()
@@ -342,10 +286,10 @@ async fn compile_cargo(platform: String, harmony_location: String) -> Result<Pat
let _copy_res = fs::copy(&bin, &bin_out).await;
}
Ok(bin_out)
bin_out
}
async fn compile_docker(platform: String, harmony_location: String) -> Result<PathBuf, String> {
async fn compile_docker(platform: String, harmony_location: String) -> PathBuf {
let docker_client =
bollard::Docker::connect_with_local_defaults().expect("couldn't connect to docker");
@@ -361,7 +305,7 @@ async fn compile_docker(platform: String, harmony_location: String) -> Result<Pa
.await
.expect("list containers failed");
if !containers.is_empty() {
if containers.len() > 0 {
docker_client
.remove_container("harmony_build", None::<RemoveContainerOptions>)
.await
@@ -423,12 +367,12 @@ async fn compile_docker(platform: String, harmony_location: String) -> Result<Pa
}
// wait until container is no longer running
while (wait.next().await).is_some() {}
while let Some(_) = wait.next().await {}
// hack that should be cleaned up
if platform.contains("windows") {
Ok(PathBuf::from(format!("{}/harmony.exe", harmony_location)))
return PathBuf::from(format!("{}/harmony.exe", harmony_location));
} else {
Ok(PathBuf::from(format!("{}/harmony", harmony_location)))
return PathBuf::from(format!("{}/harmony", harmony_location));
}
}

View File

@@ -1,12 +0,0 @@
[package]
name = "harmony_inventory_agent"
version = "0.1.0"
edition = "2024"
[dependencies]
actix-web = "4.4"
sysinfo = "0.30"
serde.workspace = true
serde_json.workspace = true
log.workspace = true
env_logger.workspace = true

View File

@@ -1,825 +0,0 @@
use log::debug;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::fs;
use std::path::Path;
use std::process::Command;
use sysinfo::System;
#[derive(Serialize, Deserialize, Debug)]
pub struct PhysicalHost {
pub storage_drives: Vec<StorageDrive>,
pub storage_controller: StorageController,
pub memory_modules: Vec<MemoryModule>,
pub cpus: Vec<CPU>,
pub chipset: Chipset,
pub network_interfaces: Vec<NetworkInterface>,
pub management_interface: Option<ManagementInterface>,
pub host_uuid: String,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct StorageDrive {
pub name: String,
pub model: String,
pub serial: String,
pub size_bytes: u64,
pub logical_block_size: u32,
pub physical_block_size: u32,
pub rotational: bool,
pub wwn: Option<String>,
pub interface_type: String,
pub smart_status: Option<String>,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct StorageController {
pub name: String,
pub driver: String,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct MemoryModule {
pub size_bytes: u64,
pub speed_mhz: Option<u32>,
pub manufacturer: Option<String>,
pub part_number: Option<String>,
pub serial_number: Option<String>,
pub rank: Option<u8>,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct CPU {
pub model: String,
pub vendor: String,
pub cores: u32,
pub threads: u32,
pub frequency_mhz: u64,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct Chipset {
pub name: String,
pub vendor: String,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct NetworkInterface {
pub name: String,
pub mac_address: String,
pub speed_mbps: Option<u32>,
pub is_up: bool,
pub mtu: u32,
pub ipv4_addresses: Vec<String>,
pub ipv6_addresses: Vec<String>,
pub driver: String,
pub firmware_version: Option<String>,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct ManagementInterface {
pub kind: String,
pub address: Option<String>,
pub firmware: Option<String>,
}
impl PhysicalHost {
pub fn gather() -> Result<Self, String> {
let mut sys = System::new_all();
sys.refresh_all();
Self::all_tools_available()?;
Ok(Self {
storage_drives: Self::gather_storage_drives()?,
storage_controller: Self::gather_storage_controller()?,
memory_modules: Self::gather_memory_modules()?,
cpus: Self::gather_cpus(&sys)?,
chipset: Self::gather_chipset()?,
network_interfaces: Self::gather_network_interfaces()?,
management_interface: Self::gather_management_interface()?,
host_uuid: Self::get_host_uuid()?,
})
}
fn all_tools_available() -> Result<(), String> {
let required_tools = [
("lsblk", "--version"),
("lspci", "--version"),
("lsmod", "--version"),
("dmidecode", "--version"),
("smartctl", "--version"),
("ip", "route"), // No version flag available
];
let mut missing_tools = Vec::new();
for (tool, tool_arg) in required_tools.iter() {
// First check if tool exists in PATH using which(1)
let exists = if let Ok(output) = Command::new("which").arg(tool).output() {
output.status.success()
} else {
// Fallback: manual PATH search if which(1) is unavailable
if let Ok(path_var) = std::env::var("PATH") {
path_var.split(':').any(|dir| {
let tool_path = std::path::Path::new(dir).join(tool);
tool_path.exists() && Self::is_executable(&tool_path)
})
} else {
false
}
};
if !exists {
missing_tools.push(*tool);
continue;
}
// Verify tool is functional by checking version/help output
let mut cmd = Command::new(tool);
cmd.arg(tool_arg);
cmd.stdout(std::process::Stdio::null());
cmd.stderr(std::process::Stdio::null());
if let Ok(status) = cmd.status() {
if !status.success() {
missing_tools.push(*tool);
}
} else {
missing_tools.push(*tool);
}
}
if !missing_tools.is_empty() {
let missing_str = missing_tools
.iter()
.map(|s| s.to_string())
.collect::<Vec<String>>()
.join(", ");
return Err(format!(
"The following required tools are not available: {}. Please install these tools to use PhysicalHost::gather()",
missing_str
));
}
Ok(())
}
#[cfg(unix)]
fn is_executable(path: &std::path::Path) -> bool {
use std::os::unix::fs::PermissionsExt;
match std::fs::metadata(path) {
Ok(meta) => meta.permissions().mode() & 0o111 != 0,
Err(_) => false,
}
}
#[cfg(not(unix))]
fn is_executable(_path: &std::path::Path) -> bool {
// On non-Unix systems, we assume existence implies executability
true
}
fn gather_storage_drives() -> Result<Vec<StorageDrive>, String> {
let mut drives = Vec::new();
// Use lsblk with JSON output for robust parsing
let output = Command::new("lsblk")
.args([
"-d",
"-o",
"NAME,MODEL,SERIAL,SIZE,ROTA,WWN",
"-n",
"-e",
"7",
"--json",
])
.output()
.map_err(|e| format!("Failed to execute lsblk: {}", e))?;
if !output.status.success() {
return Err(format!(
"lsblk command failed: {}",
String::from_utf8_lossy(&output.stderr)
));
}
let json: Value = serde_json::from_slice(&output.stdout)
.map_err(|e| format!("Failed to parse lsblk JSON output: {}", e))?;
let blockdevices = json
.get("blockdevices")
.and_then(|v| v.as_array())
.ok_or("Invalid lsblk JSON: missing 'blockdevices' array")?;
for device in blockdevices {
let name = device
.get("name")
.and_then(|v| v.as_str())
.ok_or("Missing 'name' in lsblk device")?
.to_string();
if name.is_empty() {
continue;
}
let model = device
.get("model")
.and_then(|v| v.as_str())
.map(|s| s.trim().to_string())
.unwrap_or_default();
let serial = device
.get("serial")
.and_then(|v| v.as_str())
.map(|s| s.trim().to_string())
.unwrap_or_default();
let size_str = device
.get("size")
.and_then(|v| v.as_str())
.ok_or("Missing 'size' in lsblk device")?;
let size_bytes = Self::parse_size(size_str)?;
let rotational = device
.get("rota")
.and_then(|v| v.as_bool())
.ok_or("Missing 'rota' in lsblk device")?;
let wwn = device
.get("wwn")
.and_then(|v| v.as_str())
.map(|s| s.trim().to_string())
.filter(|s| !s.is_empty() && s != "null");
let device_path = Path::new("/sys/block").join(&name);
let logical_block_size = Self::read_sysfs_u32(
&device_path.join("queue/logical_block_size"),
)
.map_err(|e| format!("Failed to read logical block size for {}: {}", name, e))?;
let physical_block_size = Self::read_sysfs_u32(
&device_path.join("queue/physical_block_size"),
)
.map_err(|e| format!("Failed to read physical block size for {}: {}", name, e))?;
let interface_type = Self::get_interface_type(&name, &device_path)?;
let smart_status = Self::get_smart_status(&name)?;
let mut drive = StorageDrive {
name: name.clone(),
model,
serial,
size_bytes,
logical_block_size,
physical_block_size,
rotational,
wwn,
interface_type,
smart_status,
};
// Enhance with additional sysfs info if available
if device_path.exists() {
if drive.model.is_empty() {
drive.model = Self::read_sysfs_string(&device_path.join("device/model"))
.map_err(|e| format!("Failed to read model for {}: {}", name, e))?;
}
if drive.serial.is_empty() {
drive.serial = Self::read_sysfs_string(&device_path.join("device/serial"))
.map_err(|e| format!("Failed to read serial for {}: {}", name, e))?;
}
}
drives.push(drive);
}
Ok(drives)
}
fn gather_storage_controller() -> Result<StorageController, String> {
let mut controller = StorageController {
name: "Unknown".to_string(),
driver: "Unknown".to_string(),
};
// Use lspci with JSON output if available
let output = Command::new("lspci")
.args(["-nn", "-d", "::0100", "-J"]) // Storage controllers class with JSON
.output()
.map_err(|e| format!("Failed to execute lspci: {}", e))?;
if output.status.success() {
let json: Value = serde_json::from_slice(&output.stdout)
.map_err(|e| format!("Failed to parse lspci JSON output: {}", e))?;
if let Some(devices) = json.as_array() {
for device in devices {
if let Some(device_info) = device.as_object()
&& let Some(name) = device_info
.get("device")
.and_then(|v| v.as_object())
.and_then(|v| v.get("name"))
.and_then(|v| v.as_str())
{
controller.name = name.to_string();
break;
}
}
}
}
// Fallback to text output if JSON fails or no device found
if controller.name == "Unknown" {
let output = Command::new("lspci")
.args(["-nn", "-d", "::0100"]) // Storage controllers class
.output()
.map_err(|e| format!("Failed to execute lspci (fallback): {}", e))?;
if output.status.success() {
let output_str = String::from_utf8_lossy(&output.stdout);
if let Some(line) = output_str.lines().next() {
let parts: Vec<&str> = line.split(':').collect();
if parts.len() > 2 {
controller.name = parts[2].trim().to_string();
}
}
}
}
// Try to get driver info from lsmod
let output = Command::new("lsmod")
.output()
.map_err(|e| format!("Failed to execute lsmod: {}", e))?;
if output.status.success() {
let output_str = String::from_utf8_lossy(&output.stdout);
for line in output_str.lines() {
if line.contains("ahci")
|| line.contains("nvme")
|| line.contains("megaraid")
|| line.contains("mpt3sas")
{
let parts: Vec<&str> = line.split_whitespace().collect();
if !parts.is_empty() {
controller.driver = parts[0].to_string();
break;
}
}
}
}
Ok(controller)
}
fn gather_memory_modules() -> Result<Vec<MemoryModule>, String> {
let mut modules = Vec::new();
let output = Command::new("dmidecode")
.arg("--type")
.arg("17")
.output()
.map_err(|e| format!("Failed to execute dmidecode: {}", e))?;
if !output.status.success() {
return Err(format!(
"dmidecode command failed: {}",
String::from_utf8_lossy(&output.stderr)
));
}
let output_str = String::from_utf8(output.stdout)
.map_err(|e| format!("Failed to parse dmidecode output: {}", e))?;
let sections: Vec<&str> = output_str.split("Memory Device").collect();
for section in sections.into_iter().skip(1) {
let mut module = MemoryModule {
size_bytes: 0,
speed_mhz: None,
manufacturer: None,
part_number: None,
serial_number: None,
rank: None,
};
for line in section.lines() {
let line = line.trim();
if let Some(size_str) = line.strip_prefix("Size: ") {
if size_str != "No Module Installed"
&& let Some((num, unit)) = size_str.split_once(' ')
&& let Ok(num) = num.parse::<u64>()
{
module.size_bytes = match unit {
"MB" => num * 1024 * 1024,
"GB" => num * 1024 * 1024 * 1024,
"KB" => num * 1024,
_ => 0,
};
}
} else if let Some(speed_str) = line.strip_prefix("Speed: ") {
if let Some((num, _unit)) = speed_str.split_once(' ') {
module.speed_mhz = num.parse().ok();
}
} else if let Some(man) = line.strip_prefix("Manufacturer: ") {
module.manufacturer = Some(man.to_string());
} else if let Some(part) = line.strip_prefix("Part Number: ") {
module.part_number = Some(part.to_string());
} else if let Some(serial) = line.strip_prefix("Serial Number: ") {
module.serial_number = Some(serial.to_string());
} else if let Some(rank) = line.strip_prefix("Rank: ") {
module.rank = rank.parse().ok();
}
}
if module.size_bytes > 0 {
modules.push(module);
}
}
Ok(modules)
}
fn gather_cpus(sys: &System) -> Result<Vec<CPU>, String> {
let mut cpus = Vec::new();
let global_cpu = sys.global_cpu_info();
cpus.push(CPU {
model: global_cpu.brand().to_string(),
vendor: global_cpu.vendor_id().to_string(),
cores: sys.physical_core_count().unwrap_or(1) as u32,
threads: sys.cpus().len() as u32,
frequency_mhz: global_cpu.frequency(),
});
Ok(cpus)
}
fn gather_chipset() -> Result<Chipset, String> {
Ok(Chipset {
name: Self::read_dmi("baseboard-product-name")?,
vendor: Self::read_dmi("baseboard-manufacturer")?,
})
}
fn gather_network_interfaces() -> Result<Vec<NetworkInterface>, String> {
let mut interfaces = Vec::new();
let sys_net_path = Path::new("/sys/class/net");
let entries = fs::read_dir(sys_net_path)
.map_err(|e| format!("Failed to read /sys/class/net: {}", e))?;
for entry in entries {
let entry = entry.map_err(|e| format!("Failed to read directory entry: {}", e))?;
let iface_name = entry
.file_name()
.into_string()
.map_err(|_| "Invalid UTF-8 in interface name")?;
let iface_path = entry.path();
// Skip virtual interfaces
if iface_name.starts_with("lo")
|| iface_name.starts_with("docker")
|| iface_name.starts_with("virbr")
|| iface_name.starts_with("veth")
|| iface_name.starts_with("br-")
|| iface_name.starts_with("tun")
|| iface_name.starts_with("wg")
{
continue;
}
// Check if it's a physical interface by looking for device directory
if !iface_path.join("device").exists() {
continue;
}
let mac_address = Self::read_sysfs_string(&iface_path.join("address"))
.map_err(|e| format!("Failed to read MAC address for {}: {}", iface_name, e))?;
let speed_mbps = if iface_path.join("speed").exists() {
match Self::read_sysfs_u32(&iface_path.join("speed")) {
Ok(speed) => Some(speed),
Err(e) => {
debug!(
"Failed to read speed for {}: {} . This is expected to fail on wifi interfaces.",
iface_name, e
);
None
}
}
} else {
None
};
let operstate = Self::read_sysfs_string(&iface_path.join("operstate"))
.map_err(|e| format!("Failed to read operstate for {}: {}", iface_name, e))?;
let mtu = Self::read_sysfs_u32(&iface_path.join("mtu"))
.map_err(|e| format!("Failed to read MTU for {}: {}", iface_name, e))?;
let driver =
Self::read_sysfs_symlink_basename(&iface_path.join("device/driver/module"))
.map_err(|e| format!("Failed to read driver for {}: {}", iface_name, e))?;
let firmware_version = Self::read_sysfs_opt_string(
&iface_path.join("device/firmware_version"),
)
.map_err(|e| format!("Failed to read firmware version for {}: {}", iface_name, e))?;
// Get IP addresses using ip command with JSON output
let (ipv4_addresses, ipv6_addresses) = Self::get_interface_ips_json(&iface_name)
.map_err(|e| format!("Failed to get IP addresses for {}: {}", iface_name, e))?;
interfaces.push(NetworkInterface {
name: iface_name,
mac_address,
speed_mbps,
is_up: operstate == "up",
mtu,
ipv4_addresses,
ipv6_addresses,
driver,
firmware_version,
});
}
Ok(interfaces)
}
fn gather_management_interface() -> Result<Option<ManagementInterface>, String> {
if Path::new("/dev/ipmi0").exists() {
Ok(Some(ManagementInterface {
kind: "IPMI".to_string(),
address: None,
firmware: Some(Self::read_dmi("bios-version")?),
}))
} else if Path::new("/sys/class/misc/mei").exists() {
Ok(Some(ManagementInterface {
kind: "Intel ME".to_string(),
address: None,
firmware: None,
}))
} else {
Ok(None)
}
}
fn get_host_uuid() -> Result<String, String> {
Self::read_dmi("system-uuid")
}
// Helper methods
fn read_sysfs_string(path: &Path) -> Result<String, String> {
fs::read_to_string(path)
.map(|s| s.trim().to_string())
.map_err(|e| format!("Failed to read {}: {}", path.display(), e))
}
fn read_sysfs_opt_string(path: &Path) -> Result<Option<String>, String> {
match fs::read_to_string(path) {
Ok(s) => {
let s = s.trim().to_string();
Ok(if s.is_empty() { None } else { Some(s) })
}
Err(e) if e.kind() == std::io::ErrorKind::NotFound => Ok(None),
Err(e) => Err(format!("Failed to read {}: {}", path.display(), e)),
}
}
fn read_sysfs_u32(path: &Path) -> Result<u32, String> {
fs::read_to_string(path)
.map_err(|e| format!("Failed to read {}: {}", path.display(), e))?
.trim()
.parse()
.map_err(|e| format!("Failed to parse {}: {}", path.display(), e))
}
fn read_sysfs_symlink_basename(path: &Path) -> Result<String, String> {
match fs::read_link(path) {
Ok(target_path) => match target_path.file_name() {
Some(name_osstr) => match name_osstr.to_str() {
Some(name_str) => Ok(name_str.to_string()),
None => Err(format!(
"Symlink target basename is not valid UTF-8: {}",
target_path.display()
)),
},
None => Err(format!(
"Symlink target has no basename: {} -> {}",
path.display(),
target_path.display()
)),
},
Err(e) if e.kind() == std::io::ErrorKind::NotFound => Err(format!(
"Could not resolve symlink for path : {}",
path.display()
)),
Err(e) => Err(format!("Failed to read symlink {}: {}", path.display(), e)),
}
}
fn read_dmi(field: &str) -> Result<String, String> {
let output = Command::new("dmidecode")
.arg("-s")
.arg(field)
.output()
.map_err(|e| format!("Failed to execute dmidecode for field {}: {}", field, e))?;
if !output.status.success() {
return Err(format!(
"dmidecode command failed for field {}: {}",
field,
String::from_utf8_lossy(&output.stderr)
));
}
String::from_utf8(output.stdout)
.map(|s| s.trim().to_string())
.map_err(|e| {
format!(
"Failed to parse dmidecode output for field {}: {}",
field, e
)
})
}
fn get_interface_type(device_name: &str, device_path: &Path) -> Result<String, String> {
if device_name.starts_with("nvme") {
Ok("NVMe".to_string())
} else if device_name.starts_with("sd") {
Ok("SATA".to_string())
} else if device_name.starts_with("hd") {
Ok("IDE".to_string())
} else if device_name.starts_with("vd") {
Ok("VirtIO".to_string())
} else {
// Try to determine from device path
let subsystem = Self::read_sysfs_string(&device_path.join("device/subsystem"))?;
Ok(subsystem
.split('/')
.next_back()
.unwrap_or("Unknown")
.to_string())
}
}
fn get_smart_status(device_name: &str) -> Result<Option<String>, String> {
let output = Command::new("smartctl")
.arg("-H")
.arg(format!("/dev/{}", device_name))
.output()
.map_err(|e| format!("Failed to execute smartctl for {}: {}", device_name, e))?;
if !output.status.success() {
return Ok(None);
}
let stdout = String::from_utf8(output.stdout)
.map_err(|e| format!("Failed to parse smartctl output for {}: {}", device_name, e))?;
for line in stdout.lines() {
if line.contains("SMART overall-health self-assessment") {
if let Some(status) = line.split(':').nth(1) {
return Ok(Some(status.trim().to_string()));
}
}
}
Ok(None)
}
fn parse_size(size_str: &str) -> Result<u64, String> {
debug!("Parsing size_str '{size_str}'");
let size;
if size_str.ends_with('T') {
size = size_str[..size_str.len() - 1]
.parse::<f64>()
.map(|t| t * 1024.0 * 1024.0 * 1024.0 * 1024.0)
.map_err(|e| format!("Failed to parse T size '{}': {}", size_str, e))
} else if size_str.ends_with('G') {
size = size_str[..size_str.len() - 1]
.parse::<f64>()
.map(|g| g * 1024.0 * 1024.0 * 1024.0)
.map_err(|e| format!("Failed to parse G size '{}': {}", size_str, e))
} else if size_str.ends_with('M') {
size = size_str[..size_str.len() - 1]
.parse::<f64>()
.map(|m| m * 1024.0 * 1024.0)
.map_err(|e| format!("Failed to parse M size '{}': {}", size_str, e))
} else if size_str.ends_with('K') {
size = size_str[..size_str.len() - 1]
.parse::<f64>()
.map(|k| k * 1024.0)
.map_err(|e| format!("Failed to parse K size '{}': {}", size_str, e))
} else if size_str.ends_with('B') {
size = size_str[..size_str.len() - 1]
.parse::<f64>()
.map_err(|e| format!("Failed to parse B size '{}': {}", size_str, e))
} else {
size = size_str
.parse::<f64>()
.map_err(|e| format!("Failed to parse size '{}': {}", size_str, e))
}
size.map(|s| s as u64)
}
fn get_interface_ips_json(iface_name: &str) -> Result<(Vec<String>, Vec<String>), String> {
let mut ipv4 = Vec::new();
let mut ipv6 = Vec::new();
// Get IPv4 addresses using JSON output
let output = Command::new("ip")
.args(["-j", "-4", "addr", "show", iface_name])
.output()
.map_err(|e| {
format!(
"Failed to execute ip command for IPv4 on {}: {}",
iface_name, e
)
})?;
if !output.status.success() {
return Err(format!(
"ip command for IPv4 on {} failed: {}",
iface_name,
String::from_utf8_lossy(&output.stderr)
));
}
let json: Value = serde_json::from_slice(&output.stdout).map_err(|e| {
format!(
"Failed to parse ip JSON output for IPv4 on {}: {}",
iface_name, e
)
})?;
if let Some(addrs) = json.as_array() {
for addr_info in addrs {
if let Some(addr_info_obj) = addr_info.as_object()
&& let Some(addr_info) =
addr_info_obj.get("addr_info").and_then(|v| v.as_array())
{
for addr in addr_info {
if let Some(addr_obj) = addr.as_object()
&& let Some(ip) = addr_obj.get("local").and_then(|v| v.as_str())
{
ipv4.push(ip.to_string());
}
}
}
}
}
// Get IPv6 addresses using JSON output
let output = Command::new("ip")
.args(["-j", "-6", "addr", "show", iface_name])
.output()
.map_err(|e| {
format!(
"Failed to execute ip command for IPv6 on {}: {}",
iface_name, e
)
})?;
if !output.status.success() {
return Err(format!(
"ip command for IPv6 on {} failed: {}",
iface_name,
String::from_utf8_lossy(&output.stderr)
));
}
let json: Value = serde_json::from_slice(&output.stdout).map_err(|e| {
format!(
"Failed to parse ip JSON output for IPv6 on {}: {}",
iface_name, e
)
})?;
if let Some(addrs) = json.as_array() {
for addr_info in addrs {
if let Some(addr_info_obj) = addr_info.as_object()
&& let Some(addr_info) =
addr_info_obj.get("addr_info").and_then(|v| v.as_array())
{
for addr in addr_info {
if let Some(addr_obj) = addr.as_object()
&& let Some(ip) = addr_obj.get("local").and_then(|v| v.as_str())
{
// Skip link-local addresses
if !ip.starts_with("fe80::") {
ipv6.push(ip.to_string());
}
}
}
}
}
}
Ok((ipv4, ipv6))
}
}
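A small in-module test sketch illustrating the size parsing above: lsblk suffixes are treated as binary multiples (powers of 1024) and fractional values are truncated when cast to `u64`.

#[cfg(test)]
mod parse_size_tests {
    use super::PhysicalHost;
    #[test]
    fn parses_binary_units() {
        assert_eq!(PhysicalHost::parse_size("1G").unwrap(), 1024 * 1024 * 1024);
        assert_eq!(PhysicalHost::parse_size("512M").unwrap(), 512 * 1024 * 1024);
        // 1.5 * 1024 = 1536.0, truncated to 1536
        assert_eq!(PhysicalHost::parse_size("1.5K").unwrap(), 1536);
    }
}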

View File

@@ -1,37 +0,0 @@
// src/main.rs
use actix_web::{App, HttpServer, Responder, get};
use hwinfo::PhysicalHost;
use std::env;
mod hwinfo;
#[get("/inventory")]
async fn inventory() -> impl Responder {
log::info!("Received inventory request");
let host = PhysicalHost::gather();
match host {
Ok(host) => {
log::info!("Inventory data gathered successfully");
actix_web::HttpResponse::Ok().json(host)
}
Err(error) => {
log::error!("Inventory data gathering FAILED");
actix_web::HttpResponse::InternalServerError().json(error)
}
}
}
#[actix_web::main]
async fn main() -> std::io::Result<()> {
env_logger::init();
let port = env::var("HARMONY_INVENTORY_AGENT_PORT").unwrap_or_else(|_| "8080".to_string());
let bind_addr = format!("0.0.0.0:{}", port);
log::info!("Starting inventory agent on {}", bind_addr);
HttpServer::new(|| App::new().service(inventory))
.bind(&bind_addr)?
.run()
.await
}
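For completeness, a hedged client-side sketch for this endpoint; `reqwest` (with its `json` feature) is an assumption made for illustration and is not a dependency of this crate:

// Sketch only: the agent listens on HARMONY_INVENTORY_AGENT_PORT (default 8080)
// and serves GET /inventory as JSON that deserializes into PhysicalHost.
async fn fetch_inventory(host: &str) -> Result<PhysicalHost, Box<dyn std::error::Error>> {
    let url = format!("http://{host}:8080/inventory");
    let inventory: PhysicalHost = reqwest::get(&url).await?.json().await?;
    Ok(inventory)
}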

View File

@@ -11,13 +11,13 @@ pub fn ip(input: TokenStream) -> TokenStream {
let input = parse_macro_input!(input as LitStr);
let ip_str = input.value();
if ip_str.parse::<std::net::Ipv4Addr>().is_ok() {
if let Ok(_) = ip_str.parse::<std::net::Ipv4Addr>() {
let expanded =
quote! { std::net::IpAddr::V4(#ip_str.parse::<std::net::Ipv4Addr>().unwrap()) };
return TokenStream::from(expanded);
}
if ip_str.parse::<std::net::Ipv6Addr>().is_ok() {
if let Ok(_) = ip_str.parse::<std::net::Ipv6Addr>() {
let expanded =
quote! { std::net::IpAddr::V6(#ip_str.parse::<std::net::Ipv6Addr>().unwrap()) };
return TokenStream::from(expanded);
@@ -31,7 +31,7 @@ pub fn ipv4(input: TokenStream) -> TokenStream {
let input = parse_macro_input!(input as LitStr);
let ip_str = input.value();
if ip_str.parse::<std::net::Ipv4Addr>().is_ok() {
if let Ok(_) = ip_str.parse::<std::net::Ipv4Addr>() {
let expanded = quote! { #ip_str.parse::<std::net::Ipv4Addr>().unwrap() };
return TokenStream::from(expanded);
}
@@ -127,7 +127,7 @@ pub fn ingress_path(input: TokenStream) -> TokenStream {
match path_str.starts_with("/") {
true => {
let expanded = quote! {(#path_str.to_string()) };
TokenStream::from(expanded)
return TokenStream::from(expanded);
}
false => panic!("Invalid ingress path"),
}
@@ -138,7 +138,7 @@ pub fn cidrv4(input: TokenStream) -> TokenStream {
let input = parse_macro_input!(input as LitStr);
let cidr_str = input.value();
if cidr_str.parse::<cidr::Ipv4Cidr>().is_ok() {
if let Ok(_) = cidr_str.parse::<cidr::Ipv4Cidr>() {
let expanded = quote! { #cidr_str.parse::<cidr::Ipv4Cidr>().unwrap() };
return TokenStream::from(expanded);
}
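A call-site sketch for these macros (the importing crate name is assumed to be `harmony_macros`; it is not shown in this hunk). Each macro validates the literal at compile time and expands to an infallible parse:

// Sketch only: `harmony_macros` is an assumed crate name; `cidrv4!` also requires the
// `cidr` crate at the call site because its expansion references `cidr::Ipv4Cidr`.
use harmony_macros::{cidrv4, ip, ipv4};
fn network_defaults() {
    let gateway: std::net::IpAddr = ip!("192.168.1.1");
    let dns: std::net::Ipv4Addr = ipv4!("10.0.0.53");
    let lan: cidr::Ipv4Cidr = cidrv4!("192.168.1.0/24");
    let _ = (gateway, dns, lan);
}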

View File

@@ -1,23 +0,0 @@
[package]
name = "harmony-secret"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
[dependencies]
harmony-secret-derive = { version = "0.1.0", path = "../harmony_secret_derive" }
serde = { version = "1.0.209", features = ["derive", "rc"] }
serde_json = "1.0.127"
thiserror.workspace = true
lazy_static.workspace = true
directories.workspace = true
log.workspace = true
infisical = "0.0.2"
tokio.workspace = true
async-trait.workspace = true
http.workspace = true
[dev-dependencies]
pretty_assertions.workspace = true
tempfile.workspace = true

View File

@@ -1,18 +0,0 @@
use lazy_static::lazy_static;
lazy_static! {
pub static ref SECRET_NAMESPACE: String =
std::env::var("HARMONY_SECRET_NAMESPACE").expect("HARMONY_SECRET_NAMESPACE environment variable is required, it should contain the name of the project you are working on to access its secrets");
pub static ref SECRET_STORE: Option<String> =
std::env::var("HARMONY_SECRET_STORE").ok();
pub static ref INFISICAL_URL: Option<String> =
std::env::var("HARMONY_SECRET_INFISICAL_URL").ok();
pub static ref INFISICAL_PROJECT_ID: Option<String> =
std::env::var("HARMONY_SECRET_INFISICAL_PROJECT_ID").ok();
pub static ref INFISICAL_ENVIRONMENT: Option<String> =
std::env::var("HARMONY_SECRET_INFISICAL_ENVIRONMENT").ok();
pub static ref INFISICAL_CLIENT_ID: Option<String> =
std::env::var("HARMONY_SECRET_INFISICAL_CLIENT_ID").ok();
pub static ref INFISICAL_CLIENT_SECRET: Option<String> =
std::env::var("HARMONY_SECRET_INFISICAL_CLIENT_SECRET").ok();
}
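A sketch of the minimum environment expected before the secret manager is first touched, using placeholder values; with edition 2024, `std::env::set_var` must sit inside `unsafe` because it mutates process-global state, and real deployments would export these from the shell or CI instead:

// Sketch only: placeholder values.
fn configure_secret_backend_for_tests() {
    unsafe {
        std::env::set_var("HARMONY_SECRET_NAMESPACE", "my-project");
        // "file" selects the local store; any other value (or leaving it unset) selects Infisical.
        std::env::set_var("HARMONY_SECRET_STORE", "file");
    }
    // The Infisical backend additionally requires HARMONY_SECRET_INFISICAL_URL,
    // _PROJECT_ID, _ENVIRONMENT, _CLIENT_ID and _CLIENT_SECRET.
}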

View File

@@ -1,166 +0,0 @@
pub mod config;
mod store;
use crate::config::SECRET_NAMESPACE;
use async_trait::async_trait;
use config::INFISICAL_CLIENT_ID;
use config::INFISICAL_CLIENT_SECRET;
use config::INFISICAL_ENVIRONMENT;
use config::INFISICAL_PROJECT_ID;
use config::INFISICAL_URL;
use config::SECRET_STORE;
use serde::{Serialize, de::DeserializeOwned};
use std::fmt;
use store::InfisicalSecretStore;
use store::LocalFileSecretStore;
use thiserror::Error;
use tokio::sync::OnceCell;
pub use harmony_secret_derive::Secret;
// The Secret trait remains the same.
pub trait Secret: Serialize + DeserializeOwned + Sized {
const KEY: &'static str;
}
// The error enum remains the same.
#[derive(Debug, Error)]
pub enum SecretStoreError {
#[error("Secret not found for key '{key}' in namespace '{namespace}'")]
NotFound { namespace: String, key: String },
#[error("Failed to deserialize secret for key '{key}': {source}")]
Deserialization {
key: String,
source: serde_json::Error,
},
#[error("Failed to serialize secret for key '{key}': {source}")]
Serialization {
key: String,
source: serde_json::Error,
},
#[error("Underlying storage error: {0}")]
Store(#[from] Box<dyn std::error::Error + Send + Sync>),
}
// The trait is now async!
#[async_trait]
pub trait SecretStore: fmt::Debug + Send + Sync {
async fn get_raw(&self, namespace: &str, key: &str) -> Result<Vec<u8>, SecretStoreError>;
async fn set_raw(
&self,
namespace: &str,
key: &str,
value: &[u8],
) -> Result<(), SecretStoreError>;
}
// Use OnceCell for async-friendly, one-time initialization.
static SECRET_MANAGER: OnceCell<SecretManager> = OnceCell::const_new();
/// Initializes and returns a reference to the global SecretManager.
async fn get_secret_manager() -> &'static SecretManager {
SECRET_MANAGER.get_or_init(init_secret_manager).await
}
/// The async initialization function for the SecretManager.
async fn init_secret_manager() -> SecretManager {
let default_secret_store = "infisical".to_string();
let store_type = SECRET_STORE.as_ref().unwrap_or(&default_secret_store);
let store: Box<dyn SecretStore> = match store_type.as_str() {
"file" => Box::new(LocalFileSecretStore::default()),
"infisical" | _ => {
let store = InfisicalSecretStore::new(
INFISICAL_URL.clone().expect("Infisical url must be set, see harmony_secret config for ways to provide it. You can try with HARMONY_SECRET_INFISICAL_URL"),
INFISICAL_PROJECT_ID.clone().expect("Infisical project id must be set, see harmony_secret config for ways to provide it. You can try with HARMONY_SECRET_INFISICAL_PROJECT_ID"),
INFISICAL_ENVIRONMENT.clone().expect("Infisical environment must be set, see harmony_secret config for ways to provide it. You can try with HARMONY_SECRET_INFISICAL_ENVIRONMENT"),
INFISICAL_CLIENT_ID.clone().expect("Infisical client id must be set, see harmony_secret config for ways to provide it. You can try with HARMONY_SECRET_INFISICAL_CLIENT_ID"),
INFISICAL_CLIENT_SECRET.clone().expect("Infisical client secret must be set, see harmony_secret config for ways to provide it. You can try with HARMONY_SECRET_INFISICAL_CLIENT_SECRET"),
)
.await
.expect("Failed to initialize Infisical secret store");
Box::new(store)
}
};
SecretManager::new(SECRET_NAMESPACE.clone(), store)
}
/// Manages the lifecycle of secrets, providing a simple static API.
#[derive(Debug)]
pub struct SecretManager {
namespace: String,
store: Box<dyn SecretStore>,
}
impl SecretManager {
fn new(namespace: String, store: Box<dyn SecretStore>) -> Self {
Self { namespace, store }
}
/// Retrieves and deserializes a secret.
pub async fn get<T: Secret>() -> Result<T, SecretStoreError> {
let manager = get_secret_manager().await;
let raw_value = manager.store.get_raw(&manager.namespace, T::KEY).await?;
serde_json::from_slice(&raw_value).map_err(|e| SecretStoreError::Deserialization {
key: T::KEY.to_string(),
source: e,
})
}
/// Serializes and stores a secret.
pub async fn set<T: Secret>(secret: &T) -> Result<(), SecretStoreError> {
let manager = get_secret_manager().await;
let raw_value =
serde_json::to_vec(secret).map_err(|e| SecretStoreError::Serialization {
key: T::KEY.to_string(),
source: e,
})?;
manager
.store
.set_raw(&manager.namespace, T::KEY, &raw_value)
.await
}
}
#[cfg(test)]
mod test {
use super::*;
use pretty_assertions::assert_eq;
use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct TestUserMeta {
labels: Vec<String>,
}
#[derive(Secret, Serialize, Deserialize, Debug, PartialEq)]
struct TestSecret {
user: String,
password: String,
metadata: TestUserMeta,
}
#[cfg(secrete2etest)]
#[tokio::test]
async fn set_and_retrieve_secret() {
let secret = TestSecret {
user: String::from("user"),
password: String::from("password"),
metadata: TestUserMeta {
labels: vec![
String::from("label1"),
String::from("label2"),
String::from(
"some longet label with \" special @#%$)(udiojcia[]]] \"'asdij'' characters Nдs はにほへとちり าฟันพัฒนา yağız şoföre ç <20> <20> <20> <20> <20> <20> <20> <20> <20> <20> <20> <20> <20> 👩‍👩‍👧‍👦 /span> 👩‍👧‍👦 and why not emojis ",
),
],
},
};
SecretManager::set(&secret).await.unwrap();
let value = SecretManager::get::<TestSecret>().await.unwrap();
assert_eq!(value, secret);
}
}
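Outside the test module, the intended call pattern looks roughly like the sketch below; the struct, fields, and function name are illustrative, but the derive plus SecretManager::get / SecretManager::set flow mirrors the test above:
use harmony_secret::{Secret, SecretManager, SecretStoreError};
use serde::{Deserialize, Serialize};

#[derive(Secret, Serialize, Deserialize, Debug)]
struct DatabaseCredentials {
    user: String,
    password: String,
}

async fn rotate_db_password(new_password: String) -> Result<(), SecretStoreError> {
    // T::KEY comes from the derive; the namespace comes from HARMONY_SECRET_NAMESPACE.
    let mut creds = SecretManager::get::<DatabaseCredentials>().await?;
    creds.password = new_password;
    SecretManager::set(&creds).await
}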

View File

@@ -1,129 +0,0 @@
use crate::{SecretStore, SecretStoreError};
use async_trait::async_trait;
use infisical::{
AuthMethod, InfisicalError,
client::Client,
secrets::{CreateSecretRequest, GetSecretRequest, UpdateSecretRequest},
};
use log::{info, warn};
#[derive(Debug)]
pub struct InfisicalSecretStore {
client: Client,
project_id: String,
environment: String,
}
impl InfisicalSecretStore {
/// Creates a new, authenticated Infisical client.
pub async fn new(
base_url: String,
project_id: String,
environment: String,
client_id: String,
client_secret: String,
) -> Result<Self, InfisicalError> {
info!("INFISICAL_STORE: Initializing client for URL: {base_url}");
// The builder and login logic remains the same.
let mut client = Client::builder().base_url(base_url).build().await?;
let auth_method = AuthMethod::new_universal_auth(client_id, client_secret);
client.login(auth_method).await?;
info!("INFISICAL_STORE: Client authenticated successfully.");
Ok(Self {
client,
project_id,
environment,
})
}
}
#[async_trait]
impl SecretStore for InfisicalSecretStore {
async fn get_raw(&self, _environment: &str, key: &str) -> Result<Vec<u8>, SecretStoreError> {
let environment = &self.environment;
info!("INFISICAL_STORE: Getting key '{key}' from environment '{environment}'");
let request = GetSecretRequest::builder(key, &self.project_id, environment).build();
match self.client.secrets().get(request).await {
Ok(secret) => Ok(secret.secret_value.into_bytes()),
Err(e) => {
// Correctly match against the actual InfisicalError enum.
match e {
// The specific case for a 404 Not Found error.
InfisicalError::HttpError { status, .. }
if status == http::StatusCode::NOT_FOUND =>
{
Err(SecretStoreError::NotFound {
namespace: environment.to_string(),
key: key.to_string(),
})
}
// For all other errors, wrap them in our generic Store error.
_ => Err(SecretStoreError::Store(Box::new(e))),
}
}
}
}
async fn set_raw(
&self,
_environment: &str,
key: &str,
val: &[u8],
) -> Result<(), SecretStoreError> {
info!(
"INFISICAL_STORE: Setting key '{key}' in environment '{}'",
self.environment
);
let value_str =
String::from_utf8(val.to_vec()).map_err(|e| SecretStoreError::Store(Box::new(e)))?;
// --- Upsert Logic ---
// First, attempt to update the secret.
let update_req = UpdateSecretRequest::builder(key, &self.project_id, &self.environment)
.secret_value(&value_str)
.build();
match self.client.secrets().update(update_req).await {
Ok(_) => {
info!("INFISICAL_STORE: Successfully updated secret '{key}'.");
Ok(())
}
Err(e) => {
// If the update failed, check if it was because the secret doesn't exist.
match e {
InfisicalError::HttpError { status, .. }
if status == http::StatusCode::NOT_FOUND =>
{
// The secret was not found, so we create it instead.
warn!(
"INFISICAL_STORE: Secret '{key}' not found for update, attempting to create it."
);
let create_req = CreateSecretRequest::builder(
key,
&value_str,
&self.project_id,
&self.environment,
)
.build();
// Handle potential errors during creation.
self.client
.secrets()
.create(create_req)
.await
.map_err(|create_err| SecretStoreError::Store(Box::new(create_err)))?;
info!("INFISICAL_STORE: Successfully created secret '{key}'.");
Ok(())
}
// Any other error during update is a genuine failure.
_ => Err(SecretStoreError::Store(Box::new(e))),
}
}
}
}
}
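The Infisical backend above and the LocalFileSecretStore both sit behind the same SecretStore trait from the crate root; as an illustrative sketch only (not part of this change), an in-memory implementation of that contract could look like this:
use std::collections::HashMap;
use std::sync::Mutex;

use async_trait::async_trait;
// SecretStore and SecretStoreError are the items defined in the crate root shown earlier.
use crate::{SecretStore, SecretStoreError};

#[derive(Debug, Default)]
struct InMemorySecretStore {
    values: Mutex<HashMap<(String, String), Vec<u8>>>,
}

#[async_trait]
impl SecretStore for InMemorySecretStore {
    async fn get_raw(&self, namespace: &str, key: &str) -> Result<Vec<u8>, SecretStoreError> {
        self.values
            .lock()
            .unwrap()
            .get(&(namespace.to_string(), key.to_string()))
            .cloned()
            .ok_or_else(|| SecretStoreError::NotFound {
                namespace: namespace.to_string(),
                key: key.to_string(),
            })
    }

    async fn set_raw(&self, namespace: &str, key: &str, value: &[u8]) -> Result<(), SecretStoreError> {
        self.values
            .lock()
            .unwrap()
            .insert((namespace.to_string(), key.to_string()), value.to_vec());
        Ok(())
    }
}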

Some files were not shown because too many files have changed in this diff.