From 55bfe306ad6b88fd798bb2540971b8be6ff2b2a3 Mon Sep 17 00:00:00 2001 From: Jean-Gabriel Gill-Couture Date: Sat, 16 Aug 2025 11:13:32 -0400 Subject: [PATCH 01/11] feat: Secret module works with infisical and local file storage backends --- Cargo.lock | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 62d8aee..456ca45 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2313,7 +2313,6 @@ dependencies = [ "temp-dir", "temp-file", "tempfile", - "thiserror 2.0.14", "tokio", "tokio-util", "url", @@ -3104,6 +3103,17 @@ dependencies = [ "libc", ] +[[package]] +name = "io-uring" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d93587f37623a1a17d94ef2bc9ada592f5465fe7732084ab7beefabe5c77c0c4" +dependencies = [ + "bitflags 2.9.1", + "cfg-if", + "libc", +] + [[package]] name = "ipnet" version = "2.11.0" From 56c181fc3df56ceae15ef839f9e9a5fdfb683549 Mon Sep 17 00:00:00 2001 From: Jean-Gabriel Gill-Couture Date: Mon, 18 Aug 2025 22:29:46 -0400 Subject: [PATCH 02/11] wip: OKD Installation automation layed out. Next step : review this after some sleep and fill in the (many) blanks with actual implementations. --- harmony/src/domain/interpret/mod.rs | 2 + harmony/src/modules/okd/installation.rs | 868 ++++++++++++++++++++++++ harmony/src/modules/okd/mod.rs | 1 + 3 files changed, 871 insertions(+) create mode 100644 harmony/src/modules/okd/installation.rs diff --git a/harmony/src/domain/interpret/mod.rs b/harmony/src/domain/interpret/mod.rs index 71d2f61..737bf28 100644 --- a/harmony/src/domain/interpret/mod.rs +++ b/harmony/src/domain/interpret/mod.rs @@ -32,6 +32,7 @@ pub enum InterpretName { K8sPrometheusCrdAlerting, DiscoverInventoryAgent, CephClusterHealth, + Custom(&'static str), } impl std::fmt::Display for InterpretName { @@ -60,6 +61,7 @@ impl std::fmt::Display for InterpretName { InterpretName::K8sPrometheusCrdAlerting => f.write_str("K8sPrometheusCrdAlerting"), InterpretName::DiscoverInventoryAgent => f.write_str("DiscoverInventoryAgent"), InterpretName::CephClusterHealth => f.write_str("CephClusterHealth"), + InterpretName::Custom(name) => f.write_str(name), } } } diff --git a/harmony/src/modules/okd/installation.rs b/harmony/src/modules/okd/installation.rs new file mode 100644 index 0000000..f9f59e2 --- /dev/null +++ b/harmony/src/modules/okd/installation.rs @@ -0,0 +1,868 @@ +//! OKDInstallationScore +//! +//! Overview +//! -------- +//! OKDInstallationScore orchestrates an end-to-end, bare-metal OKD (OpenShift/OKD 4.19) +//! installation using Harmony’s strongly-typed Scores and Interprets. It encodes the +//! “discovery-first, then provision” strategy with strict ordering, observable progress, +//! and minimal assumptions about the underlying network. +//! +//! Design goals +//! - Deterministic, observable pipeline from unknown hardware to a healthy OKD cluster. +//! - Do NOT require LACP bonding during PXE/inventory. Bonding is configured only +//! after the host has a stable OS on disk (SCOS/RHCOS) and OKD MachineConfigs/NNCP +//! can enforce persistence safely. +//! - Support per-MAC iPXE rendering without requiring multiple DHCP reservations for +//! the same host. Discovery runs with generic DHCP (access/unbonded). Role-specific +//! per-MAC PXE entries are activated just-in-time before install. +//! - Emit HarmonyEvent instrumentation at each step via the Score::interpret path. +//! +//! High-level flow +//! 1) OKDSetup01Inventory +//! - Serve default iPXE + Kickstart (in-RAM CentOS Stream 9) for discovery only. +//! - Enable SSH with the cluster’s ephemeral pubkey, start a Rust inventory agent. +//! - Harmony discovers nodes by scraping the agent endpoint and collects MACs/NICs. +//! - DNS: optionally register temporary hostnames and enable DHCP lease registration. +//! +//! 2) OKDSetup02Bootstrap +//! - User selects which discovered node becomes bootstrap. +//! - Render per-MAC iPXE for bootstrap with OKD 4.19 SCOS live assets + ignition. +//! - Reboot node via SSH; install bootstrap; wait for bootstrap-complete. +//! +//! 3) OKDSetup03ControlPlane +//! - Render per-MAC iPXE for cp0/cp1/cp2 with ignition (includes persistent bond via +//! MachineConfig or NNCP if required). Reboot via SSH, join masters. +//! +//! 4) OKDSetup04Workers +//! - Render per-MAC iPXE for worker set; join workers. +//! +//! 5) OKDSetup05SanityCheck +//! - Validate API/ingress/clusteroperators; ensure healthy control plane and SDN. +//! +//! 6) OKDSetup06InstallationReport +//! - Produce a concise, machine-readable report (JSON) and a human summary. +//! +//! Network notes +//! - During Inventory: ports must be simple access (no LACP). DHCP succeeds; iPXE +//! loads CentOS Stream live with Kickstart and starts the inventory endpoint. +//! - During Provisioning: only after SCOS is on disk and Ignition/MC can be applied +//! do we set the bond persistently. If early bonding is truly required on a host, +//! use kernel args selectively in the per-MAC PXE for that host, but never for the +//! generic discovery path. +//! +//! DNS and hostname +//! - Because a single host may present multiple MACs, but DHCP/ISC on OPNsense may not +//! easily support “one hostname across multiple MACs” in a single lease entry, we avoid +//! strict hostname binding during discovery. We rely on dynamic leases and record the +//! mapping (IP/MAC) at scrape time. +//! - Once a role is assigned, we render a per-MAC PXE entry and ensure the role-specific +//! DNS A/AAAA/CNAME entries are present (e.g., api, api-int, apps wildcard). This keeps +//! DHCP simple and DNS consistent for OKD. +//! +//! Instrumentation +//! - All child Scores are executed via Score::interpret, which emits HarmonyEvent +//! InterpretExecutionStarted/Finished. The orchestrator also emits HarmonyStarted/ +//! HarmonyFinished around the full pipeline execution. +//! +//! Configuration knobs +//! - lan_cidr: CIDR to scan/allow for discovery endpoints. +//! - public_domain: External wildcard/apps domain (e.g., apps.example.com). +//! - internal_domain: Internal cluster domain (e.g., cluster.local or harmony.mcd). +//! +//! Notes +//! - This file co-locates step Scores for ease of review. In follow-up changes, refactor +//! step Scores (OKDSetupXX*) into separate modules. + +use async_trait::async_trait; +use derive_new::new; +use harmony_macros::{ip, ipv4}; +use log::info; +use serde::{Deserialize, Serialize}; + +use crate::{ + data::Version, + instrumentation::{HarmonyEvent, instrument}, + interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, + inventory::Inventory, + score::Score, + topology::{DnsRecord, DnsRecordType, DnsServer, Topology}, +}; + +// ------------------------------------------------------------------------------------------------- +// Public Orchestrator Score +// ------------------------------------------------------------------------------------------------- + +#[derive(Debug, Clone, Serialize, Deserialize, new)] +pub struct OKDInstallationScore { + /// The LAN CIDR where discovery endpoints live (e.g., 192.168.10.0/24) + pub lan_cidr: String, + /// Public external domain (e.g., example.com). Used for api/apps wildcard, etc. + pub public_domain: String, + /// Internal cluster domain (e.g., harmony.mcd). Used for internal svc/ingress and DNS. + pub internal_domain: String, +} + +impl Score for OKDInstallationScore { + fn create_interpret(&self) -> Box> { + Box::new(OKDInstallationInterpret::new(self.clone())) + } + + fn name(&self) -> String { + "OKDInstallationScore".to_string() + } +} + +// ------------------------------------------------------------------------------------------------- +// Orchestrator Interpret +// ------------------------------------------------------------------------------------------------- + +#[derive(Debug, Clone)] +pub struct OKDInstallationInterpret { + score: OKDInstallationScore, + version: Version, + status: InterpretStatus, +} + +impl OKDInstallationInterpret { + pub fn new(score: OKDInstallationScore) -> Self { + let version = Version::from("0.1.0").expect("valid version"); + Self { + score, + version, + status: InterpretStatus::QUEUED, + } + } + + async fn run_inventory_phase( + &self, + inventory: &Inventory, + topology: &T, + ) -> Result<(), InterpretError> { + // 1) Prepare DNS and DHCP lease registration (optional) + let dns_score = OKDSetup01InventoryDnsScore::new( + self.score.internal_domain.clone(), + self.score.public_domain.clone(), + Some(true), // register_dhcp_leases + ); + dns_score.interpret(inventory, topology).await?; + + // 2) Serve default iPXE + Kickstart and poll discovery + let discovery_score = OKDSetup01InventoryScore::new(self.score.lan_cidr.clone()); + discovery_score.interpret(inventory, topology).await?; + + Ok(()) + } + + async fn run_bootstrap_phase( + &self, + inventory: &Inventory, + topology: &T, + ) -> Result<(), InterpretError> { + // Select and provision bootstrap + let bootstrap_score = OKDSetup02BootstrapScore::new( + self.score.public_domain.clone(), + self.score.internal_domain.clone(), + ); + bootstrap_score.interpret(inventory, topology).await?; + Ok(()) + } + + async fn run_control_plane_phase( + &self, + inventory: &Inventory, + topology: &T, + ) -> Result<(), InterpretError> { + let control_plane_score = OKDSetup03ControlPlaneScore::new(); + control_plane_score.interpret(inventory, topology).await?; + Ok(()) + } + + async fn run_workers_phase( + &self, + inventory: &Inventory, + topology: &T, + ) -> Result<(), InterpretError> { + let workers_score = OKDSetup04WorkersScore::new(); + workers_score.interpret(inventory, topology).await?; + Ok(()) + } + + async fn run_sanity_phase( + &self, + inventory: &Inventory, + topology: &T, + ) -> Result<(), InterpretError> { + let sanity_score = OKDSetup05SanityCheckScore::new(); + sanity_score.interpret(inventory, topology).await?; + Ok(()) + } + + async fn run_report_phase( + &self, + inventory: &Inventory, + topology: &T, + ) -> Result<(), InterpretError> { + let report_score = OKDSetup06InstallationReportScore::new( + self.score.public_domain.clone(), + self.score.internal_domain.clone(), + ); + report_score.interpret(inventory, topology).await?; + Ok(()) + } +} + +#[async_trait] +impl Interpret for OKDInstallationInterpret { + fn get_name(&self) -> InterpretName { + InterpretName::Custom("OKDInstallationInterpret") + } + + fn get_version(&self) -> Version { + self.version.clone() + } + + fn get_status(&self) -> InterpretStatus { + self.status.clone() + } + + fn get_children(&self) -> Vec { + vec![] + } + + async fn execute( + &self, + inventory: &Inventory, + topology: &T, + ) -> Result { + instrument(HarmonyEvent::HarmonyStarted).ok(); + + info!( + "Starting OKD installation pipeline for public_domain={} internal_domain={} lan_cidr={}", + self.score.public_domain, self.score.internal_domain, self.score.lan_cidr + ); + + // 1) Inventory (default PXE, in-RAM kickstart, Rust inventory agent) + self.run_inventory_phase(inventory, topology).await?; + + // 2) Bootstrap (render per-MAC iPXE + ignition; reboot node; wait for bootstrap complete) + self.run_bootstrap_phase(inventory, topology).await?; + + // 3) Control plane + self.run_control_plane_phase(inventory, topology).await?; + + // 4) Workers + self.run_workers_phase(inventory, topology).await?; + + // 5) Sanity checks + self.run_sanity_phase(inventory, topology).await?; + + // 6) Installation report + self.run_report_phase(inventory, topology).await?; + + instrument(HarmonyEvent::HarmonyFinished).ok(); + + Ok(Outcome::new( + InterpretStatus::SUCCESS, + "OKD installation pipeline completed".into(), + )) + } +} + +// ------------------------------------------------------------------------------------------------- +// Step 01: Inventory DNS setup +// - Keep DHCP simple; optionally register dynamic leases into DNS. +// - Ensure base records for internal/public domains (api/api-int/apps wildcard). +// ------------------------------------------------------------------------------------------------- + +#[derive(Debug, Clone, Serialize, new)] +struct OKDSetup01InventoryDnsScore { + internal_domain: String, + public_domain: String, + register_dhcp_leases: Option, +} + +impl Score for OKDSetup01InventoryDnsScore { + fn create_interpret(&self) -> Box> { + Box::new(OKDSetup01InventoryDnsInterpret::new(self.clone())) + } + + fn name(&self) -> String { + "OKDSetup01InventoryDnsScore".to_string() + } +} + +#[derive(Debug, Clone)] +struct OKDSetup01InventoryDnsInterpret { + score: OKDSetup01InventoryDnsScore, + version: Version, + status: InterpretStatus, +} + +impl OKDSetup01InventoryDnsInterpret { + pub fn new(score: OKDSetup01InventoryDnsScore) -> Self { + let version = Version::from("1.0.0").unwrap(); + Self { + version, + score, + status: InterpretStatus::QUEUED, + } + } + + async fn ensure_dns(&self, dns: &T) -> Result<(), InterpretError> { + // Minimal records placeholders; real IPs are set elsewhere in the flow. + // We register the names early to ensure resolvability for clients relying on DNS. + let mut records: Vec = vec![ + DnsRecord { + value: ip!("0.0.0.0"), + host: "api".to_string(), + domain: self.score.internal_domain.clone(), + record_type: DnsRecordType::A, + }, + DnsRecord { + value: ip!("0.0.0.0"), + host: "api-int".to_string(), + domain: self.score.internal_domain.clone(), + record_type: DnsRecordType::A, + }, + DnsRecord { + value: ip!("0.0.0.0"), + host: "*.apps.".to_string(), + domain: self.score.internal_domain.clone(), + record_type: DnsRecordType::A, + }, + ]; + dns.ensure_hosts_registered(records.drain(..).collect()) + .await?; + if let Some(register) = self.score.register_dhcp_leases { + dns.register_dhcp_leases(register).await?; + } + dns.commit_config().await?; + Ok(()) + } +} + +#[async_trait] +impl Interpret for OKDSetup01InventoryDnsInterpret { + fn get_name(&self) -> InterpretName { + InterpretName::Custom("OKDSetup01InventoryDns") + } + + fn get_version(&self) -> Version { + self.version.clone() + } + + fn get_status(&self) -> InterpretStatus { + self.status.clone() + } + + fn get_children(&self) -> Vec { + vec![] + } + + async fn execute( + &self, + _inventory: &Inventory, + topology: &T, + ) -> Result { + info!("Ensuring base DNS and DHCP lease registration for discovery phase"); + self.ensure_dns(topology).await?; + Ok(Outcome::new( + InterpretStatus::SUCCESS, + "Inventory DNS prepared".into(), + )) + } +} + +// ------------------------------------------------------------------------------------------------- +// Step 01: Inventory (default PXE + Kickstart in RAM + Rust agent) +// - This score exposes/ensures the default inventory assets and waits for discoveries. +// - No early bonding. Simple access DHCP. +// ------------------------------------------------------------------------------------------------- + +#[derive(Debug, Clone, Serialize, new)] +struct OKDSetup01InventoryScore { + lan_cidr: String, +} + +impl Score for OKDSetup01InventoryScore { + fn create_interpret(&self) -> Box> { + Box::new(OKDSetup01InventoryInterpret::new(self.clone())) + } + + fn name(&self) -> String { + "OKDSetup01InventoryScore".to_string() + } +} + +#[derive(Debug, Clone)] +struct OKDSetup01InventoryInterpret { + score: OKDSetup01InventoryScore, + version: Version, + status: InterpretStatus, +} + +impl OKDSetup01InventoryInterpret { + pub fn new(score: OKDSetup01InventoryScore) -> Self { + let version = Version::from("1.0.0").unwrap(); + Self { + version, + score, + status: InterpretStatus::QUEUED, + } + } + + async fn ensure_inventory_assets( + &self, + topology: &T, + ) -> Result<(), InterpretError> { + // Placeholder: push or verify iPXE default, Kickstart, and Rust inventory agent are hosted. + // Real implementation: publish to the PXE/HTTP server via the topology. + info!( + "[Inventory] Ensuring default iPXE, Kickstart, and inventory agent are available for LAN {}", + self.score.lan_cidr + ); + // topology.publish_http_asset(…) ? + Ok(()) + } + + async fn discover_nodes(&self) -> Result { + // Placeholder: implement Harmony discovery logic (scan/pull/push mode). + // Returns number of newly discovered nodes. + info!( + "[Inventory] Scanning for inventory agents in {}", + self.score.lan_cidr + ); + // In practice, this would query harmony_composer or a local registry store. + Ok(3) + } +} + +#[async_trait] +impl Interpret for OKDSetup01InventoryInterpret { + fn get_name(&self) -> InterpretName { + InterpretName::Custom("OKDSetup01Inventory") + } + + fn get_version(&self) -> Version { + self.version.clone() + } + + fn get_status(&self) -> InterpretStatus { + self.status.clone() + } + + fn get_children(&self) -> Vec { + vec![] + } + + async fn execute( + &self, + _inventory: &Inventory, + topology: &T, + ) -> Result { + self.ensure_inventory_assets(topology).await?; + let count = self.discover_nodes().await?; + info!("[Inventory] Discovered {count} nodes"); + Ok(Outcome::new( + InterpretStatus::SUCCESS, + format!("Inventory phase complete. Nodes discovered: {count}"), + )) + } +} + +// ------------------------------------------------------------------------------------------------- +// Step 02: Bootstrap +// - Select bootstrap node (from discovered set). +// - Render per-MAC iPXE pointing to OKD 4.19 SCOS live assets + bootstrap ignition. +// - Reboot the host via SSH and wait for bootstrap-complete. +// - No bonding at this stage unless absolutely required; prefer persistence via MC later. +// ------------------------------------------------------------------------------------------------- + +#[derive(Debug, Clone, Serialize, new)] +struct OKDSetup02BootstrapScore { + public_domain: String, + internal_domain: String, +} + +impl Score for OKDSetup02BootstrapScore { + fn create_interpret(&self) -> Box> { + Box::new(OKDSetup02BootstrapInterpret::new(self.clone())) + } + + fn name(&self) -> String { + "OKDSetup02BootstrapScore".to_string() + } +} + +#[derive(Debug, Clone)] +struct OKDSetup02BootstrapInterpret { + score: OKDSetup02BootstrapScore, + version: Version, + status: InterpretStatus, +} + +impl OKDSetup02BootstrapInterpret { + pub fn new(score: OKDSetup02BootstrapScore) -> Self { + let version = Version::from("1.0.0").unwrap(); + Self { + version, + score, + status: InterpretStatus::QUEUED, + } + } + + async fn render_per_mac_pxe(&self) -> Result<(), InterpretError> { + // Placeholder: use Harmony templates to emit {MAC}.ipxe selecting SCOS live + bootstrap ignition. + info!("[Bootstrap] Rendering per-MAC PXE for bootstrap node"); + Ok(()) + } + + async fn reboot_target(&self) -> Result<(), InterpretError> { + // Placeholder: ssh reboot using the inventory ephemeral key + info!("[Bootstrap] Rebooting bootstrap node via SSH"); + Ok(()) + } + + async fn wait_for_bootstrap_complete(&self) -> Result<(), InterpretError> { + // Placeholder: wait-for bootstrap-complete + info!("[Bootstrap] Waiting for bootstrap-complete …"); + Ok(()) + } +} + +#[async_trait] +impl Interpret for OKDSetup02BootstrapInterpret { + fn get_name(&self) -> InterpretName { + InterpretName::Custom("OKDSetup02Bootstrap") + } + + fn get_version(&self) -> Version { + self.version.clone() + } + + fn get_status(&self) -> InterpretStatus { + self.status.clone() + } + + fn get_children(&self) -> Vec { + vec![] + } + + async fn execute( + &self, + _inventory: &Inventory, + _topology: &T, + ) -> Result { + self.render_per_mac_pxe().await?; + self.reboot_target().await?; + self.wait_for_bootstrap_complete().await?; + + Ok(Outcome::new( + InterpretStatus::SUCCESS, + "Bootstrap phase complete".into(), + )) + } +} + +// ------------------------------------------------------------------------------------------------- +// Step 03: Control Plane +// - Render per-MAC PXE & ignition for cp0/cp1/cp2. +// - Persist bonding via MachineConfigs (or NNCP) once SCOS is active. +// ------------------------------------------------------------------------------------------------- + +#[derive(Debug, Clone, Serialize, new)] +struct OKDSetup03ControlPlaneScore {} + +impl Score for OKDSetup03ControlPlaneScore { + fn create_interpret(&self) -> Box> { + Box::new(OKDSetup03ControlPlaneInterpret::new(self.clone())) + } + + fn name(&self) -> String { + "OKDSetup03ControlPlaneScore".to_string() + } +} + +#[derive(Debug, Clone)] +struct OKDSetup03ControlPlaneInterpret { + score: OKDSetup03ControlPlaneScore, + version: Version, + status: InterpretStatus, +} + +impl OKDSetup03ControlPlaneInterpret { + pub fn new(score: OKDSetup03ControlPlaneScore) -> Self { + let version = Version::from("1.0.0").unwrap(); + Self { + version, + score, + status: InterpretStatus::QUEUED, + } + } + + async fn render_and_reboot(&self) -> Result<(), InterpretError> { + info!("[ControlPlane] Rendering per-MAC PXE for masters and rebooting"); + Ok(()) + } + + async fn persist_network_bond(&self) -> Result<(), InterpretError> { + // Generate MC or NNCP from inventory NIC data; apply via ignition or post-join. + info!("[ControlPlane] Ensuring persistent bonding via MachineConfig/NNCP"); + Ok(()) + } +} + +#[async_trait] +impl Interpret for OKDSetup03ControlPlaneInterpret { + fn get_name(&self) -> InterpretName { + InterpretName::Custom("OKDSetup03ControlPlane") + } + + fn get_version(&self) -> Version { + self.version.clone() + } + + fn get_status(&self) -> InterpretStatus { + self.status.clone() + } + + fn get_children(&self) -> Vec { + vec![] + } + + async fn execute( + &self, + _inventory: &Inventory, + _topology: &T, + ) -> Result { + self.render_and_reboot().await?; + self.persist_network_bond().await?; + Ok(Outcome::new( + InterpretStatus::SUCCESS, + "Control plane provisioned".into(), + )) + } +} + +// ------------------------------------------------------------------------------------------------- +// Step 04: Workers +// - Render per-MAC PXE & ignition for workers; join nodes. +// - Persist bonding via MC/NNCP as required (same approach as masters). +// ------------------------------------------------------------------------------------------------- + +#[derive(Debug, Clone, Serialize, new)] +struct OKDSetup04WorkersScore {} + +impl Score for OKDSetup04WorkersScore { + fn create_interpret(&self) -> Box> { + Box::new(OKDSetup04WorkersInterpret::new(self.clone())) + } + + fn name(&self) -> String { + "OKDSetup04WorkersScore".to_string() + } +} + +#[derive(Debug, Clone)] +struct OKDSetup04WorkersInterpret { + score: OKDSetup04WorkersScore, + version: Version, + status: InterpretStatus, +} + +impl OKDSetup04WorkersInterpret { + pub fn new(score: OKDSetup04WorkersScore) -> Self { + let version = Version::from("1.0.0").unwrap(); + Self { + version, + score, + status: InterpretStatus::QUEUED, + } + } + + async fn render_and_reboot(&self) -> Result<(), InterpretError> { + info!("[Workers] Rendering per-MAC PXE for workers and rebooting"); + Ok(()) + } +} + +#[async_trait] +impl Interpret for OKDSetup04WorkersInterpret { + fn get_name(&self) -> InterpretName { + InterpretName::Custom("OKDSetup04Workers") + } + + fn get_version(&self) -> Version { + self.version.clone() + } + + fn get_status(&self) -> InterpretStatus { + self.status.clone() + } + + fn get_children(&self) -> Vec { + vec![] + } + + async fn execute( + &self, + _inventory: &Inventory, + _topology: &T, + ) -> Result { + self.render_and_reboot().await?; + Ok(Outcome::new( + InterpretStatus::SUCCESS, + "Workers provisioned".into(), + )) + } +} + +// ------------------------------------------------------------------------------------------------- +// Step 05: Sanity Check +// - Validate API reachability, ClusterOperators, ingress, and SDN status. +// ------------------------------------------------------------------------------------------------- + +#[derive(Debug, Clone, Serialize, new)] +struct OKDSetup05SanityCheckScore {} + +impl Score for OKDSetup05SanityCheckScore { + fn create_interpret(&self) -> Box> { + Box::new(OKDSetup05SanityCheckInterpret::new(self.clone())) + } + + fn name(&self) -> String { + "OKDSetup05SanityCheckScore".to_string() + } +} + +#[derive(Debug, Clone)] +struct OKDSetup05SanityCheckInterpret { + score: OKDSetup05SanityCheckScore, + version: Version, + status: InterpretStatus, +} + +impl OKDSetup05SanityCheckInterpret { + pub fn new(score: OKDSetup05SanityCheckScore) -> Self { + let version = Version::from("1.0.0").unwrap(); + Self { + version, + score, + status: InterpretStatus::QUEUED, + } + } + + async fn run_checks(&self) -> Result<(), InterpretError> { + info!("[Sanity] Checking API, COs, Ingress, and SDN health …"); + Ok(()) + } +} + +#[async_trait] +impl Interpret for OKDSetup05SanityCheckInterpret { + fn get_name(&self) -> InterpretName { + InterpretName::Custom("OKDSetup05SanityCheck") + } + + fn get_version(&self) -> Version { + self.version.clone() + } + + fn get_status(&self) -> InterpretStatus { + self.status.clone() + } + + fn get_children(&self) -> Vec { + vec![] + } + + async fn execute( + &self, + _inventory: &Inventory, + _topology: &T, + ) -> Result { + self.run_checks().await?; + Ok(Outcome::new( + InterpretStatus::SUCCESS, + "Sanity checks passed".into(), + )) + } +} + +// ------------------------------------------------------------------------------------------------- +// Step 06: Installation Report +// - Emit JSON and concise human summary of nodes, roles, versions, and health. +// ------------------------------------------------------------------------------------------------- + +#[derive(Debug, Clone, Serialize, new)] +struct OKDSetup06InstallationReportScore { + public_domain: String, + internal_domain: String, +} + +impl Score for OKDSetup06InstallationReportScore { + fn create_interpret(&self) -> Box> { + Box::new(OKDSetup06InstallationReportInterpret::new(self.clone())) + } + + fn name(&self) -> String { + "OKDSetup06InstallationReportScore".to_string() + } +} + +#[derive(Debug, Clone)] +struct OKDSetup06InstallationReportInterpret { + score: OKDSetup06InstallationReportScore, + version: Version, + status: InterpretStatus, +} + +impl OKDSetup06InstallationReportInterpret { + pub fn new(score: OKDSetup06InstallationReportScore) -> Self { + let version = Version::from("1.0.0").unwrap(); + Self { + version, + score, + status: InterpretStatus::QUEUED, + } + } + + async fn generate(&self) -> Result<(), InterpretError> { + info!( + "[Report] Generating installation report for {} / {}", + self.score.public_domain, self.score.internal_domain + ); + Ok(()) + } +} + +#[async_trait] +impl Interpret for OKDSetup06InstallationReportInterpret { + fn get_name(&self) -> InterpretName { + InterpretName::Custom("OKDSetup06InstallationReport") + } + + fn get_version(&self) -> Version { + self.version.clone() + } + + fn get_status(&self) -> InterpretStatus { + self.status.clone() + } + + fn get_children(&self) -> Vec { + vec![] + } + + async fn execute( + &self, + _inventory: &Inventory, + _topology: &T, + ) -> Result { + self.generate().await?; + Ok(Outcome::new( + InterpretStatus::SUCCESS, + "Installation report generated".into(), + )) + } +} diff --git a/harmony/src/modules/okd/mod.rs b/harmony/src/modules/okd/mod.rs index fe61b1e..2cab1ad 100644 --- a/harmony/src/modules/okd/mod.rs +++ b/harmony/src/modules/okd/mod.rs @@ -3,5 +3,6 @@ pub mod bootstrap_load_balancer; pub mod dhcp; pub mod dns; pub mod ipxe; +pub mod installation; pub mod load_balancer; pub mod upgrade; From c372e781d887ee7e0d8249ee6d0db39003b9f92d Mon Sep 17 00:00:00 2001 From: Jean-Gabriel Gill-Couture Date: Tue, 19 Aug 2025 12:42:15 -0400 Subject: [PATCH 03/11] doc(okdinstallationscore): Fix incorrect comments and remove some more useless comments --- harmony/src/modules/okd/installation.rs | 55 ++++++------------------- 1 file changed, 12 insertions(+), 43 deletions(-) diff --git a/harmony/src/modules/okd/installation.rs b/harmony/src/modules/okd/installation.rs index f9f59e2..bacc849 100644 --- a/harmony/src/modules/okd/installation.rs +++ b/harmony/src/modules/okd/installation.rs @@ -2,39 +2,29 @@ //! //! Overview //! -------- -//! OKDInstallationScore orchestrates an end-to-end, bare-metal OKD (OpenShift/OKD 4.19) -//! installation using Harmony’s strongly-typed Scores and Interprets. It encodes the -//! “discovery-first, then provision” strategy with strict ordering, observable progress, -//! and minimal assumptions about the underlying network. -//! -//! Design goals -//! - Deterministic, observable pipeline from unknown hardware to a healthy OKD cluster. -//! - Do NOT require LACP bonding during PXE/inventory. Bonding is configured only -//! after the host has a stable OS on disk (SCOS/RHCOS) and OKD MachineConfigs/NNCP -//! can enforce persistence safely. -//! - Support per-MAC iPXE rendering without requiring multiple DHCP reservations for -//! the same host. Discovery runs with generic DHCP (access/unbonded). Role-specific -//! per-MAC PXE entries are activated just-in-time before install. -//! - Emit HarmonyEvent instrumentation at each step via the Score::interpret path. +//! OKDInstallationScore orchestrates an end-to-end, bare-metal OKD (OpenShift/OKD 4.19). +//! It follows principles of “discovery-first, then provision” strategy with strict ordering, +//! observable progress, and minimal assumptions about the underlying network. //! //! High-level flow //! 1) OKDSetup01Inventory //! - Serve default iPXE + Kickstart (in-RAM CentOS Stream 9) for discovery only. -//! - Enable SSH with the cluster’s ephemeral pubkey, start a Rust inventory agent. +//! - Enable SSH with the cluster’s pubkey, start a Rust inventory agent. //! - Harmony discovers nodes by scraping the agent endpoint and collects MACs/NICs. -//! - DNS: optionally register temporary hostnames and enable DHCP lease registration. //! //! 2) OKDSetup02Bootstrap //! - User selects which discovered node becomes bootstrap. +//! - Prepare the OKD cluster installation files //! - Render per-MAC iPXE for bootstrap with OKD 4.19 SCOS live assets + ignition. //! - Reboot node via SSH; install bootstrap; wait for bootstrap-complete. //! //! 3) OKDSetup03ControlPlane -//! - Render per-MAC iPXE for cp0/cp1/cp2 with ignition (includes persistent bond via -//! MachineConfig or NNCP if required). Reboot via SSH, join masters. +//! - Render per-MAC iPXE for cp0/cp1/cp2 with ignition. Reboot via SSH, join masters. +//! - Configure network bond (where relevant) using OKD NMState MachineConfig //! //! 4) OKDSetup04Workers //! - Render per-MAC iPXE for worker set; join workers. +//! - Configure network bond (where relevant) using OKD NMState MachineConfig //! //! 5) OKDSetup05SanityCheck //! - Validate API/ingress/clusteroperators; ensure healthy control plane and SDN. @@ -49,33 +39,18 @@ //! do we set the bond persistently. If early bonding is truly required on a host, //! use kernel args selectively in the per-MAC PXE for that host, but never for the //! generic discovery path. -//! -//! DNS and hostname -//! - Because a single host may present multiple MACs, but DHCP/ISC on OPNsense may not -//! easily support “one hostname across multiple MACs” in a single lease entry, we avoid -//! strict hostname binding during discovery. We rely on dynamic leases and record the -//! mapping (IP/MAC) at scrape time. -//! - Once a role is assigned, we render a per-MAC PXE entry and ensure the role-specific -//! DNS A/AAAA/CNAME entries are present (e.g., api, api-int, apps wildcard). This keeps -//! DHCP simple and DNS consistent for OKD. -//! -//! Instrumentation -//! - All child Scores are executed via Score::interpret, which emits HarmonyEvent -//! InterpretExecutionStarted/Finished. The orchestrator also emits HarmonyStarted/ -//! HarmonyFinished around the full pipeline execution. +//! - This is caused by the inherent race condition between PXE, which cannot perform +//! its DHCP recovery process on a bonded network, and the bond configuration itself, +//! which must be configured on host AND switch to connect properly. //! //! Configuration knobs //! - lan_cidr: CIDR to scan/allow for discovery endpoints. //! - public_domain: External wildcard/apps domain (e.g., apps.example.com). //! - internal_domain: Internal cluster domain (e.g., cluster.local or harmony.mcd). -//! -//! Notes -//! - This file co-locates step Scores for ease of review. In follow-up changes, refactor -//! step Scores (OKDSetupXX*) into separate modules. use async_trait::async_trait; use derive_new::new; -use harmony_macros::{ip, ipv4}; +use harmony_macros::ip; use log::info; use serde::{Deserialize, Serialize}; @@ -241,22 +216,16 @@ impl Interpret for OKDInstallationInterpret { self.score.public_domain, self.score.internal_domain, self.score.lan_cidr ); - // 1) Inventory (default PXE, in-RAM kickstart, Rust inventory agent) self.run_inventory_phase(inventory, topology).await?; - // 2) Bootstrap (render per-MAC iPXE + ignition; reboot node; wait for bootstrap complete) self.run_bootstrap_phase(inventory, topology).await?; - // 3) Control plane self.run_control_plane_phase(inventory, topology).await?; - // 4) Workers self.run_workers_phase(inventory, topology).await?; - // 5) Sanity checks self.run_sanity_phase(inventory, topology).await?; - // 6) Installation report self.run_report_phase(inventory, topology).await?; instrument(HarmonyEvent::HarmonyFinished).ok(); From b6be44202e8f7c66ea5e65cc5fb01b679285c2fd Mon Sep 17 00:00:00 2001 From: Jean-Gabriel Gill-Couture Date: Mon, 1 Sep 2025 14:14:29 -0400 Subject: [PATCH 04/11] chore: rebase okd installation with refactoring on core types --- Cargo.lock | 12 +----------- harmony/src/modules/okd/installation.rs | 17 +++++++++-------- harmony/src/modules/okd/mod.rs | 2 +- 3 files changed, 11 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 456ca45..62d8aee 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2313,6 +2313,7 @@ dependencies = [ "temp-dir", "temp-file", "tempfile", + "thiserror 2.0.14", "tokio", "tokio-util", "url", @@ -3103,17 +3104,6 @@ dependencies = [ "libc", ] -[[package]] -name = "io-uring" -version = "0.7.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d93587f37623a1a17d94ef2bc9ada592f5465fe7732084ab7beefabe5c77c0c4" -dependencies = [ - "bitflags 2.9.1", - "cfg-if", - "libc", -] - [[package]] name = "ipnet" version = "2.11.0" diff --git a/harmony/src/modules/okd/installation.rs b/harmony/src/modules/okd/installation.rs index bacc849..58b6942 100644 --- a/harmony/src/modules/okd/installation.rs +++ b/harmony/src/modules/okd/installation.rs @@ -51,6 +51,7 @@ use async_trait::async_trait; use derive_new::new; use harmony_macros::ip; +use harmony_types::id::Id; use log::info; use serde::{Deserialize, Serialize}; @@ -200,7 +201,7 @@ impl Interpret for OKDInstallationInterpret { self.status.clone() } - fn get_children(&self) -> Vec { + fn get_children(&self) -> Vec { vec![] } @@ -324,7 +325,7 @@ impl Interpret for OKDSetup01InventoryDnsInterpret { self.status.clone() } - fn get_children(&self) -> Vec { + fn get_children(&self) -> Vec { vec![] } @@ -420,7 +421,7 @@ impl Interpret for OKDSetup01InventoryInterpret { self.status.clone() } - fn get_children(&self) -> Vec { + fn get_children(&self) -> Vec { vec![] } @@ -513,7 +514,7 @@ impl Interpret for OKDSetup02BootstrapInterpret { self.status.clone() } - fn get_children(&self) -> Vec { + fn get_children(&self) -> Vec { vec![] } @@ -595,7 +596,7 @@ impl Interpret for OKDSetup03ControlPlaneInterpret { self.status.clone() } - fn get_children(&self) -> Vec { + fn get_children(&self) -> Vec { vec![] } @@ -669,7 +670,7 @@ impl Interpret for OKDSetup04WorkersInterpret { self.status.clone() } - fn get_children(&self) -> Vec { + fn get_children(&self) -> Vec { vec![] } @@ -741,7 +742,7 @@ impl Interpret for OKDSetup05SanityCheckInterpret { self.status.clone() } - fn get_children(&self) -> Vec { + fn get_children(&self) -> Vec { vec![] } @@ -819,7 +820,7 @@ impl Interpret for OKDSetup06InstallationReportInterpret { self.status.clone() } - fn get_children(&self) -> Vec { + fn get_children(&self) -> Vec { vec![] } diff --git a/harmony/src/modules/okd/mod.rs b/harmony/src/modules/okd/mod.rs index 2cab1ad..b5ba462 100644 --- a/harmony/src/modules/okd/mod.rs +++ b/harmony/src/modules/okd/mod.rs @@ -2,7 +2,7 @@ pub mod bootstrap_dhcp; pub mod bootstrap_load_balancer; pub mod dhcp; pub mod dns; -pub mod ipxe; pub mod installation; +pub mod ipxe; pub mod load_balancer; pub mod upgrade; From 0a5da43c768f339687d884cc14a2b11c79ec9bd6 Mon Sep 17 00:00:00 2001 From: Ian Letourneau Date: Thu, 4 Sep 2025 14:28:57 -0400 Subject: [PATCH 05/11] demo: describe the storyline of the talk --- .../storyline.md | 132 ++++++++++++++++++ 1 file changed, 132 insertions(+) create mode 100644 demos/cncf-k8s-quebec-meetup-september-2025/storyline.md diff --git a/demos/cncf-k8s-quebec-meetup-september-2025/storyline.md b/demos/cncf-k8s-quebec-meetup-september-2025/storyline.md new file mode 100644 index 0000000..43718e0 --- /dev/null +++ b/demos/cncf-k8s-quebec-meetup-september-2025/storyline.md @@ -0,0 +1,132 @@ +# Harmony, Orchestrateur d'infrastructure open-source + +**Target Duration:** 25 minutes\ +**Tone:** Friendly, expert-to-expert, inspiring. + +--- + +#### **Slide 1: Title Slide** + +- **Visual:** Clean and simple. Your company logo (NationTech) and the Harmony logo. + +--- + +#### **Slide 2: The YAML Labyrinth** + +**Goal:** Get every head in the room nodding in agreement. Start with their world, not yours. + +- **Visual:** + - Option A: "The Pull Request from Hell". A screenshot of a GitHub pull request for a seemingly minor change that touches dozens of YAML files across multiple directories. A sea of red and green diffs that is visually overwhelming. + - Option B: A complex flowchart connecting dozens of logos: Terraform, Ansible, K8s, Helm, etc. +- **Narration:**\ + [...ADD SOMETHING FOR INTRODUCTION...]\ + "We love the power that tools like Kubernetes and the CNCF landscape have given us. But let's be honest... when did our infrastructure code start looking like _this_?"\ + "We have GitOps, which is great. But it often means we're managing this fragile cathedral of YAML, Helm charts, and brittle scripts. We spend more time debugging indentation and tracing variables than we do building truly resilient systems." + +--- + +#### **Slide 3: The Real Cost of Infrastructure** + +- **Visual:** "The Jenga Tower of Tools". A tall, precarious Jenga tower where each block is the logo of a different tool (Terraform, K8s, Helm, Ansible, Prometheus, ArgoCD, etc.). One block near the bottom is being nervously pulled out. +- **Narration:** + "The real cost isn't just complexity; it's the constant need to choose, learn, integrate, and operate a dozen different tools, each with its own syntax and failure modes. It's the nagging fear that a tiny typo in a config file could bring everything down. Click-ops isn't the answer, but the current state of IaC feels like we've traded one problem for another." + +--- + +#### **Slide 4: The Broken Promise of "Code"** + +**Goal:** Introduce the core idea before introducing the product. This makes the solution feel inevitable. + +- **(Initial Visual):** A two-panel slide. + - **Left Panel Title: "The Plan"** - A terminal showing a green, successful `terraform plan` output. + - **Right Panel Title: "The Reality"** - The _next_ screen in the terminal, showing the `terraform apply` failing with a cascade of red error text. +- **Narration:** + "We call our discipline **Infrastructure as Code**. And we've all been here. Our 'compiler' is a `terraform plan` that says everything looks perfect. We get the green light." + (Pause for a beat) + "And then we `apply`, and reality hits. It fails halfway through, at runtime, when it's most expensive and painful to fix." + +**(Click to transition the slide)** + +- **(New Visual):** The entire slide is replaced by a clean screenshot of a code editor (like nvim 😉) showing Harmony's Rust DSL. A red squiggly line is under a config line. The error message is clear in the "Problems" panel: `error: Incompatible deployment. Production target 'gcp-prod-cluster' requires a StorageClass with 'snapshots' capability, but 'standard-sc' does not provide it.` +- **Narration (continued):** + "In software development, we solved these problems years ago. We don't accept 'it compiled, but crashed on startup'. We have real tools, type systems, compilers, test frameworks, and IDEs that catch our mistakes before they ever reach production. **So, what if we could treat our entire infrastructure... like a modern, compiled application?**" + "What if your infrastructure code could get compile-time checks, straight into the editor... instead of runtime panics and failures at 3 AM in production?" + +--- + +#### **Slide 5: Introducing Harmony** + +**Goal:** Introduce Harmony as the answer to the "What If?" question. + +- **Visual:** The Harmony logo, large and centered. +- **Tagline:** `Infrastructure in type-safe Rust. No YAML required.` +- **Narration:** + "This is Harmony. It's an open-source orchestrator that lets you define your entire stack — from a dev laptop to a multi-site bare-metal cluster—in a single, type-safe Rust codebase." + +--- + +#### **Slide 6: Before & After** + +- **Visual:** A side-by-side comparison. Left side: A screen full of complex, nested YAML. Right side: 10-15 lines of clean, readable Harmony Rust DSL that accomplishes the same thing. +- **Narration:** + "This is the difference. On the left, the fragile world of strings and templates. On the right, a portable, verifiable program that describes your apps, your infra, and your operations. We unify scaffolding, provisioning, and Day-2 ops, all verified by the Rust compiler. But enough slides... let's see it in action." + +--- + +#### **Slide 7: Live Demo: Zero to Monitored App** + +**Goal:** Show, don't just tell. Make it look effortless. This is where you build the "dream." + +- **Visual:** Your terminal/IDE, ready to go. +- **Narration Guide:** + "Okay, for this demo, we're going to take a standard web app from GitHub. Nothing special about it." + _(Show the repo)_ + "Now, let's bring it into Harmony. This is the entire definition we need to describe the application and its needs." + _(Show the Rust DSL)_ + "First, let's run it locally on k3d. The exact same definition for dev as for prod." + _(Deploy locally, show it works)_ + "Cool. But a real app needs monitoring. In Harmony, that's just adding a feature to our code." + _(Uncomment one line: `.with_feature(Monitoring)` and redeploy)_ + "And just like that, we have a fully configured Prometheus and Grafana stack, scraping our app. No YAML, no extra config." + "Finally, let's push this to our production staging cluster. We just change the target and specify our multi-site Ceph storage." + _(Deploy to the remote cluster)_ + "And there it is. We've gone from a simple web app to a monitored, enterprise-grade service in minutes." + +--- + +#### **Slide 8: Live Demo: Embracing Chaos** + +**Goal:** Prove the "predictable" and "resilient" claims in the most dramatic way possible. + +- **Visual:** A slide showing a map or diagram of your distributed infrastructure (the different data centers). Then switch back to your terminal. +- **Narration Guide:** + "This is great when things are sunny. But production is chaos. So... let's break things. On purpose." + "First, a network failure." _(Kill a switch/link, show app is still up)_ + "Now, let's power off a storage server." _(Force off a server, show Ceph healing and the app is unaffected)_ + "How about a control plane node?" _(Force off a k8s control plane, show the cluster is still running)_ + "Okay, for the grand finale. What if we have a cascading failure? I'm going to kill _another_ storage server. This should cause a total failure in this data center." + _(Force off the second server, narrate what's happening)_ + "And there it is... Ceph has lost quorum in this site... and Harmony has automatically failed everything over to our other datacenter. The app is still running." + +--- + +#### **Slide 9: The New Reality** + +**Goal:** Summarize the dream and tell the audience what you want them to do. + +- **Visual:** The clean, simple Harmony Rust DSL code from Slide 6. A summary of what was just accomplished is listed next to it: `✓ GitHub to Prod in minutes`, `✓ Type-Safe Validation`, `✓ Built-in Monitoring`, `✓ Automated Multi-Site Failover`. +- **Narration:** + "So, in just a few minutes, we went from a simple web app to a multi-site, monitored, and chaos-proof production deployment. We did it with a small amount of code that is easy to read, easy to verify, and completely portable. This is our vision: to offload the complexity, and make infrastructure simple, predictable, and even fun again." + +--- + +#### **Slide 10: Join Us** + +- **Visual:** A clean, final slide with QR codes and links. + - GitHub Repo (`github.com/nation-tech/harmony`) + - Website (`harmony.sh` or similar) + - Your contact info (`jg@nation.tech` / LinkedIn / Twitter) +- **Narration:** + "Harmony is open-source, AGPLv3. We believe this is the future, but we're just getting started. We know this crowd has great infrastructure minds out there, and we need your feedback. Please, check out the project on GitHub. Star it if you like what you see. Tell us what's missing. Let's build this future together. Thank you." + +**(Open for Q&A)** From ad2ae2e4f86eb13016c9e2c4e3a4f5d0a014044e Mon Sep 17 00:00:00 2001 From: Willem Date: Mon, 8 Sep 2025 13:52:25 +0000 Subject: [PATCH 06/11] feat(example): added an example of packaging a rust app from github (#124) * better caching when building docker images for app Reviewed-on: https://git.nationtech.io/NationTech/harmony/pulls/124 Reviewed-by: johnride Co-authored-by: Willem Co-committed-by: Willem --- .gitmodules | 3 + Cargo.lock | 44 +++ .../src/main.rs | 1 + examples/rust/src/main.rs | 3 +- examples/try_rust_webapp/Cargo.toml | 17 ++ examples/try_rust_webapp/src/main.rs | 52 ++++ examples/try_rust_webapp/tryrust.org | 1 + harmony/Cargo.toml | 1 + harmony/src/modules/application/rust.rs | 270 ++++++++++++------ 9 files changed, 299 insertions(+), 93 deletions(-) create mode 100644 .gitmodules create mode 100644 examples/try_rust_webapp/Cargo.toml create mode 100644 examples/try_rust_webapp/src/main.rs create mode 160000 examples/try_rust_webapp/tryrust.org diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..4438aa2 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "examples/try_rust_webapp/tryrust.org"] + path = examples/try_rust_webapp/tryrust.org + url = https://github.com/rust-dd/tryrust.org.git diff --git a/Cargo.lock b/Cargo.lock index 62d8aee..8cdfe97 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1838,6 +1838,21 @@ dependencies = [ "url", ] +[[package]] +name = "example-try-rust-webapp" +version = "0.1.0" +dependencies = [ + "base64 0.22.1", + "env_logger", + "harmony", + "harmony_cli", + "harmony_macros", + "harmony_types", + "log", + "tokio", + "url", +] + [[package]] name = "example-tui" version = "0.1.0" @@ -2318,6 +2333,7 @@ dependencies = [ "tokio-util", "url", "uuid", + "walkdir", ] [[package]] @@ -4955,6 +4971,15 @@ dependencies = [ "cipher", ] +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + [[package]] name = "schannel" version = "0.1.27" @@ -6494,6 +6519,16 @@ dependencies = [ "libc", ] +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + [[package]] name = "want" version = "0.3.1" @@ -6676,6 +6711,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" +[[package]] +name = "winapi-util" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0978bf7171b3d90bac376700cb56d606feb40f251a475a5d6634613564460b22" +dependencies = [ + "windows-sys 0.60.2", +] + [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" diff --git a/examples/application_monitoring_with_tenant/src/main.rs b/examples/application_monitoring_with_tenant/src/main.rs index 7e60703..f46a993 100644 --- a/examples/application_monitoring_with_tenant/src/main.rs +++ b/examples/application_monitoring_with_tenant/src/main.rs @@ -30,6 +30,7 @@ async fn main() { domain: Url::Url(url::Url::parse("https://rustapp.harmony.example.com").unwrap()), project_root: PathBuf::from("./examples/rust/webapp"), framework: Some(RustWebFramework::Leptos), + service_port: 3000, }); let webhook_receiver = WebhookReceiver { diff --git a/examples/rust/src/main.rs b/examples/rust/src/main.rs index b361edd..031887b 100644 --- a/examples/rust/src/main.rs +++ b/examples/rust/src/main.rs @@ -20,8 +20,9 @@ async fn main() { let application = Arc::new(RustWebapp { name: "harmony-example-rust-webapp".to_string(), domain: Url::Url(url::Url::parse("https://rustapp.harmony.example.com").unwrap()), - project_root: PathBuf::from("./webapp"), // Relative from 'harmony-path' param + project_root: PathBuf::from("./webapp"), framework: Some(RustWebFramework::Leptos), + service_port: 3000, }); let discord_receiver = DiscordWebhook { diff --git a/examples/try_rust_webapp/Cargo.toml b/examples/try_rust_webapp/Cargo.toml new file mode 100644 index 0000000..fc4f8a1 --- /dev/null +++ b/examples/try_rust_webapp/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "example-try-rust-webapp" +edition = "2024" +version.workspace = true +readme.workspace = true +license.workspace = true + +[dependencies] +harmony = { path = "../../harmony" } +harmony_cli = { path = "../../harmony_cli" } +harmony_types = { path = "../../harmony_types" } +harmony_macros = { path = "../../harmony_macros" } +tokio = { workspace = true } +log = { workspace = true } +env_logger = { workspace = true } +url = { workspace = true } +base64.workspace = true diff --git a/examples/try_rust_webapp/src/main.rs b/examples/try_rust_webapp/src/main.rs new file mode 100644 index 0000000..6e1ab63 --- /dev/null +++ b/examples/try_rust_webapp/src/main.rs @@ -0,0 +1,52 @@ +use std::{path::PathBuf, sync::Arc}; + +use harmony::{ + inventory::Inventory, + modules::{ + application::{ + ApplicationScore, RustWebFramework, RustWebapp, + features::{ContinuousDelivery, Monitoring}, + }, + monitoring::alert_channel::discord_alert_channel::DiscordWebhook, + }, + topology::K8sAnywhereTopology, +}; +use harmony_types::net::Url; + +#[tokio::main] +async fn main() { + let application = Arc::new(RustWebapp { + name: "harmony-example-tryrust".to_string(), + domain: Url::Url(url::Url::parse("https://tryrust.harmony.example.com").unwrap()), + project_root: PathBuf::from("./tryrust.org"), + framework: Some(RustWebFramework::Leptos), + service_port: 8080, + }); + + let discord_receiver = DiscordWebhook { + name: "test-discord".to_string(), + url: Url::Url(url::Url::parse("https://discord.doesnt.exist.com").unwrap()), + }; + + let app = ApplicationScore { + features: vec![ + Box::new(ContinuousDelivery { + application: application.clone(), + }), + Box::new(Monitoring { + application: application.clone(), + alert_receiver: vec![Box::new(discord_receiver)], + }), + ], + application, + }; + + harmony_cli::run( + Inventory::autoload(), + K8sAnywhereTopology::from_env(), + vec![Box::new(app)], + None, + ) + .await + .unwrap(); +} diff --git a/examples/try_rust_webapp/tryrust.org b/examples/try_rust_webapp/tryrust.org new file mode 160000 index 0000000..0f9ba14 --- /dev/null +++ b/examples/try_rust_webapp/tryrust.org @@ -0,0 +1 @@ +Subproject commit 0f9ba145172867f467e5320b37d07a5bbb7dd438 diff --git a/harmony/Cargo.toml b/harmony/Cargo.toml index 07a2480..9372a7b 100644 --- a/harmony/Cargo.toml +++ b/harmony/Cargo.toml @@ -66,6 +66,7 @@ tar.workspace = true base64.workspace = true thiserror.workspace = true once_cell = "1.21.3" +walkdir = "2.5.0" harmony_inventory_agent = { path = "../harmony_inventory_agent" } harmony_secret_derive = { version = "0.1.0", path = "../harmony_secret_derive" } askama.workspace = true diff --git a/harmony/src/modules/application/rust.rs b/harmony/src/modules/application/rust.rs index 40c85bb..0d204cc 100644 --- a/harmony/src/modules/application/rust.rs +++ b/harmony/src/modules/application/rust.rs @@ -1,4 +1,5 @@ -use std::fs; +use std::fs::{self, File}; +use std::io::Read; use std::path::{Path, PathBuf}; use std::process; use std::sync::Arc; @@ -12,7 +13,8 @@ use dockerfile_builder::instruction_builder::CopyBuilder; use futures_util::StreamExt; use log::{debug, info, log_enabled}; use serde::Serialize; -use tar::Archive; +use tar::{Archive, Builder, Header}; +use walkdir::WalkDir; use crate::config::{REGISTRY_PROJECT, REGISTRY_URL}; use crate::{score::Score, topology::Topology}; @@ -59,6 +61,7 @@ pub struct RustWebapp { pub domain: Url, /// The path to the root of the Rust project to be containerized. pub project_root: PathBuf, + pub service_port: u32, pub framework: Option, } @@ -158,45 +161,99 @@ impl RustWebapp { image_name: &str, ) -> Result> { debug!("Generating Dockerfile for '{}'", self.name); - let _dockerfile_path = self.build_dockerfile()?; - - let docker = Docker::connect_with_socket_defaults().unwrap(); - + let dockerfile = self.get_or_build_dockerfile(); let quiet = !log_enabled!(log::Level::Debug); - - let build_image_options = bollard::query_parameters::BuildImageOptionsBuilder::default() - .dockerfile("Dockerfile.harmony") - .t(image_name) - .q(quiet) - .version(bollard::query_parameters::BuilderVersion::BuilderV1) - .platform("linux/x86_64"); - - let mut temp_tar_builder = tar::Builder::new(Vec::new()); - temp_tar_builder - .append_dir_all("", self.project_root.clone()) - .unwrap(); - let archive = temp_tar_builder - .into_inner() - .expect("couldn't finish creating tar"); - let archived_files = Archive::new(archive.as_slice()) - .entries() + match dockerfile .unwrap() - .map(|entry| entry.unwrap().path().unwrap().into_owned()) - .collect::>(); + .file_name() + .and_then(|os_str| os_str.to_str()) + { + Some(path_str) => { + debug!("Building from dockerfile {}", path_str); - debug!("files in docker tar: {:#?}", archived_files); + let tar_data = self + .create_deterministic_tar(&self.project_root.clone()) + .await + .unwrap(); - let mut image_build_stream = docker.build_image( - build_image_options.build(), - None, - Some(body_full(archive.into())), - ); + let docker = Docker::connect_with_socket_defaults().unwrap(); - while let Some(msg) = image_build_stream.next().await { - debug!("Message: {msg:?}"); + let build_image_options = + bollard::query_parameters::BuildImageOptionsBuilder::default() + .dockerfile(path_str) + .t(image_name) + .q(quiet) + .version(bollard::query_parameters::BuilderVersion::BuilderV1) + .platform("linux/x86_64"); + + let mut image_build_stream = docker.build_image( + build_image_options.build(), + None, + Some(body_full(tar_data.into())), + ); + + while let Some(msg) = image_build_stream.next().await { + debug!("Message: {msg:?}"); + } + + Ok(image_name.to_string()) + } + + None => Err(Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidData, + "Path is not valid UTF-8", + ))), } + } - Ok(image_name.to_string()) + ///normalizes timestamp and ignores files that will bust the docker cach + async fn create_deterministic_tar( + &self, + project_root: &std::path::Path, + ) -> Result, Box> { + debug!("building tar file from project root {:#?}", project_root); + let mut tar_data = Vec::new(); + { + let mut builder = Builder::new(&mut tar_data); + let ignore_prefixes = [ + "target", + ".git", + ".github", + ".harmony_generated", + "node_modules", + ]; + let mut entries: Vec<_> = WalkDir::new(project_root) + .into_iter() + .filter_map(Result::ok) + .filter(|e| e.file_type().is_file()) + .filter(|e| { + let rel_path = e.path().strip_prefix(project_root).unwrap(); + !ignore_prefixes + .iter() + .any(|prefix| rel_path.starts_with(prefix)) + }) + .collect(); + entries.sort_by_key(|e| e.path().to_owned()); + + for entry in entries { + let path = entry.path(); + let rel_path = path.strip_prefix(project_root).unwrap(); + + let mut file = fs::File::open(path)?; + let mut header = Header::new_gnu(); + + header.set_size(entry.metadata()?.len()); + header.set_mode(0o644); + header.set_mtime(0); + header.set_uid(0); + header.set_gid(0); + + builder.append_data(&mut header, rel_path, &mut file)?; + } + + builder.finish()?; + } + Ok(tar_data) } /// Tags and pushes a Docker image to the configured remote registry. @@ -272,8 +329,11 @@ impl RustWebapp { "groupadd -r appgroup && useradd -r -s /bin/false -g appgroup appuser", )); - dockerfile.push(ENV::from("LEPTOS_SITE_ADDR=0.0.0.0:3000")); - dockerfile.push(EXPOSE::from("3000/tcp")); + dockerfile.push(ENV::from(format!( + "LEPTOS_SITE_ADDR=0.0.0.0:{}", + self.service_port + ))); + dockerfile.push(EXPOSE::from(format!("{}/tcp", self.service_port))); dockerfile.push(WORKDIR::from("/home/appuser")); // Copy static files @@ -394,7 +454,7 @@ image: service: type: ClusterIP - port: 3000 + port: {} ingress: enabled: true @@ -414,112 +474,123 @@ ingress: - chart-example.local "#, - chart_name, image_repo, image_tag, self.name + chart_name, image_repo, image_tag, self.service_port, self.name ); fs::write(chart_dir.join("values.yaml"), values_yaml)?; // Create templates/_helpers.tpl - let helpers_tpl = r#" -{{/* + let helpers_tpl = format!( + r#" +{{{{/* Expand the name of the chart. -*/}} -{{- define "chart.name" -}} -{{- default .Chart.Name $.Values.nameOverride | trunc 63 | trimSuffix "-" }} -{{- end }} +*/}}}} +{{{{- define "chart.name" -}}}} +{{{{- default .Chart.Name $.Values.nameOverride | trunc 63 | trimSuffix "-" }}}} +{{{{- end }}}} -{{/* +{{{{/* Create a default fully qualified app name. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -*/}} -{{- define "chart.fullname" -}} -{{- $name := default .Chart.Name $.Values.nameOverride }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -"#; +*/}}}} +{{{{- define "chart.fullname" -}}}} +{{{{- $name := default .Chart.Name $.Values.nameOverride }}}} +{{{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}}} +{{{{- end }}}} +"# + ); fs::write(templates_dir.join("_helpers.tpl"), helpers_tpl)?; // Create templates/service.yaml - let service_yaml = r#" + let service_yaml = format!( + r#" apiVersion: v1 kind: Service metadata: - name: {{ include "chart.fullname" . }} + name: {{{{ include "chart.fullname" . }}}} spec: - type: {{ $.Values.service.type }} + type: {{{{ $.Values.service.type }}}} ports: - name: main - port: {{ $.Values.service.port | default 3000 }} - targetPort: {{ $.Values.service.port | default 3000 }} + port: {{{{ $.Values.service.port | default {} }}}} + targetPort: {{{{ $.Values.service.port | default {} }}}} protocol: TCP selector: - app: {{ include "chart.name" . }} -"#; + app: {{{{ include "chart.name" . }}}} +"#, + self.service_port, self.service_port + ); fs::write(templates_dir.join("service.yaml"), service_yaml)?; // Create templates/deployment.yaml - let deployment_yaml = r#" + let deployment_yaml = format!( + r#" apiVersion: apps/v1 kind: Deployment metadata: - name: {{ include "chart.fullname" . }} + name: {{{{ include "chart.fullname" . }}}} spec: - replicas: {{ $.Values.replicaCount }} + replicas: {{{{ $.Values.replicaCount }}}} selector: matchLabels: - app: {{ include "chart.name" . }} + app: {{{{ include "chart.name" . }}}} template: metadata: labels: - app: {{ include "chart.name" . }} + app: {{{{ include "chart.name" . }}}} spec: containers: - - name: {{ .Chart.Name }} - image: "{{ $.Values.image.repository }}:{{ $.Values.image.tag | default .Chart.AppVersion }}" - imagePullPolicy: {{ $.Values.image.pullPolicy }} + - name: {{{{ .Chart.Name }}}} + image: "{{{{ $.Values.image.repository }}}}:{{{{ $.Values.image.tag | default .Chart.AppVersion }}}}" + imagePullPolicy: {{{{ $.Values.image.pullPolicy }}}} ports: - name: main - containerPort: {{ $.Values.service.port | default 3000 }} + containerPort: {{{{ $.Values.service.port | default {} }}}} protocol: TCP -"#; +"#, + self.service_port + ); fs::write(templates_dir.join("deployment.yaml"), deployment_yaml)?; // Create templates/ingress.yaml - let ingress_yaml = r#" -{{- if $.Values.ingress.enabled -}} + let ingress_yaml = format!( + r#" +{{{{- if $.Values.ingress.enabled -}}}} apiVersion: networking.k8s.io/v1 kind: Ingress metadata: - name: {{ include "chart.fullname" . }} + name: {{{{ include "chart.fullname" . }}}} annotations: - {{- toYaml $.Values.ingress.annotations | nindent 4 }} + {{{{- toYaml $.Values.ingress.annotations | nindent 4 }}}} spec: - {{- if $.Values.ingress.tls }} + {{{{- if $.Values.ingress.tls }}}} tls: - {{- range $.Values.ingress.tls }} + {{{{- range $.Values.ingress.tls }}}} - hosts: - {{- range .hosts }} - - {{ . | quote }} - {{- end }} - secretName: {{ .secretName }} - {{- end }} - {{- end }} + {{{{- range .hosts }}}} + - {{{{ . | quote }}}} + {{{{- end }}}} + secretName: {{{{ .secretName }}}} + {{{{- end }}}} + {{{{- end }}}} rules: - {{- range $.Values.ingress.hosts }} - - host: {{ .host | quote }} + {{{{- range $.Values.ingress.hosts }}}} + - host: {{{{ .host | quote }}}} http: paths: - {{- range .paths }} - - path: {{ .path }} - pathType: {{ .pathType }} + {{{{- range .paths }}}} + - path: {{{{ .path }}}} + pathType: {{{{ .pathType }}}} backend: service: - name: {{ include "chart.fullname" $ }} + name: {{{{ include "chart.fullname" $ }}}} port: - number: {{ $.Values.service.port | default 3000 }} - {{- end }} - {{- end }} -{{- end }} -"#; + number: {{{{ $.Values.service.port | default {} }}}} + {{{{- end }}}} + {{{{- end }}}} +{{{{- end }}}} +"#, + self.service_port + ); fs::write(templates_dir.join("ingress.yaml"), ingress_yaml)?; Ok(chart_dir) @@ -571,7 +642,6 @@ spec: let chart_file_name = packaged_chart_path.file_stem().unwrap().to_str().unwrap(); let oci_push_url = format!("oci://{}/{}", *REGISTRY_URL, *REGISTRY_PROJECT); let oci_pull_url = format!("{oci_push_url}/{}-chart", self.name); - debug!( "Pushing Helm chart {} to {}", packaged_chart_path.to_string_lossy(), @@ -590,4 +660,20 @@ spec: debug!("push url {oci_push_url}"); Ok(format!("{}:{}", oci_pull_url, version)) } + + fn get_or_build_dockerfile(&self) -> Result> { + let existing_dockerfile = self.project_root.join("Dockerfile"); + + debug!("project_root = {:?}", self.project_root); + + debug!("checking = {:?}", existing_dockerfile); + if existing_dockerfile.exists() { + debug!( + "Checking path {:#?} for existing Dockerfile", + self.project_root.clone() + ); + return Ok(existing_dockerfile); + } + self.build_dockerfile() + } } From 0a324184adb92bb3b86543f9e952c9724eaa1eb6 Mon Sep 17 00:00:00 2001 From: Willem Date: Mon, 8 Sep 2025 13:59:12 +0000 Subject: [PATCH 07/11] fix/grafana-operator (#132) * deploy namespaced grafana operator in all cases Co-authored-by: Ian Letourneau Reviewed-on: https://git.nationtech.io/NationTech/harmony/pulls/132 Co-authored-by: Willem Co-committed-by: Willem --- .../src/modules/prometheus/k8s_prometheus_alerting_score.rs | 5 ----- 1 file changed, 5 deletions(-) diff --git a/harmony/src/modules/prometheus/k8s_prometheus_alerting_score.rs b/harmony/src/modules/prometheus/k8s_prometheus_alerting_score.rs index 30bc8bd..24ca918 100644 --- a/harmony/src/modules/prometheus/k8s_prometheus_alerting_score.rs +++ b/harmony/src/modules/prometheus/k8s_prometheus_alerting_score.rs @@ -197,11 +197,6 @@ impl K8sPrometheusCRDAlertingInterpret { } async fn ensure_grafana_operator(&self) -> Result { - if self.crd_exists("grafanas.grafana.integreatly.org").await { - debug!("grafana CRDs already exist — skipping install."); - return Ok(Outcome::success("Grafana CRDs already exist".to_string())); - } - let _ = Command::new("helm") .args([ "repo", From ed70bfd2363fa9f54fcc5cf92a7fbbb23aee4b25 Mon Sep 17 00:00:00 2001 From: Willem Date: Mon, 8 Sep 2025 14:04:12 +0000 Subject: [PATCH 08/11] fix/argo (#133) * remove hardcoded value for domain name and namespace Co-authored-by: Ian Letourneau Reviewed-on: https://git.nationtech.io/NationTech/harmony/pulls/133 Co-authored-by: Willem Co-committed-by: Willem --- harmony/src/domain/topology/k8s.rs | 17 +++++- .../features/continuous_delivery.rs | 10 ++-- .../application/features/helm_argocd_score.rs | 60 ++++++++++++++++--- 3 files changed, 72 insertions(+), 15 deletions(-) diff --git a/harmony/src/domain/topology/k8s.rs b/harmony/src/domain/topology/k8s.rs index c9d0d58..88bd2e8 100644 --- a/harmony/src/domain/topology/k8s.rs +++ b/harmony/src/domain/topology/k8s.rs @@ -17,7 +17,7 @@ use kube::{ }; use log::{debug, error, trace}; use serde::{Serialize, de::DeserializeOwned}; -use serde_json::json; +use serde_json::{Value, json}; use similar::TextDiff; use tokio::io::AsyncReadExt; @@ -53,6 +53,21 @@ impl K8sClient { }) } + pub async fn get_resource_json_value( + &self, + name: &str, + namespace: Option<&str>, + gvk: &GroupVersionKind, + ) -> Result { + let gvk = ApiResource::from_gvk(gvk); + let resource: Api = if let Some(ns) = namespace { + Api::namespaced_with(self.client.clone(), ns, &gvk) + } else { + Api::default_namespaced_with(self.client.clone(), &gvk) + }; + Ok(resource.get(name).await?) + } + pub async fn get_deployment( &self, name: &str, diff --git a/harmony/src/modules/application/features/continuous_delivery.rs b/harmony/src/modules/application/features/continuous_delivery.rs index 7b447d0..1bc2d9d 100644 --- a/harmony/src/modules/application/features/continuous_delivery.rs +++ b/harmony/src/modules/application/features/continuous_delivery.rs @@ -176,18 +176,18 @@ impl< } target => { info!("Deploying {} to target {target:?}", self.application.name()); + let score = ArgoHelmScore { - namespace: "harmony-example-rust-webapp".to_string(), + namespace: format!("{}", self.application.name()), openshift: true, - domain: "argo.harmonydemo.apps.ncd0.harmony.mcd".to_string(), argo_apps: vec![ArgoApplication::from(CDApplicationConfig { // helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0 version: Version::from("0.1.0").unwrap(), helm_chart_repo_url: "hub.nationtech.io/harmony".to_string(), - helm_chart_name: "harmony-example-rust-webapp-chart".to_string(), + helm_chart_name: format!("{}-chart", self.application.name()), values_overrides: None, - name: "harmony-demo-rust-webapp".to_string(), - namespace: "harmony-example-rust-webapp".to_string(), + name: format!("{}", self.application.name()), + namespace: format!("{}", self.application.name()), })], }; score diff --git a/harmony/src/modules/application/features/helm_argocd_score.rs b/harmony/src/modules/application/features/helm_argocd_score.rs index c439727..bfa3d8b 100644 --- a/harmony/src/modules/application/features/helm_argocd_score.rs +++ b/harmony/src/modules/application/features/helm_argocd_score.rs @@ -1,7 +1,10 @@ use async_trait::async_trait; +use kube::{Api, api::GroupVersionKind}; +use log::{debug, warn}; use non_blank_string_rs::NonBlankString; use serde::Serialize; -use std::str::FromStr; +use serde::de::DeserializeOwned; +use std::{process::Command, str::FromStr, sync::Arc}; use crate::{ data::Version, @@ -9,7 +12,9 @@ use crate::{ inventory::Inventory, modules::helm::chart::{HelmChartScore, HelmRepository}, score::Score, - topology::{HelmCommand, K8sclient, Topology}, + topology::{ + HelmCommand, K8sclient, PreparationError, PreparationOutcome, Topology, k8s::K8sClient, + }, }; use harmony_types::id::Id; @@ -19,15 +24,13 @@ use super::ArgoApplication; pub struct ArgoHelmScore { pub namespace: String, pub openshift: bool, - pub domain: String, pub argo_apps: Vec, } impl Score for ArgoHelmScore { fn create_interpret(&self) -> Box> { - let helm_score = argo_helm_chart_score(&self.namespace, self.openshift, &self.domain); Box::new(ArgoInterpret { - score: helm_score, + score: self.clone(), argo_apps: self.argo_apps.clone(), }) } @@ -39,7 +42,7 @@ impl Score for ArgoHelmScore { #[derive(Debug)] pub struct ArgoInterpret { - score: HelmChartScore, + score: ArgoHelmScore, argo_apps: Vec, } @@ -50,9 +53,16 @@ impl Interpret for ArgoInterpret { inventory: &Inventory, topology: &T, ) -> Result { - self.score.interpret(inventory, topology).await?; - let k8s_client = topology.k8s_client().await?; + let domain = self + .get_host_domain(k8s_client.clone(), self.score.openshift) + .await?; + let domain = format!("argo.{domain}"); + let helm_score = + argo_helm_chart_score(&self.score.namespace, self.score.openshift, &domain); + + helm_score.interpret(inventory, topology).await?; + k8s_client .apply_yaml_many(&self.argo_apps.iter().map(|a| a.to_yaml()).collect(), None) .await @@ -85,6 +95,38 @@ impl Interpret for ArgoInterpret { } } +impl ArgoInterpret { + pub async fn get_host_domain( + &self, + client: Arc, + openshift: bool, + ) -> Result { + //This should be the job of the topology to determine if we are in + //openshift, potentially we need on openshift topology the same way we create a + //localhosttopology + match openshift { + true => { + let gvk = GroupVersionKind { + group: "operator.openshift.io".into(), + version: "v1".into(), + kind: "IngressController".into(), + }; + let ic = client + .get_resource_json_value("default", Some("openshift-ingress-operator"), &gvk) + .await?; + + match ic.data["status"]["domain"].as_str() { + Some(domain) => return Ok(domain.to_string()), + None => return Err(InterpretError::new("Could not find domain".to_string())), + } + } + false => { + todo!() + } + }; + } +} + pub fn argo_helm_chart_score(namespace: &str, openshift: bool, domain: &str) -> HelmChartScore { let values = format!( r#" @@ -660,7 +702,7 @@ server: # nginx.ingress.kubernetes.io/ssl-passthrough: "true" # -- Defines which ingress controller will implement the resource - ingressClassName: "" + ingressClassName: "openshift-default" # -- Argo CD server hostname # @default -- `""` (defaults to global.domain) From b42815f79c47ce33956ce838aad2aac41b3c8d9d Mon Sep 17 00:00:00 2001 From: Willem Date: Mon, 8 Sep 2025 14:22:05 +0000 Subject: [PATCH 09/11] feat: added a monitoring stack that works with openshift/okd (#134) * Okd needs to use the cluster observability operator in order to deploy namespaced prometheuses and alertmanagers * allow namespaced deployments of alertmanager and prometheuses as well as its associated rules, etc. Co-authored-by: Ian Letourneau Reviewed-on: https://git.nationtech.io/NationTech/harmony/pulls/134 Co-authored-by: Willem Co-committed-by: Willem --- Cargo.lock | 15 + .../rhob_application_monitoring/Cargo.toml | 17 + .../rhob_application_monitoring/src/main.rs | 50 ++ harmony/src/domain/interpret/mod.rs | 2 + harmony/src/domain/topology/k8s_anywhere.rs | 107 +++- .../src/modules/application/features/mod.rs | 1 + .../application/features/rhob_monitoring.rs | 109 ++++ .../alert_channel/discord_alert_channel.rs | 67 +++ .../alert_channel/webhook_receiver.rs | 74 ++- .../monitoring/application_monitoring/mod.rs | 1 + .../rhobs_application_monitoring_score.rs | 94 ++++ .../monitoring/kube_prometheus/crd/mod.rs | 10 + .../crd/rhob_alertmanager_config.rs | 50 ++ .../kube_prometheus/crd/rhob_alertmanagers.rs | 52 ++ .../rhob_cluster_observability_operator.rs | 22 + .../kube_prometheus/crd/rhob_default_rules.rs | 26 + .../kube_prometheus/crd/rhob_grafana.rs | 153 ++++++ .../crd/rhob_monitoring_stack.rs | 41 ++ .../crd/rhob_prometheus_rules.rs | 57 ++ .../kube_prometheus/crd/rhob_prometheuses.rs | 118 +++++ .../kube_prometheus/crd/rhob_role.rs | 62 +++ .../crd/rhob_service_monitor.rs | 87 ++++ harmony/src/modules/prometheus/mod.rs | 1 + .../modules/prometheus/rhob_alerting_score.rs | 486 ++++++++++++++++++ 24 files changed, 1695 insertions(+), 7 deletions(-) create mode 100644 examples/rhob_application_monitoring/Cargo.toml create mode 100644 examples/rhob_application_monitoring/src/main.rs create mode 100644 harmony/src/modules/application/features/rhob_monitoring.rs create mode 100644 harmony/src/modules/monitoring/application_monitoring/rhobs_application_monitoring_score.rs create mode 100644 harmony/src/modules/monitoring/kube_prometheus/crd/rhob_alertmanager_config.rs create mode 100644 harmony/src/modules/monitoring/kube_prometheus/crd/rhob_alertmanagers.rs create mode 100644 harmony/src/modules/monitoring/kube_prometheus/crd/rhob_cluster_observability_operator.rs create mode 100644 harmony/src/modules/monitoring/kube_prometheus/crd/rhob_default_rules.rs create mode 100644 harmony/src/modules/monitoring/kube_prometheus/crd/rhob_grafana.rs create mode 100644 harmony/src/modules/monitoring/kube_prometheus/crd/rhob_monitoring_stack.rs create mode 100644 harmony/src/modules/monitoring/kube_prometheus/crd/rhob_prometheus_rules.rs create mode 100644 harmony/src/modules/monitoring/kube_prometheus/crd/rhob_prometheuses.rs create mode 100644 harmony/src/modules/monitoring/kube_prometheus/crd/rhob_role.rs create mode 100644 harmony/src/modules/monitoring/kube_prometheus/crd/rhob_service_monitor.rs create mode 100644 harmony/src/modules/prometheus/rhob_alerting_score.rs diff --git a/Cargo.lock b/Cargo.lock index 8cdfe97..7b7fafe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4631,6 +4631,21 @@ dependencies = [ "subtle", ] +[[package]] +name = "rhob-application-monitoring" +version = "0.1.0" +dependencies = [ + "base64 0.22.1", + "env_logger", + "harmony", + "harmony_cli", + "harmony_macros", + "harmony_types", + "log", + "tokio", + "url", +] + [[package]] name = "ring" version = "0.17.14" diff --git a/examples/rhob_application_monitoring/Cargo.toml b/examples/rhob_application_monitoring/Cargo.toml new file mode 100644 index 0000000..9ee4eee --- /dev/null +++ b/examples/rhob_application_monitoring/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "rhob-application-monitoring" +edition = "2024" +version.workspace = true +readme.workspace = true +license.workspace = true + +[dependencies] +harmony = { path = "../../harmony" } +harmony_cli = { path = "../../harmony_cli" } +harmony_types = { path = "../../harmony_types" } +harmony_macros = { path = "../../harmony_macros" } +tokio = { workspace = true } +log = { workspace = true } +env_logger = { workspace = true } +url = { workspace = true } +base64.workspace = true diff --git a/examples/rhob_application_monitoring/src/main.rs b/examples/rhob_application_monitoring/src/main.rs new file mode 100644 index 0000000..dd6a05c --- /dev/null +++ b/examples/rhob_application_monitoring/src/main.rs @@ -0,0 +1,50 @@ +use std::{path::PathBuf, sync::Arc}; + +use harmony::{ + inventory::Inventory, + modules::{ + application::{ + ApplicationScore, RustWebFramework, RustWebapp, + features::rhob_monitoring::RHOBMonitoring, + }, + monitoring::alert_channel::discord_alert_channel::DiscordWebhook, + }, + topology::K8sAnywhereTopology, +}; +use harmony_types::net::Url; + +#[tokio::main] +async fn main() { + let application = Arc::new(RustWebapp { + name: "test-rhob-monitoring".to_string(), + domain: Url::Url(url::Url::parse("htps://some-fake-url").unwrap()), + project_root: PathBuf::from("./webapp"), // Relative from 'harmony-path' param + framework: Some(RustWebFramework::Leptos), + service_port: 3000, + }); + + let discord_receiver = DiscordWebhook { + name: "test-discord".to_string(), + url: Url::Url(url::Url::parse("https://discord.doesnt.exist.com").unwrap()), + }; + + let app = ApplicationScore { + features: vec![ + Box::new(RHOBMonitoring { + application: application.clone(), + alert_receiver: vec![Box::new(discord_receiver)], + }), + // TODO add backups, multisite ha, etc + ], + application, + }; + + harmony_cli::run( + Inventory::autoload(), + K8sAnywhereTopology::from_env(), + vec![Box::new(app)], + None, + ) + .await + .unwrap(); +} diff --git a/harmony/src/domain/interpret/mod.rs b/harmony/src/domain/interpret/mod.rs index 71d2f61..fac18df 100644 --- a/harmony/src/domain/interpret/mod.rs +++ b/harmony/src/domain/interpret/mod.rs @@ -32,6 +32,7 @@ pub enum InterpretName { K8sPrometheusCrdAlerting, DiscoverInventoryAgent, CephClusterHealth, + RHOBAlerting, } impl std::fmt::Display for InterpretName { @@ -60,6 +61,7 @@ impl std::fmt::Display for InterpretName { InterpretName::K8sPrometheusCrdAlerting => f.write_str("K8sPrometheusCrdAlerting"), InterpretName::DiscoverInventoryAgent => f.write_str("DiscoverInventoryAgent"), InterpretName::CephClusterHealth => f.write_str("CephClusterHealth"), + InterpretName::RHOBAlerting => f.write_str("RHOBAlerting"), } } } diff --git a/harmony/src/domain/topology/k8s_anywhere.rs b/harmony/src/domain/topology/k8s_anywhere.rs index f81bef4..119ad13 100644 --- a/harmony/src/domain/topology/k8s_anywhere.rs +++ b/harmony/src/domain/topology/k8s_anywhere.rs @@ -14,10 +14,11 @@ use crate::{ monitoring::kube_prometheus::crd::{ crd_alertmanager_config::CRDPrometheus, prometheus_operator::prometheus_operator_helm_chart_score, + rhob_alertmanager_config::RHOBObservability, }, prometheus::{ k8s_prometheus_alerting_score::K8sPrometheusCRDAlertingScore, - prometheus::PrometheusApplicationMonitoring, + prometheus::PrometheusApplicationMonitoring, rhob_alerting_score::RHOBAlertingScore, }, }, score::Score, @@ -108,6 +109,43 @@ impl PrometheusApplicationMonitoring for K8sAnywhereTopology { } } +#[async_trait] +impl PrometheusApplicationMonitoring for K8sAnywhereTopology { + async fn install_prometheus( + &self, + sender: &RHOBObservability, + inventory: &Inventory, + receivers: Option>>>, + ) -> Result { + let po_result = self.ensure_cluster_observability_operator(sender).await?; + + if po_result == PreparationOutcome::Noop { + debug!("Skipping Prometheus CR installation due to missing operator."); + return Ok(po_result); + } + + let result = self + .get_cluster_observability_operator_prometheus_application_score( + sender.clone(), + receivers, + ) + .await + .interpret(inventory, self) + .await; + + match result { + Ok(outcome) => match outcome.status { + InterpretStatus::SUCCESS => Ok(PreparationOutcome::Success { + details: outcome.message, + }), + InterpretStatus::NOOP => Ok(PreparationOutcome::Noop), + _ => Err(PreparationError::new(outcome.message)), + }, + Err(err) => Err(PreparationError::new(err.to_string())), + } + } +} + impl Serialize for K8sAnywhereTopology { fn serialize(&self, _serializer: S) -> Result where @@ -134,6 +172,19 @@ impl K8sAnywhereTopology { } } + async fn get_cluster_observability_operator_prometheus_application_score( + &self, + sender: RHOBObservability, + receivers: Option>>>, + ) -> RHOBAlertingScore { + RHOBAlertingScore { + sender, + receivers: receivers.unwrap_or_default(), + service_monitors: vec![], + prometheus_rules: vec![], + } + } + async fn get_k8s_prometheus_application_score( &self, sender: CRDPrometheus, @@ -286,6 +337,60 @@ impl K8sAnywhereTopology { } } + async fn ensure_cluster_observability_operator( + &self, + sender: &RHOBObservability, + ) -> Result { + let status = Command::new("sh") + .args(["-c", "kubectl get crd -A | grep -i rhobs"]) + .status() + .map_err(|e| PreparationError::new(format!("could not connect to cluster: {}", e)))?; + + if !status.success() { + if let Some(Some(k8s_state)) = self.k8s_state.get() { + match k8s_state.source { + K8sSource::LocalK3d => { + debug!("installing cluster observability operator"); + todo!(); + let op_score = + prometheus_operator_helm_chart_score(sender.namespace.clone()); + let result = op_score.interpret(&Inventory::empty(), self).await; + + return match result { + Ok(outcome) => match outcome.status { + InterpretStatus::SUCCESS => Ok(PreparationOutcome::Success { + details: "installed cluster observability operator".into(), + }), + InterpretStatus::NOOP => Ok(PreparationOutcome::Noop), + _ => Err(PreparationError::new( + "failed to install cluster observability operator (unknown error)".into(), + )), + }, + Err(err) => Err(PreparationError::new(err.to_string())), + }; + } + K8sSource::Kubeconfig => { + debug!( + "unable to install cluster observability operator, contact cluster admin" + ); + return Ok(PreparationOutcome::Noop); + } + } + } else { + warn!( + "Unable to detect k8s_state. Skipping Cluster Observability Operator install." + ); + return Ok(PreparationOutcome::Noop); + } + } + + debug!("Cluster Observability Operator is already present, skipping install"); + + Ok(PreparationOutcome::Success { + details: "cluster observability operator present in cluster".into(), + }) + } + async fn ensure_prometheus_operator( &self, sender: &CRDPrometheus, diff --git a/harmony/src/modules/application/features/mod.rs b/harmony/src/modules/application/features/mod.rs index ea979bd..93f6412 100644 --- a/harmony/src/modules/application/features/mod.rs +++ b/harmony/src/modules/application/features/mod.rs @@ -1,4 +1,5 @@ mod endpoint; +pub mod rhob_monitoring; pub use endpoint::*; mod monitoring; diff --git a/harmony/src/modules/application/features/rhob_monitoring.rs b/harmony/src/modules/application/features/rhob_monitoring.rs new file mode 100644 index 0000000..62a5323 --- /dev/null +++ b/harmony/src/modules/application/features/rhob_monitoring.rs @@ -0,0 +1,109 @@ +use std::sync::Arc; + +use crate::modules::application::{Application, ApplicationFeature}; +use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore; +use crate::modules::monitoring::application_monitoring::rhobs_application_monitoring_score::ApplicationRHOBMonitoringScore; + +use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability; +use crate::topology::MultiTargetTopology; +use crate::{ + inventory::Inventory, + modules::monitoring::{ + alert_channel::webhook_receiver::WebhookReceiver, ntfy::ntfy::NtfyScore, + }, + score::Score, + topology::{HelmCommand, K8sclient, Topology, tenant::TenantManager}, +}; +use crate::{ + modules::prometheus::prometheus::PrometheusApplicationMonitoring, + topology::oberservability::monitoring::AlertReceiver, +}; +use async_trait::async_trait; +use base64::{Engine as _, engine::general_purpose}; +use harmony_types::net::Url; +use log::{debug, info}; + +#[derive(Debug, Clone)] +pub struct RHOBMonitoring { + pub application: Arc, + pub alert_receiver: Vec>>, +} + +#[async_trait] +impl< + T: Topology + + HelmCommand + + 'static + + TenantManager + + K8sclient + + MultiTargetTopology + + std::fmt::Debug + + PrometheusApplicationMonitoring, +> ApplicationFeature for RHOBMonitoring +{ + async fn ensure_installed(&self, topology: &T) -> Result<(), String> { + info!("Ensuring monitoring is available for application"); + let namespace = topology + .get_tenant_config() + .await + .map(|ns| ns.name.clone()) + .unwrap_or_else(|| self.application.name()); + + let mut alerting_score = ApplicationRHOBMonitoringScore { + sender: RHOBObservability { + namespace: namespace.clone(), + client: topology.k8s_client().await.unwrap(), + }, + application: self.application.clone(), + receivers: self.alert_receiver.clone(), + }; + let ntfy = NtfyScore { + namespace: namespace.clone(), + host: "ntfy.harmonydemo.apps.ncd0.harmony.mcd".to_string(), + }; + ntfy.interpret(&Inventory::empty(), topology) + .await + .map_err(|e| e.to_string())?; + + let ntfy_default_auth_username = "harmony"; + let ntfy_default_auth_password = "harmony"; + let ntfy_default_auth_header = format!( + "Basic {}", + general_purpose::STANDARD.encode(format!( + "{ntfy_default_auth_username}:{ntfy_default_auth_password}" + )) + ); + + debug!("ntfy_default_auth_header: {ntfy_default_auth_header}"); + + let ntfy_default_auth_param = general_purpose::STANDARD + .encode(ntfy_default_auth_header) + .replace("=", ""); + + debug!("ntfy_default_auth_param: {ntfy_default_auth_param}"); + + let ntfy_receiver = WebhookReceiver { + name: "ntfy-webhook".to_string(), + url: Url::Url( + url::Url::parse( + format!( + "http://ntfy.{}.svc.cluster.local/rust-web-app?auth={ntfy_default_auth_param}", + namespace.clone() + ) + .as_str(), + ) + .unwrap(), + ), + }; + + alerting_score.receivers.push(Box::new(ntfy_receiver)); + alerting_score + .interpret(&Inventory::empty(), topology) + .await + .map_err(|e| e.to_string())?; + Ok(()) + } + fn name(&self) -> String { + "Monitoring".to_string() + } +} diff --git a/harmony/src/modules/monitoring/alert_channel/discord_alert_channel.rs b/harmony/src/modules/monitoring/alert_channel/discord_alert_channel.rs index caab4d1..748c677 100644 --- a/harmony/src/modules/monitoring/alert_channel/discord_alert_channel.rs +++ b/harmony/src/modules/monitoring/alert_channel/discord_alert_channel.rs @@ -4,6 +4,7 @@ use std::collections::BTreeMap; use async_trait::async_trait; use k8s_openapi::api::core::v1::Secret; use kube::api::ObjectMeta; +use log::debug; use serde::Serialize; use serde_json::json; use serde_yaml::{Mapping, Value}; @@ -11,6 +12,7 @@ use serde_yaml::{Mapping, Value}; use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::{ AlertmanagerConfig, AlertmanagerConfigSpec, CRDPrometheus, }; +use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability; use crate::{ interpret::{InterpretError, Outcome}, modules::monitoring::{ @@ -30,6 +32,71 @@ pub struct DiscordWebhook { pub url: Url, } +#[async_trait] +impl AlertReceiver for DiscordWebhook { + async fn install(&self, sender: &RHOBObservability) -> Result { + let spec = crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::AlertmanagerConfigSpec { + data: json!({ + "route": { + "receiver": self.name, + }, + "receivers": [ + { + "name": self.name, + "webhookConfigs": [ + { + "url": self.url, + } + ] + } + ] + }), + }; + + let alertmanager_configs = crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::AlertmanagerConfig { + metadata: ObjectMeta { + name: Some(self.name.clone()), + labels: Some(std::collections::BTreeMap::from([( + "alertmanagerConfig".to_string(), + "enabled".to_string(), + )])), + namespace: Some(sender.namespace.clone()), + ..Default::default() + }, + spec, + }; + debug!( + "alertmanager_configs yaml:\n{:#?}", + serde_yaml::to_string(&alertmanager_configs) + ); + debug!( + "alert manager configs: \n{:#?}", + alertmanager_configs.clone() + ); + + sender + .client + .apply(&alertmanager_configs, Some(&sender.namespace)) + .await?; + Ok(Outcome::success(format!( + "installed rhob-alertmanagerconfigs for {}", + self.name + ))) + } + + fn name(&self) -> String { + "webhook-receiver".to_string() + } + + fn clone_box(&self) -> Box> { + Box::new(self.clone()) + } + + fn as_any(&self) -> &dyn Any { + self + } +} + #[async_trait] impl AlertReceiver for DiscordWebhook { async fn install(&self, sender: &CRDPrometheus) -> Result { diff --git a/harmony/src/modules/monitoring/alert_channel/webhook_receiver.rs b/harmony/src/modules/monitoring/alert_channel/webhook_receiver.rs index 51e63b6..52124ff 100644 --- a/harmony/src/modules/monitoring/alert_channel/webhook_receiver.rs +++ b/harmony/src/modules/monitoring/alert_channel/webhook_receiver.rs @@ -11,8 +11,8 @@ use crate::{ interpret::{InterpretError, Outcome}, modules::monitoring::{ kube_prometheus::{ - crd::crd_alertmanager_config::{ - AlertmanagerConfig, AlertmanagerConfigSpec, CRDPrometheus, + crd::{ + crd_alertmanager_config::CRDPrometheus, rhob_alertmanager_config::RHOBObservability, }, prometheus::{KubePrometheus, KubePrometheusReceiver}, types::{AlertChannelConfig, AlertManagerChannelConfig}, @@ -30,9 +30,9 @@ pub struct WebhookReceiver { } #[async_trait] -impl AlertReceiver for WebhookReceiver { - async fn install(&self, sender: &CRDPrometheus) -> Result { - let spec = AlertmanagerConfigSpec { +impl AlertReceiver for WebhookReceiver { + async fn install(&self, sender: &RHOBObservability) -> Result { + let spec = crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::AlertmanagerConfigSpec { data: json!({ "route": { "receiver": self.name, @@ -50,7 +50,68 @@ impl AlertReceiver for WebhookReceiver { }), }; - let alertmanager_configs = AlertmanagerConfig { + let alertmanager_configs = crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::AlertmanagerConfig { + metadata: ObjectMeta { + name: Some(self.name.clone()), + labels: Some(std::collections::BTreeMap::from([( + "alertmanagerConfig".to_string(), + "enabled".to_string(), + )])), + namespace: Some(sender.namespace.clone()), + ..Default::default() + }, + spec, + }; + debug!( + "alert manager configs: \n{:#?}", + alertmanager_configs.clone() + ); + + sender + .client + .apply(&alertmanager_configs, Some(&sender.namespace)) + .await?; + Ok(Outcome::success(format!( + "installed rhob-alertmanagerconfigs for {}", + self.name + ))) + } + + fn name(&self) -> String { + "webhook-receiver".to_string() + } + + fn clone_box(&self) -> Box> { + Box::new(self.clone()) + } + + fn as_any(&self) -> &dyn Any { + self + } +} + +#[async_trait] +impl AlertReceiver for WebhookReceiver { + async fn install(&self, sender: &CRDPrometheus) -> Result { + let spec = crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::AlertmanagerConfigSpec { + data: json!({ + "route": { + "receiver": self.name, + }, + "receivers": [ + { + "name": self.name, + "webhookConfigs": [ + { + "url": self.url, + } + ] + } + ] + }), + }; + + let alertmanager_configs = crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::AlertmanagerConfig { metadata: ObjectMeta { name: Some(self.name.clone()), labels: Some(std::collections::BTreeMap::from([( @@ -115,6 +176,7 @@ impl PrometheusReceiver for WebhookReceiver { self.get_config().await } } + #[async_trait] impl AlertReceiver for WebhookReceiver { async fn install(&self, sender: &KubePrometheus) -> Result { diff --git a/harmony/src/modules/monitoring/application_monitoring/mod.rs b/harmony/src/modules/monitoring/application_monitoring/mod.rs index c243cd7..5d12f78 100644 --- a/harmony/src/modules/monitoring/application_monitoring/mod.rs +++ b/harmony/src/modules/monitoring/application_monitoring/mod.rs @@ -1 +1,2 @@ pub mod application_monitoring_score; +pub mod rhobs_application_monitoring_score; diff --git a/harmony/src/modules/monitoring/application_monitoring/rhobs_application_monitoring_score.rs b/harmony/src/modules/monitoring/application_monitoring/rhobs_application_monitoring_score.rs new file mode 100644 index 0000000..17e42c3 --- /dev/null +++ b/harmony/src/modules/monitoring/application_monitoring/rhobs_application_monitoring_score.rs @@ -0,0 +1,94 @@ +use std::sync::Arc; + +use async_trait::async_trait; +use serde::Serialize; + +use crate::{ + data::Version, + interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, + inventory::Inventory, + modules::{ + application::Application, + monitoring::kube_prometheus::crd::{ + crd_alertmanager_config::CRDPrometheus, rhob_alertmanager_config::RHOBObservability, + }, + prometheus::prometheus::PrometheusApplicationMonitoring, + }, + score::Score, + topology::{PreparationOutcome, Topology, oberservability::monitoring::AlertReceiver}, +}; +use harmony_types::id::Id; + +#[derive(Debug, Clone, Serialize)] +pub struct ApplicationRHOBMonitoringScore { + pub sender: RHOBObservability, + pub application: Arc, + pub receivers: Vec>>, +} + +impl> Score + for ApplicationRHOBMonitoringScore +{ + fn create_interpret(&self) -> Box> { + Box::new(ApplicationRHOBMonitoringInterpret { + score: self.clone(), + }) + } + + fn name(&self) -> String { + format!( + "{} monitoring [ApplicationRHOBMonitoringScore]", + self.application.name() + ) + } +} + +#[derive(Debug)] +pub struct ApplicationRHOBMonitoringInterpret { + score: ApplicationRHOBMonitoringScore, +} + +#[async_trait] +impl> Interpret + for ApplicationRHOBMonitoringInterpret +{ + async fn execute( + &self, + inventory: &Inventory, + topology: &T, + ) -> Result { + let result = topology + .install_prometheus( + &self.score.sender, + inventory, + Some(self.score.receivers.clone()), + ) + .await; + + match result { + Ok(outcome) => match outcome { + PreparationOutcome::Success { details: _ } => { + Ok(Outcome::success("Prometheus installed".into())) + } + PreparationOutcome::Noop => Ok(Outcome::noop()), + }, + Err(err) => Err(InterpretError::from(err)), + } + } + + fn get_name(&self) -> InterpretName { + InterpretName::ApplicationMonitoring + } + + fn get_version(&self) -> Version { + todo!() + } + + fn get_status(&self) -> InterpretStatus { + todo!() + } + + fn get_children(&self) -> Vec { + todo!() + } +} diff --git a/harmony/src/modules/monitoring/kube_prometheus/crd/mod.rs b/harmony/src/modules/monitoring/kube_prometheus/crd/mod.rs index 236a2de..4dbea74 100644 --- a/harmony/src/modules/monitoring/kube_prometheus/crd/mod.rs +++ b/harmony/src/modules/monitoring/kube_prometheus/crd/mod.rs @@ -7,5 +7,15 @@ pub mod crd_prometheuses; pub mod grafana_default_dashboard; pub mod grafana_operator; pub mod prometheus_operator; +pub mod rhob_alertmanager_config; +pub mod rhob_alertmanagers; +pub mod rhob_cluster_observability_operator; +pub mod rhob_default_rules; +pub mod rhob_grafana; +pub mod rhob_monitoring_stack; +pub mod rhob_prometheus_rules; +pub mod rhob_prometheuses; +pub mod rhob_role; +pub mod rhob_service_monitor; pub mod role; pub mod service_monitor; diff --git a/harmony/src/modules/monitoring/kube_prometheus/crd/rhob_alertmanager_config.rs b/harmony/src/modules/monitoring/kube_prometheus/crd/rhob_alertmanager_config.rs new file mode 100644 index 0000000..a53b24e --- /dev/null +++ b/harmony/src/modules/monitoring/kube_prometheus/crd/rhob_alertmanager_config.rs @@ -0,0 +1,50 @@ +use std::sync::Arc; + +use kube::CustomResource; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::topology::{ + k8s::K8sClient, + oberservability::monitoring::{AlertReceiver, AlertSender}, +}; + +#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)] +#[kube( + group = "monitoring.rhobs", + version = "v1alpha1", + kind = "AlertmanagerConfig", + plural = "alertmanagerconfigs", + namespaced +)] +pub struct AlertmanagerConfigSpec { + #[serde(flatten)] + pub data: serde_json::Value, +} + +#[derive(Debug, Clone, Serialize)] +pub struct RHOBObservability { + pub namespace: String, + pub client: Arc, +} + +impl AlertSender for RHOBObservability { + fn name(&self) -> String { + "RHOBAlertManager".to_string() + } +} + +impl Clone for Box> { + fn clone(&self) -> Self { + self.clone_box() + } +} + +impl Serialize for Box> { + fn serialize(&self, _serializer: S) -> Result + where + S: serde::Serializer, + { + todo!() + } +} diff --git a/harmony/src/modules/monitoring/kube_prometheus/crd/rhob_alertmanagers.rs b/harmony/src/modules/monitoring/kube_prometheus/crd/rhob_alertmanagers.rs new file mode 100644 index 0000000..4435467 --- /dev/null +++ b/harmony/src/modules/monitoring/kube_prometheus/crd/rhob_alertmanagers.rs @@ -0,0 +1,52 @@ +use kube::CustomResource; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use super::crd_prometheuses::LabelSelector; + +/// Rust CRD for `Alertmanager` from Prometheus Operator +#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)] +#[kube( + group = "monitoring.rhobs", + version = "v1", + kind = "Alertmanager", + plural = "alertmanagers", + namespaced +)] +#[serde(rename_all = "camelCase")] +pub struct AlertmanagerSpec { + /// Number of replicas for HA + pub replicas: i32, + + /// Selectors for AlertmanagerConfig CRDs + #[serde(default, skip_serializing_if = "Option::is_none")] + pub alertmanager_config_selector: Option, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub alertmanager_config_namespace_selector: Option, + + /// Optional pod template metadata (annotations, labels) + #[serde(default, skip_serializing_if = "Option::is_none")] + pub pod_metadata: Option, + + /// Optional topology spread settings + #[serde(default, skip_serializing_if = "Option::is_none")] + pub version: Option, +} + +impl Default for AlertmanagerSpec { + fn default() -> Self { + AlertmanagerSpec { + replicas: 1, + + // Match all AlertmanagerConfigs in the same namespace + alertmanager_config_namespace_selector: None, + + // Empty selector matches all AlertmanagerConfigs in that namespace + alertmanager_config_selector: Some(LabelSelector::default()), + + pod_metadata: None, + version: None, + } + } +} diff --git a/harmony/src/modules/monitoring/kube_prometheus/crd/rhob_cluster_observability_operator.rs b/harmony/src/modules/monitoring/kube_prometheus/crd/rhob_cluster_observability_operator.rs new file mode 100644 index 0000000..bc7ad9f --- /dev/null +++ b/harmony/src/modules/monitoring/kube_prometheus/crd/rhob_cluster_observability_operator.rs @@ -0,0 +1,22 @@ +use std::str::FromStr; + +use non_blank_string_rs::NonBlankString; + +use crate::modules::helm::chart::HelmChartScore; +//TODO package chart or something for COO okd +pub fn rhob_cluster_observability_operator() -> HelmChartScore { + HelmChartScore { + namespace: None, + release_name: NonBlankString::from_str("").unwrap(), + chart_name: NonBlankString::from_str( + "oci://hub.nationtech.io/harmony/nt-prometheus-operator", + ) + .unwrap(), + chart_version: None, + values_overrides: None, + values_yaml: None, + create_namespace: true, + install_only: true, + repository: None, + } +} diff --git a/harmony/src/modules/monitoring/kube_prometheus/crd/rhob_default_rules.rs b/harmony/src/modules/monitoring/kube_prometheus/crd/rhob_default_rules.rs new file mode 100644 index 0000000..459bd3f --- /dev/null +++ b/harmony/src/modules/monitoring/kube_prometheus/crd/rhob_default_rules.rs @@ -0,0 +1,26 @@ +use crate::modules::{ + monitoring::kube_prometheus::crd::rhob_prometheus_rules::Rule, + prometheus::alerts::k8s::{ + deployment::alert_deployment_unavailable, + pod::{alert_container_restarting, alert_pod_not_ready, pod_failed}, + pvc::high_pvc_fill_rate_over_two_days, + service::alert_service_down, + }, +}; + +pub fn build_default_application_rules() -> Vec { + let pod_failed: Rule = pod_failed().into(); + let container_restarting: Rule = alert_container_restarting().into(); + let pod_not_ready: Rule = alert_pod_not_ready().into(); + let service_down: Rule = alert_service_down().into(); + let deployment_unavailable: Rule = alert_deployment_unavailable().into(); + let high_pvc_fill_rate: Rule = high_pvc_fill_rate_over_two_days().into(); + vec![ + pod_failed, + container_restarting, + pod_not_ready, + service_down, + deployment_unavailable, + high_pvc_fill_rate, + ] +} diff --git a/harmony/src/modules/monitoring/kube_prometheus/crd/rhob_grafana.rs b/harmony/src/modules/monitoring/kube_prometheus/crd/rhob_grafana.rs new file mode 100644 index 0000000..65efab9 --- /dev/null +++ b/harmony/src/modules/monitoring/kube_prometheus/crd/rhob_grafana.rs @@ -0,0 +1,153 @@ +use std::collections::BTreeMap; + +use kube::CustomResource; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::LabelSelector; + +#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)] +#[kube( + group = "grafana.integreatly.org", + version = "v1beta1", + kind = "Grafana", + plural = "grafanas", + namespaced +)] +#[serde(rename_all = "camelCase")] +pub struct GrafanaSpec { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub config: Option, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub admin_user: Option, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub admin_password: Option, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ingress: Option, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub persistence: Option, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub resources: Option, +} + +#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)] +#[serde(rename_all = "camelCase")] +pub struct GrafanaConfig { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub log: Option, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub security: Option, +} + +#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)] +#[serde(rename_all = "camelCase")] +pub struct GrafanaLogConfig { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub mode: Option, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub level: Option, +} + +#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)] +#[serde(rename_all = "camelCase")] +pub struct GrafanaSecurityConfig { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub admin_user: Option, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub admin_password: Option, +} + +#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)] +#[serde(rename_all = "camelCase")] +pub struct GrafanaIngress { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub enabled: Option, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub hosts: Option>, +} + +#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)] +#[serde(rename_all = "camelCase")] +pub struct GrafanaPersistence { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub enabled: Option, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub storage_class_name: Option, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub size: Option, +} + +// ------------------------------------------------------------------------------------------------ + +#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)] +#[kube( + group = "grafana.integreatly.org", + version = "v1beta1", + kind = "GrafanaDashboard", + plural = "grafanadashboards", + namespaced +)] +#[serde(rename_all = "camelCase")] +pub struct GrafanaDashboardSpec { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub resync_period: Option, + + pub instance_selector: LabelSelector, + + pub json: String, +} + +// ------------------------------------------------------------------------------------------------ + +#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)] +#[kube( + group = "grafana.integreatly.org", + version = "v1beta1", + kind = "GrafanaDatasource", + plural = "grafanadatasources", + namespaced +)] +#[serde(rename_all = "camelCase")] +pub struct GrafanaDatasourceSpec { + pub instance_selector: LabelSelector, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub allow_cross_namespace_import: Option, + + pub datasource: GrafanaDatasourceConfig, +} + +#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)] +#[serde(rename_all = "camelCase")] +pub struct GrafanaDatasourceConfig { + pub access: String, + pub database: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub json_data: Option>, + pub name: String, + pub r#type: String, + pub url: String, +} + +// ------------------------------------------------------------------------------------------------ + +#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, Default)] +#[serde(rename_all = "camelCase")] +pub struct ResourceRequirements { + #[serde(default, skip_serializing_if = "BTreeMap::is_empty")] + pub limits: BTreeMap, + + #[serde(default, skip_serializing_if = "BTreeMap::is_empty")] + pub requests: BTreeMap, +} diff --git a/harmony/src/modules/monitoring/kube_prometheus/crd/rhob_monitoring_stack.rs b/harmony/src/modules/monitoring/kube_prometheus/crd/rhob_monitoring_stack.rs new file mode 100644 index 0000000..bd542e9 --- /dev/null +++ b/harmony/src/modules/monitoring/kube_prometheus/crd/rhob_monitoring_stack.rs @@ -0,0 +1,41 @@ +use std::collections::BTreeMap; + +use kube::CustomResource; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::LabelSelector; + +/// MonitoringStack CRD for monitoring.rhobs/v1alpha1 +#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)] +#[kube( + group = "monitoring.rhobs", + version = "v1alpha1", + kind = "MonitoringStack", + plural = "monitoringstacks", + namespaced +)] +#[serde(rename_all = "camelCase")] +pub struct MonitoringStackSpec { + /// Verbosity of logs (e.g. "debug", "info", "warn", "error"). + #[serde(default, skip_serializing_if = "Option::is_none")] + pub log_level: Option, + + /// Retention period for Prometheus TSDB data (e.g. "1d"). + #[serde(default, skip_serializing_if = "Option::is_none")] + pub retention: Option, + + /// Resource selector for workloads monitored by this stack. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub resource_selector: Option, +} + +impl Default for MonitoringStackSpec { + fn default() -> Self { + MonitoringStackSpec { + log_level: Some("info".into()), + retention: Some("7d".into()), + resource_selector: Some(LabelSelector::default()), + } + } +} diff --git a/harmony/src/modules/monitoring/kube_prometheus/crd/rhob_prometheus_rules.rs b/harmony/src/modules/monitoring/kube_prometheus/crd/rhob_prometheus_rules.rs new file mode 100644 index 0000000..e2b5b60 --- /dev/null +++ b/harmony/src/modules/monitoring/kube_prometheus/crd/rhob_prometheus_rules.rs @@ -0,0 +1,57 @@ +use std::collections::BTreeMap; + +use kube::CustomResource; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::modules::monitoring::alert_rule::prometheus_alert_rule::PrometheusAlertRule; + +#[derive(CustomResource, Debug, Serialize, Deserialize, Clone, JsonSchema)] +#[kube( + group = "monitoring.rhobs", + version = "v1", + kind = "PrometheusRule", + plural = "prometheusrules", + namespaced +)] +#[serde(rename_all = "camelCase")] +pub struct PrometheusRuleSpec { + pub groups: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +pub struct RuleGroup { + pub name: String, + pub rules: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +#[serde(rename_all = "camelCase")] +pub struct Rule { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub alert: Option, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub expr: Option, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub for_: Option, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub labels: Option>, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub annotations: Option>, +} + +impl From for Rule { + fn from(value: PrometheusAlertRule) -> Self { + Rule { + alert: Some(value.alert), + expr: Some(value.expr), + for_: value.r#for, + labels: Some(value.labels.into_iter().collect::>()), + annotations: Some(value.annotations.into_iter().collect::>()), + } + } +} diff --git a/harmony/src/modules/monitoring/kube_prometheus/crd/rhob_prometheuses.rs b/harmony/src/modules/monitoring/kube_prometheus/crd/rhob_prometheuses.rs new file mode 100644 index 0000000..18d3f57 --- /dev/null +++ b/harmony/src/modules/monitoring/kube_prometheus/crd/rhob_prometheuses.rs @@ -0,0 +1,118 @@ +use std::collections::BTreeMap; + +use kube::CustomResource; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::modules::monitoring::kube_prometheus::types::Operator; + +#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)] +#[kube( + group = "monitoring.rhobs", + version = "v1", + kind = "Prometheus", + plural = "prometheuses", + namespaced +)] +#[serde(rename_all = "camelCase")] +pub struct PrometheusSpec { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub alerting: Option, + + pub service_account_name: String, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub service_monitor_namespace_selector: Option, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub service_monitor_selector: Option, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub service_discovery_role: Option, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub pod_monitor_selector: Option, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub rule_selector: Option, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub rule_namespace_selector: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Default)] +#[serde(rename_all = "camelCase")] +pub struct NamespaceSelector { + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub match_names: Vec, +} + +/// Contains alerting configuration, specifically Alertmanager endpoints. +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Default)] +pub struct PrometheusSpecAlerting { + #[serde(skip_serializing_if = "Option::is_none")] + pub alertmanagers: Option>, +} + +/// Represents an Alertmanager endpoint configuration used by Prometheus. +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Default)] +pub struct AlertmanagerEndpoints { + /// Name of the Alertmanager Service. + #[serde(skip_serializing_if = "Option::is_none")] + pub name: Option, + + /// Namespace of the Alertmanager Service. + #[serde(skip_serializing_if = "Option::is_none")] + pub namespace: Option, + + /// Port to access on the Alertmanager Service (e.g. "web"). + #[serde(skip_serializing_if = "Option::is_none")] + pub port: Option, + + /// Scheme to use for connecting (e.g. "http"). + #[serde(skip_serializing_if = "Option::is_none")] + pub scheme: Option, + // Other fields like `tls_config`, `path_prefix`, etc., can be added if needed. +} + +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Default)] +#[serde(rename_all = "camelCase")] +pub struct LabelSelector { + #[serde(default, skip_serializing_if = "BTreeMap::is_empty")] + pub match_labels: BTreeMap, + + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub match_expressions: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +#[serde(rename_all = "camelCase")] +pub struct LabelSelectorRequirement { + pub key: String, + pub operator: Operator, + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub values: Vec, +} + +impl Default for PrometheusSpec { + fn default() -> Self { + PrometheusSpec { + alerting: None, + + service_account_name: "prometheus".into(), + + // null means "only my namespace" + service_monitor_namespace_selector: None, + + // empty selector means match all ServiceMonitors in that namespace + service_monitor_selector: Some(LabelSelector::default()), + + service_discovery_role: Some("Endpoints".into()), + + pod_monitor_selector: None, + + rule_selector: None, + + rule_namespace_selector: Some(LabelSelector::default()), + } + } +} diff --git a/harmony/src/modules/monitoring/kube_prometheus/crd/rhob_role.rs b/harmony/src/modules/monitoring/kube_prometheus/crd/rhob_role.rs new file mode 100644 index 0000000..9add9a9 --- /dev/null +++ b/harmony/src/modules/monitoring/kube_prometheus/crd/rhob_role.rs @@ -0,0 +1,62 @@ +use k8s_openapi::api::{ + core::v1::ServiceAccount, + rbac::v1::{PolicyRule, Role, RoleBinding, RoleRef, Subject}, +}; +use kube::api::ObjectMeta; + +pub fn build_prom_role(role_name: String, namespace: String) -> Role { + Role { + metadata: ObjectMeta { + name: Some(role_name), + namespace: Some(namespace), + ..Default::default() + }, + rules: Some(vec![PolicyRule { + api_groups: Some(vec!["".into()]), // core API group + resources: Some(vec!["services".into(), "endpoints".into(), "pods".into()]), + verbs: vec!["get".into(), "list".into(), "watch".into()], + ..Default::default() + }]), + } +} + +pub fn build_prom_rolebinding( + role_name: String, + namespace: String, + service_account_name: String, +) -> RoleBinding { + RoleBinding { + metadata: ObjectMeta { + name: Some(format!("{}-rolebinding", role_name)), + namespace: Some(namespace.clone()), + ..Default::default() + }, + role_ref: RoleRef { + api_group: "rbac.authorization.k8s.io".into(), + kind: "Role".into(), + name: role_name, + }, + subjects: Some(vec![Subject { + kind: "ServiceAccount".into(), + name: service_account_name, + namespace: Some(namespace.clone()), + ..Default::default() + }]), + } +} + +pub fn build_prom_service_account( + service_account_name: String, + namespace: String, +) -> ServiceAccount { + ServiceAccount { + automount_service_account_token: None, + image_pull_secrets: None, + metadata: ObjectMeta { + name: Some(service_account_name), + namespace: Some(namespace), + ..Default::default() + }, + secrets: None, + } +} diff --git a/harmony/src/modules/monitoring/kube_prometheus/crd/rhob_service_monitor.rs b/harmony/src/modules/monitoring/kube_prometheus/crd/rhob_service_monitor.rs new file mode 100644 index 0000000..6a981f2 --- /dev/null +++ b/harmony/src/modules/monitoring/kube_prometheus/crd/rhob_service_monitor.rs @@ -0,0 +1,87 @@ +use std::collections::HashMap; + +use kube::CustomResource; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::modules::monitoring::kube_prometheus::types::{ + HTTPScheme, MatchExpression, NamespaceSelector, Operator, Selector, + ServiceMonitor as KubeServiceMonitor, ServiceMonitorEndpoint, +}; + +/// This is the top-level struct for the ServiceMonitor Custom Resource. +/// The `#[derive(CustomResource)]` macro handles all the boilerplate for you, +/// including the `impl Resource`. +#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)] +#[kube( + group = "monitoring.rhobs", + version = "v1", + kind = "ServiceMonitor", + plural = "servicemonitors", + namespaced +)] +#[serde(rename_all = "camelCase")] +pub struct ServiceMonitorSpec { + /// A label selector to select services to monitor. + pub selector: Selector, + + /// A list of endpoints on the selected services to be monitored. + pub endpoints: Vec, + + /// Selector to select which namespaces the Kubernetes Endpoints objects + /// are discovered from. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace_selector: Option, + + /// The label to use to retrieve the job name from. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub job_label: Option, + + /// Pod-based target labels to transfer from the Kubernetes Pod onto the target. + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub pod_target_labels: Vec, + + /// TargetLabels transfers labels on the Kubernetes Service object to the target. + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub target_labels: Vec, +} + +impl Default for ServiceMonitorSpec { + fn default() -> Self { + let labels = HashMap::new(); + Self { + selector: Selector { + match_labels: { labels }, + match_expressions: vec![MatchExpression { + key: "app.kubernetes.io/name".into(), + operator: Operator::Exists, + values: vec![], + }], + }, + endpoints: vec![ServiceMonitorEndpoint { + port: Some("http".to_string()), + path: Some("/metrics".into()), + interval: Some("30s".into()), + scheme: Some(HTTPScheme::HTTP), + ..Default::default() + }], + namespace_selector: None, // only the same namespace + job_label: Some("app".into()), + pod_target_labels: vec![], + target_labels: vec![], + } + } +} + +impl From for ServiceMonitorSpec { + fn from(value: KubeServiceMonitor) -> Self { + Self { + selector: value.selector, + endpoints: value.endpoints, + namespace_selector: value.namespace_selector, + job_label: value.job_label, + pod_target_labels: value.pod_target_labels, + target_labels: value.target_labels, + } + } +} diff --git a/harmony/src/modules/prometheus/mod.rs b/harmony/src/modules/prometheus/mod.rs index b77f199..c4f25ba 100644 --- a/harmony/src/modules/prometheus/mod.rs +++ b/harmony/src/modules/prometheus/mod.rs @@ -2,3 +2,4 @@ pub mod alerts; pub mod k8s_prometheus_alerting_score; #[allow(clippy::module_inception)] pub mod prometheus; +pub mod rhob_alerting_score; diff --git a/harmony/src/modules/prometheus/rhob_alerting_score.rs b/harmony/src/modules/prometheus/rhob_alerting_score.rs new file mode 100644 index 0000000..97fa644 --- /dev/null +++ b/harmony/src/modules/prometheus/rhob_alerting_score.rs @@ -0,0 +1,486 @@ +use std::fs; +use std::{collections::BTreeMap, sync::Arc}; +use tempfile::tempdir; + +use async_trait::async_trait; +use kube::api::ObjectMeta; +use log::{debug, info}; +use serde::Serialize; +use std::process::Command; + +use crate::modules::monitoring::kube_prometheus::crd::grafana_default_dashboard::build_default_dashboard; +use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability; +use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanagers::{ + Alertmanager, AlertmanagerSpec, +}; +use crate::modules::monitoring::kube_prometheus::crd::rhob_grafana::{ + Grafana, GrafanaDashboard, GrafanaDashboardSpec, GrafanaDatasource, GrafanaDatasourceConfig, + GrafanaDatasourceSpec, GrafanaSpec, +}; +use crate::modules::monitoring::kube_prometheus::crd::rhob_monitoring_stack::{ + MonitoringStack, MonitoringStackSpec, +}; +use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheus_rules::{ + PrometheusRule, PrometheusRuleSpec, RuleGroup, +}; +use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::LabelSelector; + +use crate::modules::monitoring::kube_prometheus::crd::rhob_service_monitor::{ + ServiceMonitor, ServiceMonitorSpec, +}; +use crate::score::Score; +use crate::topology::oberservability::monitoring::AlertReceiver; +use crate::topology::{K8sclient, Topology, k8s::K8sClient}; +use crate::{ + data::Version, + interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, + inventory::Inventory, +}; +use harmony_types::id::Id; + +use super::prometheus::PrometheusApplicationMonitoring; + +#[derive(Clone, Debug, Serialize)] +pub struct RHOBAlertingScore { + pub sender: RHOBObservability, + pub receivers: Vec>>, + pub service_monitors: Vec, + pub prometheus_rules: Vec, +} + +impl> Score + for RHOBAlertingScore +{ + fn create_interpret(&self) -> Box> { + Box::new(RHOBAlertingInterpret { + sender: self.sender.clone(), + receivers: self.receivers.clone(), + service_monitors: self.service_monitors.clone(), + prometheus_rules: self.prometheus_rules.clone(), + }) + } + + fn name(&self) -> String { + "RHOB alerting [RHOBAlertingScore]".into() + } +} + +#[derive(Clone, Debug)] +pub struct RHOBAlertingInterpret { + pub sender: RHOBObservability, + pub receivers: Vec>>, + pub service_monitors: Vec, + pub prometheus_rules: Vec, +} + +#[async_trait] +impl> Interpret + for RHOBAlertingInterpret +{ + async fn execute( + &self, + _inventory: &Inventory, + topology: &T, + ) -> Result { + let client = topology.k8s_client().await.unwrap(); + self.ensure_grafana_operator().await?; + self.install_prometheus(&client).await?; + self.install_client_kube_metrics().await?; + self.install_grafana(&client).await?; + self.install_receivers(&self.sender, &self.receivers) + .await?; + self.install_rules(&self.prometheus_rules, &client).await?; + self.install_monitors(self.service_monitors.clone(), &client) + .await?; + Ok(Outcome::success( + "K8s monitoring components installed".to_string(), + )) + } + + fn get_name(&self) -> InterpretName { + InterpretName::RHOBAlerting + } + + fn get_version(&self) -> Version { + todo!() + } + + fn get_status(&self) -> InterpretStatus { + todo!() + } + + fn get_children(&self) -> Vec { + todo!() + } +} + +impl RHOBAlertingInterpret { + async fn crd_exists(&self, crd: &str) -> bool { + let status = Command::new("sh") + .args(["-c", &format!("kubectl get crd -A | grep -i {crd}")]) + .status() + .map_err(|e| InterpretError::new(format!("could not connect to cluster: {}", e))) + .unwrap(); + + status.success() + } + + async fn install_chart( + &self, + chart_path: String, + chart_name: String, + ) -> Result<(), InterpretError> { + let temp_dir = + tempdir().map_err(|e| InterpretError::new(format!("Tempdir error: {}", e)))?; + let temp_path = temp_dir.path().to_path_buf(); + debug!("Using temp directory: {}", temp_path.display()); + let chart = format!("{}/{}", chart_path, chart_name); + let pull_output = Command::new("helm") + .args(["pull", &chart, "--destination", temp_path.to_str().unwrap()]) + .output() + .map_err(|e| InterpretError::new(format!("Helm pull error: {}", e)))?; + + if !pull_output.status.success() { + return Err(InterpretError::new(format!( + "Helm pull failed: {}", + String::from_utf8_lossy(&pull_output.stderr) + ))); + } + + let tgz_path = fs::read_dir(&temp_path) + .unwrap() + .filter_map(|entry| { + let entry = entry.ok()?; + let path = entry.path(); + if path.extension()? == "tgz" { + Some(path) + } else { + None + } + }) + .next() + .ok_or_else(|| InterpretError::new("Could not find pulled Helm chart".into()))?; + + debug!("Installing chart from: {}", tgz_path.display()); + + let install_output = Command::new("helm") + .args([ + "upgrade", + "--install", + &chart_name, + tgz_path.to_str().unwrap(), + "--namespace", + &self.sender.namespace.clone(), + "--create-namespace", + "--wait", + "--atomic", + ]) + .output() + .map_err(|e| InterpretError::new(format!("Helm install error: {}", e)))?; + + if !install_output.status.success() { + return Err(InterpretError::new(format!( + "Helm install failed: {}", + String::from_utf8_lossy(&install_output.stderr) + ))); + } + + debug!( + "Installed chart {}/{} in namespace: {}", + &chart_path, + &chart_name, + self.sender.namespace.clone() + ); + Ok(()) + } + + async fn ensure_grafana_operator(&self) -> Result { + let _ = Command::new("helm") + .args([ + "repo", + "add", + "grafana-operator", + "https://grafana.github.io/helm-charts", + ]) + .output() + .unwrap(); + + let _ = Command::new("helm") + .args(["repo", "update"]) + .output() + .unwrap(); + + let output = Command::new("helm") + .args([ + "install", + "grafana-operator", + "grafana-operator/grafana-operator", + "--namespace", + &self.sender.namespace.clone(), + "--create-namespace", + "--set", + "namespaceScope=true", + ]) + .output() + .unwrap(); + + if !output.status.success() { + return Err(InterpretError::new(format!( + "helm install failed:\nstdout: {}\nstderr: {}", + String::from_utf8_lossy(&output.stdout), + String::from_utf8_lossy(&output.stderr) + ))); + } + + Ok(Outcome::success(format!( + "installed grafana operator in ns {}", + self.sender.namespace.clone() + ))) + } + + async fn install_prometheus(&self, client: &Arc) -> Result { + debug!( + "installing crd-prometheuses in namespace {}", + self.sender.namespace.clone() + ); + + let stack = MonitoringStack { + metadata: ObjectMeta { + name: Some(format!("{}-monitoring", self.sender.namespace.clone()).into()), + namespace: Some(self.sender.namespace.clone()), + labels: Some([("coo".into(), "example".into())].into()), + ..Default::default() + }, + spec: MonitoringStackSpec { + log_level: Some("debug".into()), + retention: Some("1d".into()), + resource_selector: Some(LabelSelector { + match_labels: [("app".into(), "demo".into())].into(), + ..Default::default() + }), + }, + }; + + client + .apply(&stack, Some(&self.sender.namespace.clone())) + .await + .map_err(|e| InterpretError::new(e.to_string()))?; + info!("installed rhob monitoring stack",); + Ok(Outcome::success(format!( + "successfully deployed rhob-prometheus {:#?}", + stack + ))) + } + + async fn install_alert_manager( + &self, + client: &Arc, + ) -> Result { + let am = Alertmanager { + metadata: ObjectMeta { + name: Some(self.sender.namespace.clone()), + labels: Some(std::collections::BTreeMap::from([( + "alertmanagerConfig".to_string(), + "enabled".to_string(), + )])), + namespace: Some(self.sender.namespace.clone()), + ..Default::default() + }, + spec: AlertmanagerSpec::default(), + }; + client + .apply(&am, Some(&self.sender.namespace.clone())) + .await + .map_err(|e| InterpretError::new(e.to_string()))?; + Ok(Outcome::success(format!( + "successfully deployed service monitor {:#?}", + am.metadata.name + ))) + } + async fn install_monitors( + &self, + mut monitors: Vec, + client: &Arc, + ) -> Result { + let default_service_monitor = ServiceMonitor { + metadata: ObjectMeta { + name: Some(self.sender.namespace.clone()), + labels: Some(std::collections::BTreeMap::from([ + ("alertmanagerConfig".to_string(), "enabled".to_string()), + ("client".to_string(), "prometheus".to_string()), + ( + "app.kubernetes.io/name".to_string(), + "kube-state-metrics".to_string(), + ), + ])), + namespace: Some(self.sender.namespace.clone()), + ..Default::default() + }, + spec: ServiceMonitorSpec::default(), + }; + monitors.push(default_service_monitor); + for monitor in monitors.iter() { + client + .apply(monitor, Some(&self.sender.namespace.clone())) + .await + .map_err(|e| InterpretError::new(e.to_string()))?; + } + Ok(Outcome::success( + "succesfully deployed service monitors".to_string(), + )) + } + + async fn install_rules( + &self, + #[allow(clippy::ptr_arg)] rules: &Vec, + client: &Arc, + ) -> Result { + let mut prom_rule_spec = PrometheusRuleSpec { + groups: rules.clone(), + }; + + let default_rules_group = RuleGroup { + name: "default-rules".to_string(), + rules: crate::modules::monitoring::kube_prometheus::crd::rhob_default_rules::build_default_application_rules(), + }; + + prom_rule_spec.groups.push(default_rules_group); + let prom_rules = PrometheusRule { + metadata: ObjectMeta { + name: Some(self.sender.namespace.clone()), + labels: Some(std::collections::BTreeMap::from([ + ("alertmanagerConfig".to_string(), "enabled".to_string()), + ("role".to_string(), "prometheus-rule".to_string()), + ])), + namespace: Some(self.sender.namespace.clone()), + ..Default::default() + }, + spec: prom_rule_spec, + }; + client + .apply(&prom_rules, Some(&self.sender.namespace.clone())) + .await + .map_err(|e| InterpretError::new(e.to_string()))?; + Ok(Outcome::success(format!( + "successfully deployed rules {:#?}", + prom_rules.metadata.name + ))) + } + + async fn install_client_kube_metrics(&self) -> Result { + self.install_chart( + "oci://hub.nationtech.io/harmony".to_string(), + "nt-kube-metrics".to_string(), + ) + .await?; + Ok(Outcome::success(format!( + "Installed client kube metrics in ns {}", + &self.sender.namespace.clone() + ))) + } + + async fn install_grafana(&self, client: &Arc) -> Result { + let mut label = BTreeMap::new(); + label.insert("dashboards".to_string(), "grafana".to_string()); + let labels = LabelSelector { + match_labels: label.clone(), + match_expressions: vec![], + }; + let mut json_data = BTreeMap::new(); + json_data.insert("timeInterval".to_string(), "5s".to_string()); + let namespace = self.sender.namespace.clone(); + + let json = build_default_dashboard(&namespace); + + let graf_data_source = GrafanaDatasource { + metadata: ObjectMeta { + name: Some(format!( + "grafana-datasource-{}", + self.sender.namespace.clone() + )), + namespace: Some(self.sender.namespace.clone()), + ..Default::default() + }, + spec: GrafanaDatasourceSpec { + instance_selector: labels.clone(), + allow_cross_namespace_import: Some(false), + datasource: GrafanaDatasourceConfig { + access: "proxy".to_string(), + database: Some("prometheus".to_string()), + json_data: Some(json_data), + //this is fragile + name: format!("prometheus-{}-0", self.sender.namespace.clone()), + r#type: "prometheus".to_string(), + url: format!( + "http://prometheus-operated.{}.svc.cluster.local:9090", + self.sender.namespace.clone() + ), + }, + }, + }; + + client + .apply(&graf_data_source, Some(&self.sender.namespace.clone())) + .await + .map_err(|e| InterpretError::new(e.to_string()))?; + + let graf_dashboard = GrafanaDashboard { + metadata: ObjectMeta { + name: Some(format!( + "grafana-dashboard-{}", + self.sender.namespace.clone() + )), + namespace: Some(self.sender.namespace.clone()), + ..Default::default() + }, + spec: GrafanaDashboardSpec { + resync_period: Some("30s".to_string()), + instance_selector: labels.clone(), + json, + }, + }; + + client + .apply(&graf_dashboard, Some(&self.sender.namespace.clone())) + .await + .map_err(|e| InterpretError::new(e.to_string()))?; + + let grafana = Grafana { + metadata: ObjectMeta { + name: Some(format!("grafana-{}", self.sender.namespace.clone())), + namespace: Some(self.sender.namespace.clone()), + labels: Some(label.clone()), + ..Default::default() + }, + spec: GrafanaSpec { + config: None, + admin_user: None, + admin_password: None, + ingress: None, + persistence: None, + resources: None, + }, + }; + client + .apply(&grafana, Some(&self.sender.namespace.clone())) + .await + .map_err(|e| InterpretError::new(e.to_string()))?; + Ok(Outcome::success(format!( + "successfully deployed grafana instance {:#?}", + grafana.metadata.name + ))) + } + + async fn install_receivers( + &self, + sender: &RHOBObservability, + receivers: &Vec>>, + ) -> Result { + for receiver in receivers.iter() { + receiver.install(sender).await.map_err(|err| { + InterpretError::new(format!("failed to install receiver: {}", err)) + })?; + } + Ok(Outcome::success("successfully deployed receivers".into())) + } +} From 6ea5630d30395a6b2886d52a214999b407a9ee7b Mon Sep 17 00:00:00 2001 From: Ian Letourneau Date: Mon, 8 Sep 2025 14:43:41 +0000 Subject: [PATCH 10/11] feat: add hurl! and local_folder! macros to make Url easier to create (#135) * it was named `hurl!` instead of just `url!` because it was clashing with the crate `url` so we would have been forced to use it with `harmony_macros::url!` which is less sexy Reviewed-on: https://git.nationtech.io/NationTech/harmony/pulls/135 --- Cargo.lock | 1 + examples/rust/src/main.rs | 8 ++--- harmony_macros/Cargo.toml | 1 + harmony_macros/src/lib.rs | 68 +++++++++++++++++++++++++++++++++++++++ harmony_types/src/net.rs | 34 ++++++++++++++++++++ 5 files changed, 108 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7b7fafe..82f2dbc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2405,6 +2405,7 @@ dependencies = [ "serde", "serde_yaml", "syn 2.0.105", + "url", ] [[package]] diff --git a/examples/rust/src/main.rs b/examples/rust/src/main.rs index 031887b..063fdb6 100644 --- a/examples/rust/src/main.rs +++ b/examples/rust/src/main.rs @@ -13,13 +13,13 @@ use harmony::{ }, topology::K8sAnywhereTopology, }; -use harmony_types::net::Url; +use harmony_macros::hurl; #[tokio::main] async fn main() { let application = Arc::new(RustWebapp { name: "harmony-example-rust-webapp".to_string(), - domain: Url::Url(url::Url::parse("https://rustapp.harmony.example.com").unwrap()), + domain: hurl!("https://rustapp.harmony.example.com"), project_root: PathBuf::from("./webapp"), framework: Some(RustWebFramework::Leptos), service_port: 3000, @@ -27,12 +27,12 @@ async fn main() { let discord_receiver = DiscordWebhook { name: "test-discord".to_string(), - url: Url::Url(url::Url::parse("https://discord.doesnt.exist.com").unwrap()), + url: hurl!("https://discord.doesnt.exist.com"), }; let webhook_receiver = WebhookReceiver { name: "sample-webhook-receiver".to_string(), - url: Url::Url(url::Url::parse("https://webhook-doesnt-exist.com").unwrap()), + url: hurl!("https://webhook-doesnt-exist.com"), }; let app = ApplicationScore { diff --git a/harmony_macros/Cargo.toml b/harmony_macros/Cargo.toml index 7185d0b..aed8808 100644 --- a/harmony_macros/Cargo.toml +++ b/harmony_macros/Cargo.toml @@ -15,6 +15,7 @@ serde = "1.0.217" serde_yaml = "0.9.34" syn = "2.0.90" cidr.workspace = true +url.workspace = true [dev-dependencies] serde = { version = "1.0.217", features = ["derive"] } diff --git a/harmony_macros/src/lib.rs b/harmony_macros/src/lib.rs index 2f77d1a..87ac818 100644 --- a/harmony_macros/src/lib.rs +++ b/harmony_macros/src/lib.rs @@ -145,3 +145,71 @@ pub fn cidrv4(input: TokenStream) -> TokenStream { panic!("Invalid IPv4 CIDR : {}", cidr_str); } + +/// Creates a `harmony_types::net::Url::Url` from a string literal. +/// +/// This macro parses the input string as a URL at compile time and will cause a +/// compilation error if the string is not a valid URL. +/// +/// # Example +/// +/// ``` +/// use harmony_types::net::Url; +/// use harmony_macros::hurl; +/// +/// let url = hurl!("https://example.com/path"); +/// +/// let expected_url = url::Url::parse("https://example.com/path").unwrap(); +/// assert!(matches!(url, Url::Url(expected_url))); +/// ``` +/// +/// The following example will fail to compile: +/// +/// ```rust,compile_fail +/// use harmony_macros::hurl; +/// +/// // This is not a valid URL and will cause a compilation error. +/// let _invalid = hurl!("not a valid url"); +/// ``` +#[proc_macro] +pub fn hurl(input: TokenStream) -> TokenStream { + let input_lit = parse_macro_input!(input as LitStr); + let url_str = input_lit.value(); + + match ::url::Url::parse(&url_str) { + Ok(_) => { + let expanded = quote! { + ::harmony_types::net::Url::Url(::url::Url::parse(#input_lit).unwrap()) + }; + TokenStream::from(expanded) + } + Err(e) => { + let err_msg = format!("Invalid URL: {e}"); + syn::Error::new(input_lit.span(), err_msg) + .to_compile_error() + .into() + } + } +} + +/// Creates a `harmony_types::net::Url::LocalFolder` from a string literal. +/// +/// # Example +/// +/// ``` +/// use harmony_types::net::Url; +/// use harmony_macros::local_folder; +/// +/// let local_path = local_folder!("/var/data/files"); +/// +/// let expected_path = String::from("/var/data/files"); +/// assert!(matches!(local_path, Url::LocalFolder(expected_path))); +/// ``` +#[proc_macro] +pub fn local_folder(input: TokenStream) -> TokenStream { + let input_lit = parse_macro_input!(input as LitStr); + let expanded = quote! { + ::harmony_types::net::Url::LocalFolder(#input_lit.to_string()) + }; + TokenStream::from(expanded) +} diff --git a/harmony_types/src/net.rs b/harmony_types/src/net.rs index caf023f..06a785a 100644 --- a/harmony_types/src/net.rs +++ b/harmony_types/src/net.rs @@ -51,6 +51,40 @@ impl TryFrom for MacAddress { pub type IpAddress = std::net::IpAddr; +/// Represents a URL, which can either be a remote URL or a local file path. +/// +/// For convenience, the `harmony_macros` crate provides `hurl!` and `local_folder!` +/// macros to construct `Url` variants from string literals. +/// +/// # Examples +/// +/// ### Manual Construction +/// +/// The following example demonstrates how to build `Url` variants directly. This is +/// the standard approach if you are not using the `harmony_macros` crate. +/// +/// ``` +/// // The `use` statement below is for the doc test. In a real project, +/// // you would use `use harmony_types::Url;` +/// # use harmony_types::net::Url; +/// let url = Url::Url(url::Url::parse("https://example.com").unwrap()); +/// let local_path = Url::LocalFolder("/var/data".to_string()); +/// +/// assert!(matches!(url, Url::Url(_))); +/// assert!(matches!(local_path, Url::LocalFolder(_))); +/// ``` +/// +/// ### Usage with `harmony_macros` +/// +/// If `harmony_macros` is a dependency, you can create `Url`s more concisely. +/// +/// ```rust,ignore +/// use harmony_macros::{hurl, local_folder}; +/// use harmony_types::Url; +/// +/// let hurl = hurl!("https://example.com"); +/// let local_path = local_folder!("/var/data"); +/// ``` #[derive(Debug, Clone)] pub enum Url { LocalFolder(String), From da5a869771f392866e9c1ba4bb185f073c239fd0 Mon Sep 17 00:00:00 2001 From: Jean-Gabriel Gill-Couture Date: Mon, 8 Sep 2025 19:06:17 +0000 Subject: [PATCH 11/11] feat(opnsense-config): dnsmasq dhcp static mappings (#130) Co-authored-by: Jean-Gabriel Gill-Couture Co-authored-by: Ian Letourneau Reviewed-on: https://git.nationtech.io/NationTech/harmony/pulls/130 Reviewed-by: Ian Letourneau Co-authored-by: Jean-Gabriel Gill-Couture Co-committed-by: Jean-Gabriel Gill-Couture --- .gitattributes | 2 + .gitignore | 1 + ...a3fd4878dc2e217dc83f9bf45a402dfd62a91.json | 20 + ...090c94a222115c543231f2140cba27bd0f067.json | 32 + ...ba8bc9d708a4fb89d5593a0be2bbebde62aff.json | 12 + Cargo.lock | 688 +++---- adr/agent_discovery/mdns/src/discover.rs | 3 +- data/okd/bin/kubectl | 3 + data/okd/bin/oc | 3 + data/okd/bin/oc_README.md | 3 + data/okd/bin/openshift-install | 3 + data/okd/bin/openshift-install_README.md | 3 + ...s-9.0.20250510-0-live-initramfs.x86_64.img | 3 + .../scos-9.0.20250510-0-live-kernel.x86_64 | 3 + ...scos-9.0.20250510-0-live-rootfs.x86_64.img | 3 + .../scos-live-initramfs.x86_64.img | 1 + .../installer_image/scos-live-kernel.x86_64 | 1 + .../scos-live-rootfs.x86_64.img | 1 + docs/OKD_Host_preparation.md | 8 + examples/cli/src/main.rs | 4 +- examples/nanodc/Cargo.toml | 1 + examples/nanodc/src/main.rs | 24 +- examples/okd_installation/Cargo.toml | 21 + examples/okd_installation/env.sh | 4 + examples/okd_installation/src/main.rs | 34 + examples/okd_installation/src/topology.rs | 77 + examples/okd_installation/ssh_example_key | 7 + examples/okd_installation/ssh_example_key.pub | 1 + examples/okd_pxe/src/main.rs | 16 +- examples/okd_pxe/src/topology.rs | 14 +- examples/opnsense/src/main.rs | 3 +- examples/tui/src/main.rs | 2 + harmony/Cargo.toml | 4 +- .../src/domain/{config.rs => config/mod.rs} | 2 + harmony/src/domain/config/secret.rs | 20 + harmony/src/domain/hardware/mod.rs | 116 +- harmony/src/domain/interpret/mod.rs | 6 + harmony/src/domain/inventory/mod.rs | 12 +- harmony/src/domain/inventory/repository.rs | 15 +- harmony/src/domain/topology/ha_cluster.rs | 49 +- harmony/src/domain/topology/http.rs | 6 +- harmony/src/domain/topology/load_balancer.rs | 11 +- harmony/src/domain/topology/network.rs | 13 +- harmony/src/infra/inventory/sqlite.rs | 94 +- harmony/src/infra/opnsense/dhcp.rs | 19 +- harmony/src/infra/opnsense/dns.rs | 82 +- harmony/src/infra/opnsense/http.rs | 12 +- harmony/src/infra/opnsense/load_balancer.rs | 35 +- harmony/src/modules/dhcp.rs | 175 +- harmony/src/modules/http.rs | 42 +- harmony/src/modules/inventory/discovery.rs | 122 ++ harmony/src/modules/inventory/inspect.rs | 72 + harmony/src/modules/inventory/mod.rs | 17 +- harmony/src/modules/ipxe.rs | 67 - harmony/src/modules/mod.rs | 1 - .../src/modules/okd/bootstrap_01_prepare.rs | 120 ++ .../src/modules/okd/bootstrap_02_bootstrap.rs | 387 ++++ .../modules/okd/bootstrap_03_control_plane.rs | 277 +++ .../src/modules/okd/bootstrap_04_workers.rs | 102 + .../modules/okd/bootstrap_05_sanity_check.rs | 101 + .../okd/bootstrap_06_installation_report.rs | 101 + harmony/src/modules/okd/bootstrap_dhcp.rs | 32 +- .../modules/okd/bootstrap_load_balancer.rs | 12 +- harmony/src/modules/okd/dhcp.rs | 15 + harmony/src/modules/okd/installation.rs | 799 +------- harmony/src/modules/okd/ipxe.rs | 39 +- harmony/src/modules/okd/load_balancer.rs | 3 +- harmony/src/modules/okd/mod.rs | 13 + harmony/src/modules/okd/templates.rs | 20 + .../ceph/ceph_validate_health_score.rs | 2 +- harmony/templates/boot.ipxe.j2 | 59 +- harmony/templates/okd/bootstrap.ipxe.j2 | 52 + harmony/templates/okd/install-config.yaml.j2 | 24 + harmony_secret/Cargo.toml | 1 + harmony_secret/src/lib.rs | 38 + harmony_secret/src/store/local_file.rs | 4 +- harmony_types/src/id.rs | 6 + harmony_types/src/net.rs | 2 +- .../20250902035357_Host_role_mapping.sql | 5 + opnsense-config-xml/src/data/dnsmasq.rs | 21 + opnsense-config-xml/src/data/opnsense.rs | 13 +- opnsense-config/Cargo.toml | 1 + opnsense-config/src/config/config.rs | 51 +- .../src/config/manager/local_file.rs | 13 +- opnsense-config/src/config/manager/mod.rs | 6 +- opnsense-config/src/config/manager/ssh.rs | 37 +- opnsense-config/src/config/shell/ssh.rs | 2 +- opnsense-config/src/modules/dhcp.rs | 2 +- opnsense-config/src/modules/dns.rs | 4 +- opnsense-config/src/modules/dnsmasq.rs | 542 +++++- .../data/config-25.7-dnsmasq-static-host.xml | 1674 +++++++++++++++++ .../src/tests/data/config-full-1.xml | 1 - ...ig-structure-with-dhcp-staticmap-entry.xml | 1 - .../src/tests/data/config-structure.xml | 1 - 94 files changed, 5107 insertions(+), 1469 deletions(-) create mode 100644 .sqlx/query-2ea29df2326f7c84bd4100ad510a3fd4878dc2e217dc83f9bf45a402dfd62a91.json create mode 100644 .sqlx/query-8d247918eca10a88b784ee353db090c94a222115c543231f2140cba27bd0f067.json create mode 100644 .sqlx/query-df7a7c9cfdd0972e2e0ce7ea444ba8bc9d708a4fb89d5593a0be2bbebde62aff.json create mode 100755 data/okd/bin/kubectl create mode 100755 data/okd/bin/oc create mode 100644 data/okd/bin/oc_README.md create mode 100755 data/okd/bin/openshift-install create mode 100644 data/okd/bin/openshift-install_README.md create mode 100644 data/okd/installer_image/scos-9.0.20250510-0-live-initramfs.x86_64.img create mode 100644 data/okd/installer_image/scos-9.0.20250510-0-live-kernel.x86_64 create mode 100644 data/okd/installer_image/scos-9.0.20250510-0-live-rootfs.x86_64.img create mode 120000 data/okd/installer_image/scos-live-initramfs.x86_64.img create mode 120000 data/okd/installer_image/scos-live-kernel.x86_64 create mode 120000 data/okd/installer_image/scos-live-rootfs.x86_64.img create mode 100644 docs/OKD_Host_preparation.md create mode 100644 examples/okd_installation/Cargo.toml create mode 100644 examples/okd_installation/env.sh create mode 100644 examples/okd_installation/src/main.rs create mode 100644 examples/okd_installation/src/topology.rs create mode 100644 examples/okd_installation/ssh_example_key create mode 100644 examples/okd_installation/ssh_example_key.pub rename harmony/src/domain/{config.rs => config/mod.rs} (98%) create mode 100644 harmony/src/domain/config/secret.rs create mode 100644 harmony/src/modules/inventory/discovery.rs create mode 100644 harmony/src/modules/inventory/inspect.rs delete mode 100644 harmony/src/modules/ipxe.rs create mode 100644 harmony/src/modules/okd/bootstrap_01_prepare.rs create mode 100644 harmony/src/modules/okd/bootstrap_02_bootstrap.rs create mode 100644 harmony/src/modules/okd/bootstrap_03_control_plane.rs create mode 100644 harmony/src/modules/okd/bootstrap_04_workers.rs create mode 100644 harmony/src/modules/okd/bootstrap_05_sanity_check.rs create mode 100644 harmony/src/modules/okd/bootstrap_06_installation_report.rs create mode 100644 harmony/src/modules/okd/templates.rs create mode 100644 harmony/templates/okd/bootstrap.ipxe.j2 create mode 100644 harmony/templates/okd/install-config.yaml.j2 create mode 100644 migrations/20250902035357_Host_role_mapping.sql create mode 100644 opnsense-config/src/tests/data/config-25.7-dnsmasq-static-host.xml diff --git a/.gitattributes b/.gitattributes index e5e8283..475c220 100644 --- a/.gitattributes +++ b/.gitattributes @@ -2,3 +2,5 @@ bootx64.efi filter=lfs diff=lfs merge=lfs -text grubx64.efi filter=lfs diff=lfs merge=lfs -text initrd filter=lfs diff=lfs merge=lfs -text linux filter=lfs diff=lfs merge=lfs -text +data/okd/bin/* filter=lfs diff=lfs merge=lfs -text +data/okd/installer_image/* filter=lfs diff=lfs merge=lfs -text diff --git a/.gitignore b/.gitignore index 149050f..3850d09 100644 --- a/.gitignore +++ b/.gitignore @@ -3,6 +3,7 @@ private_repos/ ### Harmony ### harmony.log +data/okd/installation_files* ### Helm ### # Chart dependencies diff --git a/.sqlx/query-2ea29df2326f7c84bd4100ad510a3fd4878dc2e217dc83f9bf45a402dfd62a91.json b/.sqlx/query-2ea29df2326f7c84bd4100ad510a3fd4878dc2e217dc83f9bf45a402dfd62a91.json new file mode 100644 index 0000000..4245c23 --- /dev/null +++ b/.sqlx/query-2ea29df2326f7c84bd4100ad510a3fd4878dc2e217dc83f9bf45a402dfd62a91.json @@ -0,0 +1,20 @@ +{ + "db_name": "SQLite", + "query": "SELECT host_id FROM host_role_mapping WHERE role = ?", + "describe": { + "columns": [ + { + "name": "host_id", + "ordinal": 0, + "type_info": "Text" + } + ], + "parameters": { + "Right": 1 + }, + "nullable": [ + false + ] + }, + "hash": "2ea29df2326f7c84bd4100ad510a3fd4878dc2e217dc83f9bf45a402dfd62a91" +} diff --git a/.sqlx/query-8d247918eca10a88b784ee353db090c94a222115c543231f2140cba27bd0f067.json b/.sqlx/query-8d247918eca10a88b784ee353db090c94a222115c543231f2140cba27bd0f067.json new file mode 100644 index 0000000..0b92e37 --- /dev/null +++ b/.sqlx/query-8d247918eca10a88b784ee353db090c94a222115c543231f2140cba27bd0f067.json @@ -0,0 +1,32 @@ +{ + "db_name": "SQLite", + "query": "\n SELECT\n p1.id,\n p1.version_id,\n p1.data as \"data: Json\"\n FROM\n physical_hosts p1\n INNER JOIN (\n SELECT\n id,\n MAX(version_id) AS max_version\n FROM\n physical_hosts\n GROUP BY\n id\n ) p2 ON p1.id = p2.id AND p1.version_id = p2.max_version\n ", + "describe": { + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Text" + }, + { + "name": "version_id", + "ordinal": 1, + "type_info": "Text" + }, + { + "name": "data: Json", + "ordinal": 2, + "type_info": "Blob" + } + ], + "parameters": { + "Right": 0 + }, + "nullable": [ + false, + false, + false + ] + }, + "hash": "8d247918eca10a88b784ee353db090c94a222115c543231f2140cba27bd0f067" +} diff --git a/.sqlx/query-df7a7c9cfdd0972e2e0ce7ea444ba8bc9d708a4fb89d5593a0be2bbebde62aff.json b/.sqlx/query-df7a7c9cfdd0972e2e0ce7ea444ba8bc9d708a4fb89d5593a0be2bbebde62aff.json new file mode 100644 index 0000000..eb799e9 --- /dev/null +++ b/.sqlx/query-df7a7c9cfdd0972e2e0ce7ea444ba8bc9d708a4fb89d5593a0be2bbebde62aff.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "\n INSERT INTO host_role_mapping (host_id, role)\n VALUES (?, ?)\n ", + "describe": { + "columns": [], + "parameters": { + "Right": 2 + }, + "nullable": [] + }, + "hash": "df7a7c9cfdd0972e2e0ce7ea444ba8bc9d708a4fb89d5593a0be2bbebde62aff" +} diff --git a/Cargo.lock b/Cargo.lock index 62d8aee..e87eede 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8,7 +8,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f7b0a21988c1bf877cf4759ef5ddaac04c1c9fe808c9142ecb78ba97d97a28a" dependencies = [ - "bitflags 2.9.1", + "bitflags 2.9.4", "bytes", "futures-core", "futures-sink", @@ -21,16 +21,16 @@ dependencies = [ [[package]] name = "actix-http" -version = "3.11.0" +version = "3.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44dfe5c9e0004c623edc65391dfd51daa201e7e30ebd9c9bedf873048ec32bc2" +checksum = "44cceded2fb55f3c4b67068fa64962e2ca59614edc5b03167de9ff82ae803da0" dependencies = [ "actix-codec", "actix-rt", "actix-service", "actix-utils", "base64 0.22.1", - "bitflags 2.9.1", + "bitflags 2.9.4", "brotli", "bytes", "bytestring", @@ -39,7 +39,7 @@ dependencies = [ "flate2", "foldhash", "futures-core", - "h2 0.3.26", + "h2 0.3.27", "http 0.2.12", "httparse", "httpdate", @@ -49,7 +49,7 @@ dependencies = [ "mime", "percent-encoding", "pin-project-lite", - "rand 0.9.1", + "rand 0.9.2", "sha1", "smallvec", "tokio", @@ -65,7 +65,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e01ed3140b2f8d422c68afa1ed2e85d996ea619c988ac834d255db32138655cb" dependencies = [ "quote", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] @@ -85,9 +85,9 @@ dependencies = [ [[package]] name = "actix-rt" -version = "2.10.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24eda4e2a6e042aa4e55ac438a2ae052d3b5da0ecf83d7411e1a368946925208" +checksum = "92589714878ca59a7626ea19734f0e07a6a875197eec751bb5d3f99e64998c63" dependencies = [ "futures-core", "tokio", @@ -182,7 +182,7 @@ dependencies = [ "actix-router", "proc-macro2", "quote", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] @@ -311,9 +311,9 @@ checksum = "6b3568b48b7cefa6b8ce125f9bb4989e52fbcc29ebea88df04cc7c5f12f70455" [[package]] name = "anstream" -version = "0.6.19" +version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "301af1932e46185686725e0fad2f8f2aa7da69dd70bf6ecc44d6b703844a3933" +checksum = "3ae563653d1938f79b1ab1b5e668c87c76a9930414574a6583a7b7e11a8e6192" dependencies = [ "anstyle", "anstyle-parse", @@ -341,29 +341,29 @@ dependencies = [ [[package]] name = "anstyle-query" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c8bdeb6047d8983be085bab0ba1472e6dc604e7041dbf6fcd5e71523014fae9" +checksum = "9e231f6134f61b71076a3eab506c379d4f36122f2af15a9ff04415ea4c3339e2" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] name = "anstyle-wincon" -version = "3.0.9" +version = "3.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "403f75924867bb1033c59fbf0797484329750cfbe3c4325cd33127941fabc882" +checksum = "3e0633414522a32ffaac8ac6cc8f748e090c5717661fddeea04219e2344f5f2a" dependencies = [ "anstyle", "once_cell_polyfill", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] name = "anyhow" -version = "1.0.98" +version = "1.0.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" +checksum = "b0674a1ddeecb70197781e945de4b3b8ffb61fa939a5597bcf48503737663100" [[package]] name = "arc-swap" @@ -398,7 +398,7 @@ dependencies = [ "rustc-hash", "serde", "serde_derive", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] @@ -460,18 +460,18 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] name = "async-trait" -version = "0.1.88" +version = "0.1.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" +checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] @@ -497,9 +497,9 @@ checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "backon" -version = "1.5.1" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "302eaff5357a264a2c42f127ecb8bac761cf99749fc3dc95677e2743991f99e7" +checksum = "592277618714fbcecda9a02ba7a8781f319d26532a88553bbacc77ba5d2b3a8d" dependencies = [ "fastrand", "gloo-timers", @@ -573,9 +573,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.9.1" +version = "2.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" +checksum = "2261d10cca569e4643e526d8dc2e62e433cc8aba21ab764233731f8d369bf394" dependencies = [ "serde", ] @@ -622,9 +622,9 @@ dependencies = [ [[package]] name = "bollard" -version = "0.19.1" +version = "0.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "899ca34eb6924d6ec2a77c6f7f5c7339e60fd68235eaf91edd5a15f12958bb06" +checksum = "8796b390a5b4c86f9f2e8173a68c2791f4fa6b038b84e96dbc01c016d1e6722c" dependencies = [ "base64 0.22.1", "bollard-stubs", @@ -634,7 +634,7 @@ dependencies = [ "hex", "http 1.3.1", "http-body-util", - "hyper 1.6.0", + "hyper 1.7.0", "hyper-named-pipe", "hyper-util", "hyperlocal", @@ -645,7 +645,7 @@ dependencies = [ "serde_json", "serde_repr", "serde_urlencoded", - "thiserror 2.0.14", + "thiserror 2.0.16", "tokio", "tokio-util", "tower-service", @@ -655,9 +655,9 @@ dependencies = [ [[package]] name = "bollard-stubs" -version = "1.48.3-rc.28.0.4" +version = "1.49.0-rc.28.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64ea257e555d16a2c01e5593f40b73865cdf12efbceda33c6d14a2d8d1490368" +checksum = "2e7814991259013d5a5bee4ae28657dae0747d843cf06c40f7fc0c2894d6fa38" dependencies = [ "serde", "serde_json", @@ -732,9 +732,9 @@ checksum = "4964518bd3b4a8190e832886cdc0da9794f12e8e6c1613a9e90ff331c4c8724b" [[package]] name = "camino" -version = "1.1.10" +version = "1.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0da45bc31171d8d6960122e222a67740df867c1dd53b4d51caa297084c185cab" +checksum = "dd0b03af37dad7a14518b7691d81acb0f8222604ad3d1b02f6b4bed5188c0cd5" dependencies = [ "serde", ] @@ -776,7 +776,7 @@ dependencies = [ "semver", "serde", "serde_json", - "thiserror 2.0.14", + "thiserror 2.0.16", ] [[package]] @@ -787,9 +787,9 @@ checksum = "df8670b8c7b9dae1793364eafadf7239c40d669904660c5960d74cfd80b46a53" [[package]] name = "castaway" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0abae9be0aaf9ea96a3b1b8b1b55c602ca751eba1b1500220cea4ecbafe7c0d5" +checksum = "dec551ab6e7578819132c713a93c022a05d60159dc86e7a7050223577484c55a" dependencies = [ "rustversion", ] @@ -805,10 +805,11 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.27" +version = "1.2.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d487aa071b5f64da6f19a3e848e3578944b726ee5a4854b82172f02aa876bfdc" +checksum = "590f9024a68a8c40351881787f1934dc11afd69090f5edb6831464694d836ea3" dependencies = [ + "find-msvc-tools", "jobserver", "libc", "shlex", @@ -816,9 +817,9 @@ dependencies = [ [[package]] name = "cfg-if" -version = "1.0.1" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" +checksum = "2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9" [[package]] name = "cfg_aliases" @@ -873,9 +874,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.46" +version = "4.5.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c5e4fcf9c21d2e544ca1ee9d8552de13019a42aa7dbf32747fa7aaf1df76e57" +checksum = "7eac00902d9d136acd712710d71823fb8ac8004ca445a89e73a41d45aa712931" dependencies = [ "clap_builder", "clap_derive", @@ -883,9 +884,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.46" +version = "4.5.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fecb53a0e6fcfb055f686001bc2e2592fa527efaf38dbe81a6a9563562e57d41" +checksum = "2ad9bbf750e73b5884fb8a211a9424a1906c1e156724260fdae972f31d70e1d6" dependencies = [ "anstream", "anstyle", @@ -895,14 +896,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.45" +version = "4.5.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14cb31bb0a7d536caef2639baa7fad459e15c3144efefa6dbd1c84562c4739f6" +checksum = "bbfd7eae0b0f1a6e63d4b13c9c478de77c2eb546fba158ad50b4203dc24b9f9c" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] @@ -1078,9 +1079,9 @@ checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" [[package]] name = "crc32fast" -version = "1.4.2" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" +checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" dependencies = [ "cfg-if", ] @@ -1150,7 +1151,7 @@ version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "829d955a0bb380ef178a640b91779e3987da38c9aea133b20614cfed8cdea9c6" dependencies = [ - "bitflags 2.9.1", + "bitflags 2.9.4", "crossterm_winapi", "futures-core", "mio 1.0.4", @@ -1238,7 +1239,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] @@ -1262,7 +1263,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] @@ -1273,7 +1274,7 @@ checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ "darling_core", "quote", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] @@ -1295,9 +1296,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.4.0" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c9e6a11ca8224451684bc0d7d5a7adbf8f2fd6887261a1cfc3c0432f9d4068e" +checksum = "d630bccd429a5bb5a64b5e94f693bfc48c9f8566418fda4c494cc94f911f87cc" dependencies = [ "powerfmt", "serde", @@ -1311,7 +1312,7 @@ checksum = "2cdc8d50f426189eef89dac62fabfa0abb27d5cc008f25bf4156a0203325becc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] @@ -1331,7 +1332,7 @@ checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.105", + "syn 2.0.106", "unicode-xid", ] @@ -1397,7 +1398,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] @@ -1439,7 +1440,7 @@ dependencies = [ "anyhow", "proc-macro2", "quote", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] @@ -1450,9 +1451,9 @@ checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" [[package]] name = "dyn-clone" -version = "1.0.19" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c7a8fb8a9fbf66c1f703fe16184d10ca0ee9d23be5b4436400408ba54a95005" +checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" [[package]] name = "ecdsa" @@ -1480,9 +1481,9 @@ dependencies = [ [[package]] name = "ed25519-dalek" -version = "2.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" +checksum = "70e796c081cee67dc755e1a36a0a172b897fab85fc3f6bc48307991f64e4eca9" dependencies = [ "curve25519-dalek", "ed25519", @@ -1502,7 +1503,7 @@ dependencies = [ "enum-ordinalize", "proc-macro2", "quote", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] @@ -1576,7 +1577,7 @@ checksum = "0d28318a75d4aead5c4db25382e8ef717932d0346600cacae6357eb5941bc5ff" dependencies = [ "proc-macro2", "quote", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] @@ -1641,9 +1642,9 @@ dependencies = [ [[package]] name = "event-listener" -version = "5.4.0" +version = "5.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3492acde4c3fc54c845eaab3eed8bd00c7a7d881f78bfc801e43a93dec1331ae" +checksum = "e13b66accf52311f30a0db42147dadea9850cb48cd070028831ae5f5d4b856ab" dependencies = [ "concurrent-queue", "parking", @@ -1758,6 +1759,7 @@ dependencies = [ "env_logger", "harmony", "harmony_macros", + "harmony_secret", "harmony_tui", "harmony_types", "log", @@ -1775,6 +1777,24 @@ dependencies = [ "url", ] +[[package]] +name = "example-okd-install" +version = "0.1.0" +dependencies = [ + "cidr", + "env_logger", + "harmony", + "harmony_cli", + "harmony_macros", + "harmony_secret", + "harmony_secret_derive", + "harmony_types", + "log", + "serde", + "tokio", + "url", +] + [[package]] name = "example-opnsense" version = "0.1.0" @@ -1918,16 +1938,22 @@ checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" [[package]] name = "filetime" -version = "0.2.25" +version = "0.2.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35c0522e981e68cbfa8c3f978441a5f34b30b96e146b33cd3359176b50fe8586" +checksum = "bc0505cd1b6fa6580283f6bdf70a73fcf4aba1184038c90902b92b3dd0df63ed" dependencies = [ "cfg-if", "libc", "libredox", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] +[[package]] +name = "find-msvc-tools" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e178e4fba8a2726903f6ba98a6d221e76f9c12c650d5dc0e6afdc50677b49650" + [[package]] name = "flate2" version = "1.1.2" @@ -1975,9 +2001,9 @@ checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" [[package]] name = "form_urlencoded" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" dependencies = [ "percent-encoding", ] @@ -2065,7 +2091,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] @@ -2178,7 +2204,7 @@ dependencies = [ "js-sys", "libc", "r-efi", - "wasi 0.14.2+wasi-0.2.4", + "wasi 0.14.3+wasi-0.2.4", "wasm-bindgen", ] @@ -2223,9 +2249,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" +checksum = "0beca50380b1fc32983fc1cb4587bfa4bb9e78fc259aad4a0032d2080309222d" dependencies = [ "bytes", "fnv", @@ -2233,7 +2259,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.10.0", + "indexmap 2.11.0", "slab", "tokio", "tokio-util", @@ -2242,9 +2268,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.10" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9421a676d1b147b16b82c9225157dc629087ef8ec4d5e2960f9437a90dac0a5" +checksum = "f3c0b69cfcb4e1b9f1bf2f53f95f766e4661169728ec61cd3fe5a0166f2d1386" dependencies = [ "atomic-waker", "bytes", @@ -2252,7 +2278,7 @@ dependencies = [ "futures-core", "futures-sink", "http 1.3.1", - "indexmap 2.10.0", + "indexmap 2.11.0", "slab", "tokio", "tokio-util", @@ -2280,11 +2306,13 @@ dependencies = [ "futures-util", "harmony_inventory_agent", "harmony_macros", + "harmony_secret", "harmony_secret_derive", "harmony_types", "helm-wrapper-rs", "hex", "http 1.3.1", + "inquire", "k3d-rs", "k8s-openapi", "kube", @@ -2308,12 +2336,12 @@ dependencies = [ "serde_yaml", "similar", "sqlx", - "strum 0.27.1", + "strum 0.27.2", "tar", "temp-dir", "temp-file", "tempfile", - "thiserror 2.0.14", + "thiserror 2.0.16", "tokio", "tokio-util", "url", @@ -2371,11 +2399,11 @@ dependencies = [ "local-ip-address", "log", "mdns-sd 0.14.1 (git+https://github.com/jggc/mdns-sd.git?branch=patch-1)", - "reqwest 0.12.20", + "reqwest 0.12.23", "serde", "serde_json", "sysinfo", - "thiserror 2.0.14", + "thiserror 2.0.16", "tokio", ] @@ -2388,7 +2416,7 @@ dependencies = [ "quote", "serde", "serde_yaml", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] @@ -2400,13 +2428,14 @@ dependencies = [ "harmony_secret_derive", "http 1.3.1", "infisical", + "inquire", "lazy_static", "log", "pretty_assertions", "serde", "serde_json", "tempfile", - "thiserror 2.0.14", + "thiserror 2.0.16", "tokio", ] @@ -2417,7 +2446,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] @@ -2442,7 +2471,7 @@ dependencies = [ name = "harmony_types" version = "0.1.0" dependencies = [ - "rand 0.9.1", + "rand 0.9.2", "serde", "url", ] @@ -2455,9 +2484,9 @@ checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] name = "hashbrown" -version = "0.15.4" +version = "0.15.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5" +checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" dependencies = [ "allocator-api2", "equivalent", @@ -2470,7 +2499,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1" dependencies = [ - "hashbrown 0.15.4", + "hashbrown 0.15.5", ] [[package]] @@ -2513,7 +2542,7 @@ dependencies = [ "non-blank-string-rs", "serde", "serde_json", - "thiserror 2.0.14", + "thiserror 2.0.16", ] [[package]] @@ -2653,7 +2682,7 @@ dependencies = [ "futures", "http 1.3.1", "http-body-util", - "hyper 1.6.0", + "hyper 1.7.0", "hyper-util", "log", "once_cell", @@ -2674,7 +2703,7 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", - "h2 0.3.26", + "h2 0.3.27", "http 0.2.12", "http-body 0.4.6", "httparse", @@ -2690,20 +2719,22 @@ dependencies = [ [[package]] name = "hyper" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc2b571658e38e0c01b1fdca3bbbe93c00d3d71693ff2770043f8c29bc7d6f80" +checksum = "eb3aa54a13a0dfe7fbe3a59e0c76093041720fdc77b110cc0fc260fafb4dc51e" dependencies = [ + "atomic-waker", "bytes", "futures-channel", - "futures-util", - "h2 0.4.10", + "futures-core", + "h2 0.4.12", "http 1.3.1", "http-body 1.0.1", "httparse", "httpdate", "itoa", "pin-project-lite", + "pin-utils", "smallvec", "tokio", "want", @@ -2719,7 +2750,7 @@ dependencies = [ "futures-util", "headers", "http 1.3.1", - "hyper 1.6.0", + "hyper 1.7.0", "hyper-rustls 0.27.7", "hyper-util", "pin-project-lite", @@ -2736,7 +2767,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73b7d8abf35697b81a825e386fc151e0d503e8cb5fcb93cc8669c376dfd6f278" dependencies = [ "hex", - "hyper 1.6.0", + "hyper 1.7.0", "hyper-util", "pin-project-lite", "tokio", @@ -2765,10 +2796,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" dependencies = [ "http 1.3.1", - "hyper 1.6.0", + "hyper 1.7.0", "hyper-util", "log", - "rustls 0.23.28", + "rustls 0.23.31", "rustls-native-certs 0.8.1", "rustls-pki-types", "tokio", @@ -2783,7 +2814,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" dependencies = [ - "hyper 1.6.0", + "hyper 1.7.0", "hyper-util", "pin-project-lite", "tokio", @@ -2792,9 +2823,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.14" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc2fdfdbff08affe55bb779f33b053aa1fe5dd5b54c257343c17edfa55711bdb" +checksum = "8d9b05277c7e8da2c93a568989bb6207bef0112e8d17df7a6eda4a3cf143bc5e" dependencies = [ "base64 0.22.1", "bytes", @@ -2803,12 +2834,12 @@ dependencies = [ "futures-util", "http 1.3.1", "http-body 1.0.1", - "hyper 1.6.0", + "hyper 1.7.0", "ipnet", "libc", "percent-encoding", "pin-project-lite", - "socket2 0.5.10", + "socket2 0.6.0", "tokio", "tower-service", "tracing", @@ -2822,7 +2853,7 @@ checksum = "986c5ce3b994526b3cd75578e62554abd09f0899d6206de48b3e96ab34ccc8c7" dependencies = [ "hex", "http-body-util", - "hyper 1.6.0", + "hyper 1.7.0", "hyper-util", "pin-project-lite", "tokio", @@ -2947,9 +2978,9 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" [[package]] name = "idna" -version = "1.0.3" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" +checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" dependencies = [ "idna_adapter", "smallvec", @@ -2984,9 +3015,9 @@ checksum = "e8a5a9a0ff0086c7a148acb942baaabeadf9504d10400b5a05645853729b9cd2" [[package]] name = "indenter" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" +checksum = "964de6e86d545b246d84badc0fef527924ace5134f30641c203ef52ba83f58d5" [[package]] name = "indexmap" @@ -3001,12 +3032,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.10.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661" +checksum = "f2481980430f9f78649238835720ddccc57e52df14ffce1c6f37391d61b563e9" dependencies = [ "equivalent", - "hashbrown 0.15.4", + "hashbrown 0.15.5", "serde", ] @@ -3045,7 +3076,7 @@ version = "0.0.2" source = "git+https://github.com/jggc/rust-sdk.git?branch=patch-1#30d820194d29491411bd14f6c2e18ec500bb0b14" dependencies = [ "base64 0.22.1", - "reqwest 0.12.20", + "reqwest 0.12.23", "serde", "serde_json", "thiserror 1.0.69", @@ -3069,7 +3100,7 @@ version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fddf93031af70e75410a2511ec04d49e758ed2f26dad3404a934e0fb45cc12a" dependencies = [ - "bitflags 2.9.1", + "bitflags 2.9.4", "crossterm 0.25.0", "dyn-clone", "fuzzy-matcher", @@ -3082,24 +3113,24 @@ dependencies = [ [[package]] name = "instability" -version = "0.3.7" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bf9fed6d91cfb734e7476a06bde8300a1b94e217e1b523b6f0cd1a01998c71d" +checksum = "435d80800b936787d62688c927b6490e887c7ef5ff9ce922c6c6050fca75eb9a" dependencies = [ "darling", "indoc", "proc-macro2", "quote", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] name = "io-uring" -version = "0.7.9" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d93587f37623a1a17d94ef2bc9ada592f5465fe7732084ab7beefabe5c77c0c4" +checksum = "046fa2d4d00aea763528b4950358d0ead425372445dc8ff86312b3c69ff7727b" dependencies = [ - "bitflags 2.9.1", + "bitflags 2.9.4", "cfg-if", "libc", ] @@ -3162,14 +3193,14 @@ checksum = "03343451ff899767262ec32146f6d559dd759fdadf42ff0e227c7c48f72594b4" dependencies = [ "proc-macro2", "quote", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] name = "jobserver" -version = "0.1.33" +version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38f262f097c174adebe41eb73d66ae9c06b2844fb0da69969647bbddd9b0538a" +checksum = "9afb3de4395d6b3e67a780b6de64b51c978ecf11cb9a462c66be7d4ca9039d33" dependencies = [ "getrandom 0.3.3", "libc", @@ -3207,7 +3238,7 @@ dependencies = [ "pest_derive", "regex", "serde_json", - "thiserror 2.0.14", + "thiserror 2.0.16", ] [[package]] @@ -3248,7 +3279,7 @@ dependencies = [ "octocrab", "pretty_assertions", "regex", - "reqwest 0.12.20", + "reqwest 0.12.23", "sha2", "tokio", "url", @@ -3294,7 +3325,7 @@ dependencies = [ "http 1.3.1", "http-body 1.0.1", "http-body-util", - "hyper 1.6.0", + "hyper 1.7.0", "hyper-http-proxy", "hyper-rustls 0.27.7", "hyper-timeout", @@ -3303,12 +3334,12 @@ dependencies = [ "k8s-openapi", "kube-core", "pem", - "rustls 0.23.28", + "rustls 0.23.31", "secrecy", "serde", "serde_json", "serde_yaml", - "thiserror 2.0.14", + "thiserror 2.0.16", "tokio", "tokio-tungstenite", "tokio-util", @@ -3333,7 +3364,7 @@ dependencies = [ "serde", "serde-value", "serde_json", - "thiserror 2.0.14", + "thiserror 2.0.16", ] [[package]] @@ -3347,7 +3378,7 @@ dependencies = [ "quote", "serde", "serde_json", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] @@ -3362,7 +3393,7 @@ dependencies = [ "backon", "educe", "futures", - "hashbrown 0.15.4", + "hashbrown 0.15.5", "hostname", "json-patch", "k8s-openapi", @@ -3371,7 +3402,7 @@ dependencies = [ "pin-project", "serde", "serde_json", - "thiserror 2.0.14", + "thiserror 2.0.16", "tokio", "tokio-util", "tracing", @@ -3394,9 +3425,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.174" +version = "0.2.175" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" +checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543" [[package]] name = "libm" @@ -3406,11 +3437,11 @@ checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" [[package]] name = "libredox" -version = "0.1.4" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1580801010e535496706ba011c15f8532df6b42297d2e471fec38ceadd8c0638" +checksum = "391290121bad3d37fbddad76d8f5d1c1c314cfc646d143d7e07a3086ddff0ce3" dependencies = [ - "bitflags 2.9.1", + "bitflags 2.9.4", "libc", "redox_syscall", ] @@ -3463,7 +3494,7 @@ checksum = "656b3b27f8893f7bbf9485148ff9a65f019e3f33bd5cdc87c83cab16b3fd9ec8" dependencies = [ "libc", "neli", - "thiserror 2.0.14", + "thiserror 2.0.16", "windows-sys 0.59.0", ] @@ -3513,7 +3544,7 @@ version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ - "hashbrown 0.15.4", + "hashbrown 0.15.5", ] [[package]] @@ -3780,7 +3811,7 @@ dependencies = [ "http 1.3.1", "http-body 1.0.1", "http-body-util", - "hyper 1.6.0", + "hyper 1.7.0", "hyper-rustls 0.27.7", "hyper-timeout", "hyper-util", @@ -3841,6 +3872,7 @@ dependencies = [ "russh-sftp", "serde", "serde_json", + "sha2", "thiserror 1.0.69", "tokio", "tokio-stream", @@ -3856,9 +3888,9 @@ dependencies = [ "env_logger", "log", "pretty_assertions", - "rand 0.9.1", + "rand 0.9.2", "serde", - "thiserror 2.0.14", + "thiserror 2.0.16", "tokio", "uuid", "xml-rs", @@ -4014,9 +4046,9 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.3.1" +version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" +checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" [[package]] name = "pest" @@ -4025,7 +4057,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1db05f56d34358a8b1066f67cbb203ee3e7ed2ba674a6263a1d5ec6db2204323" dependencies = [ "memchr", - "thiserror 2.0.14", + "thiserror 2.0.16", "ucd-trie", ] @@ -4049,7 +4081,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] @@ -4079,7 +4111,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] @@ -4178,9 +4210,9 @@ dependencies = [ [[package]] name = "potential_utf" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5a7c30837279ca13e7c867e9e40053bc68740f988cb07f7ca6df43cc734b585" +checksum = "84df19adbe5b5a0782edcab45899906947ab039ccf4573713735ee7de1e6b08a" dependencies = [ "zerovec", ] @@ -4257,9 +4289,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.95" +version = "1.0.101" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" +checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de" dependencies = [ "unicode-ident", ] @@ -4272,9 +4304,9 @@ checksum = "e9e1dcb320d6839f6edb64f7a4a59d39b30480d4d1765b56873f7c858538a5fe" [[package]] name = "quinn" -version = "0.11.8" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "626214629cda6781b6dc1d316ba307189c85ba657213ce642d9c77670f8202c8" +checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20" dependencies = [ "bytes", "cfg_aliases", @@ -4282,9 +4314,9 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash", - "rustls 0.23.28", - "socket2 0.5.10", - "thiserror 2.0.14", + "rustls 0.23.31", + "socket2 0.6.0", + "thiserror 2.0.16", "tokio", "tracing", "web-time", @@ -4292,20 +4324,20 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.11.12" +version = "0.11.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49df843a9161c85bb8aae55f101bc0bac8bcafd637a620d9122fd7e0b2f7422e" +checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31" dependencies = [ "bytes", "getrandom 0.3.3", "lru-slab", - "rand 0.9.1", + "rand 0.9.2", "ring", "rustc-hash", - "rustls 0.23.28", + "rustls 0.23.31", "rustls-pki-types", "slab", - "thiserror 2.0.14", + "thiserror 2.0.16", "tinyvec", "tracing", "web-time", @@ -4313,16 +4345,16 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.13" +version = "0.5.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcebb1209ee276352ef14ff8732e24cc2b02bbac986cd74a4c81bcb2f9881970" +checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd" dependencies = [ "cfg_aliases", "libc", "once_cell", - "socket2 0.5.10", + "socket2 0.6.0", "tracing", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] @@ -4359,9 +4391,9 @@ dependencies = [ [[package]] name = "rand" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fbfd9d094a40bf3ae768db9361049ace4c0e04a4fd6b359518bd7b73a73dd97" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" dependencies = [ "rand_chacha 0.9.0", "rand_core 0.9.3", @@ -4411,7 +4443,7 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eabd94c2f37801c20583fc49dd5cd6b0ba68c716787c2dd6ed18571e1e63117b" dependencies = [ - "bitflags 2.9.1", + "bitflags 2.9.4", "cassowary", "compact_str", "crossterm 0.28.1", @@ -4448,22 +4480,22 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.13" +version = "0.5.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d04b7d0ee6b4a0207a0a7adb104d23ecb0b47d6beae7152d0fa34b692b29fd6" +checksum = "5407465600fb0548f1442edf71dd20683c6ed326200ace4b1ef0763521bb3b77" dependencies = [ - "bitflags 2.9.1", + "bitflags 2.9.4", ] [[package]] name = "redox_users" -version = "0.5.0" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd6f9d3d47bdd2ad6945c5015a226ec6155d0bcdfd8f7cd29f86b71f8de99d2b" +checksum = "a4e608c6638b9c18977b00b475ac1f28d14e84b27d8d42f70e0bf1e3dec127ac" dependencies = [ "getrandom 0.2.16", "libredox", - "thiserror 2.0.14", + "thiserror 2.0.16", ] [[package]] @@ -4483,14 +4515,14 @@ checksum = "1165225c21bff1f3bbce98f5a1f889949bc902d3575308cc7b0de30b4f6d27c7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] name = "regex" -version = "1.11.1" +version = "1.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +checksum = "23d7fd106d8c02486a8d64e778353d1cffe08ce79ac2e82f540c86d0facf6912" dependencies = [ "aho-corasick 1.1.3", "memchr", @@ -4500,9 +4532,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +checksum = "6b9458fa0bfeeac22b5ca447c63aaf45f28439a709ccd244698632f9aa6394d6" dependencies = [ "aho-corasick 1.1.3", "memchr", @@ -4511,15 +4543,15 @@ dependencies = [ [[package]] name = "regex-lite" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53a49587ad06b26609c52e423de037e7f57f20d53535d66e08c695f347df952a" +checksum = "943f41321c63ef1c92fd763bfe054d2668f7f225a5c29f0105903dc2fc04ba30" [[package]] name = "regex-syntax" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" +checksum = "caf4aa5b0f434c91fe5c7f1ecb6a5ece2130b02ad2a590589dda5146df959001" [[package]] name = "reqwest" @@ -4532,7 +4564,7 @@ dependencies = [ "encoding_rs", "futures-core", "futures-util", - "h2 0.3.26", + "h2 0.3.27", "http 0.2.12", "http-body 0.4.6", "hyper 0.14.32", @@ -4564,20 +4596,20 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.20" +version = "0.12.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eabf4c97d9130e2bf606614eb937e86edac8292eaa6f422f995d7e8de1eb1813" +checksum = "d429f34c8092b2d42c7c93cec323bb4adeb7c67698f70839adec842ec10c7ceb" dependencies = [ "base64 0.22.1", "bytes", "futures-channel", "futures-core", "futures-util", - "h2 0.4.10", + "h2 0.4.12", "http 1.3.1", "http-body 1.0.1", "http-body-util", - "hyper 1.6.0", + "hyper 1.7.0", "hyper-rustls 0.27.7", "hyper-util", "js-sys", @@ -4585,7 +4617,7 @@ dependencies = [ "percent-encoding", "pin-project-lite", "quinn", - "rustls 0.23.28", + "rustls 0.23.31", "rustls-pki-types", "serde", "serde_json", @@ -4659,7 +4691,7 @@ dependencies = [ "aes", "aes-gcm", "async-trait", - "bitflags 2.9.1", + "bitflags 2.9.4", "byteorder", "cbc", "chacha20", @@ -4760,13 +4792,13 @@ version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3bb94393cafad0530145b8f626d8687f1ee1dedb93d7ba7740d6ae81868b13b5" dependencies = [ - "bitflags 2.9.1", + "bitflags 2.9.4", "bytes", "chrono", "flurry", "log", "serde", - "thiserror 2.0.14", + "thiserror 2.0.16", "tokio", "tokio-util", ] @@ -4788,9 +4820,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.25" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "989e6739f80c4ad5b13e0fd7fe89531180375b18520cc8c82080e4dc4035b84f" +checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" [[package]] name = "rustc-hash" @@ -4813,7 +4845,7 @@ version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ - "bitflags 2.9.1", + "bitflags 2.9.4", "errno", "libc", "linux-raw-sys 0.4.15", @@ -4822,15 +4854,15 @@ dependencies = [ [[package]] name = "rustix" -version = "1.0.7" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266" +checksum = "11181fbabf243db407ef8df94a6ce0b2f9a733bd8be4ad02b4eda9602296cac8" dependencies = [ - "bitflags 2.9.1", + "bitflags 2.9.4", "errno", "libc", "linux-raw-sys 0.9.4", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] @@ -4847,15 +4879,15 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.28" +version = "0.23.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7160e3e10bf4535308537f3c4e1641468cd0e485175d6163087c0393c7d46643" +checksum = "c0ebcbd2f03de0fc1122ad9bb24b127a5a6cd51d72604a3f3c50ac459762b6cc" dependencies = [ "log", "once_cell", "ring", "rustls-pki-types", - "rustls-webpki 0.103.3", + "rustls-webpki 0.103.4", "subtle", "zeroize", ] @@ -4882,7 +4914,7 @@ dependencies = [ "openssl-probe", "rustls-pki-types", "schannel", - "security-framework 3.2.0", + "security-framework 3.3.0", ] [[package]] @@ -4925,9 +4957,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.103.3" +version = "0.103.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4a72fe2bcf7a6ac6fd7d0b9e5cb68aeb7d4c0a0271730218b3e92d43b4eb435" +checksum = "0a17884ae0c1b773f1ccd2bd4a8c72f16da897310a98b0e84bf349ad5ead92fc" dependencies = [ "ring", "rustls-pki-types", @@ -4936,9 +4968,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.21" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" [[package]] name = "ryu" @@ -5009,7 +5041,7 @@ dependencies = [ "proc-macro2", "quote", "serde_derive_internals", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] @@ -5068,7 +5100,7 @@ version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ - "bitflags 2.9.1", + "bitflags 2.9.4", "core-foundation 0.9.4", "core-foundation-sys", "libc", @@ -5077,11 +5109,11 @@ dependencies = [ [[package]] name = "security-framework" -version = "3.2.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" +checksum = "80fb1d92c5028aa318b4b8bd7302a5bfcf48be96a37fc6fc790f806b0004ee0c" dependencies = [ - "bitflags 2.9.1", + "bitflags 2.9.4", "core-foundation 0.10.1", "core-foundation-sys", "libc", @@ -5124,9 +5156,9 @@ dependencies = [ [[package]] name = "serde-untagged" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "299d9c19d7d466db4ab10addd5703e4c615dec2a5a16dbbafe191045e87ee66e" +checksum = "34836a629bcbc6f1afdf0907a744870039b1e14c0561cb26094fa683b158eff3" dependencies = [ "erased-serde", "serde", @@ -5151,7 +5183,7 @@ checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] @@ -5162,14 +5194,14 @@ checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" dependencies = [ "proc-macro2", "quote", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] name = "serde_json" -version = "1.0.140" +version = "1.0.143" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" +checksum = "d401abef1d108fbd9cbaebc3e46611f4b1021f714a0597a71f41ee463f5f4a5a" dependencies = [ "itoa", "memchr", @@ -5195,7 +5227,7 @@ checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] @@ -5216,7 +5248,7 @@ dependencies = [ "proc-macro2", "quote", "serde", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] @@ -5241,7 +5273,7 @@ dependencies = [ "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.10.0", + "indexmap 2.11.0", "schemars 0.9.0", "schemars 1.0.4", "serde", @@ -5260,7 +5292,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] @@ -5269,7 +5301,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.10.0", + "indexmap 2.11.0", "itoa", "ryu", "serde", @@ -5337,9 +5369,9 @@ dependencies = [ [[package]] name = "signal-hook-registry" -version = "1.4.5" +version = "1.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9203b8055f63a2a00e2f593bb0510367fe707d7ff1e5c872de2f537b339e5410" +checksum = "b2a4719bff48cee6b39d12c020eeb490953ad2443b7055bd0b21fca26bd8c28b" dependencies = [ "libc", ] @@ -5368,15 +5400,15 @@ checksum = "297f631f50729c8c99b84667867963997ec0b50f32b2a7dbcab828ef0541e8bb" dependencies = [ "num-bigint", "num-traits", - "thiserror 2.0.14", + "thiserror 2.0.16", "time", ] [[package]] name = "slab" -version = "0.4.10" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04dc19736151f35336d325007ac991178d504a119863a2fcb3758cdb5e52c50d" +checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" [[package]] name = "smallvec" @@ -5389,23 +5421,23 @@ dependencies = [ [[package]] name = "snafu" -version = "0.8.6" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "320b01e011bf8d5d7a4a4a4be966d9160968935849c83b918827f6a435e7f627" +checksum = "4800ae0e2ebdfaea32ffb9745642acdc378740dcbd74d3fb3cd87572a34810c6" dependencies = [ "snafu-derive", ] [[package]] name = "snafu-derive" -version = "0.8.6" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1961e2ef424c1424204d3a5d6975f934f56b6d50ff5732382d84ebf460e147f7" +checksum = "186f5ba9999528053fb497fdf0dd330efcc69cfe4ad03776c9d704bc54fee10f" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] @@ -5487,9 +5519,9 @@ dependencies = [ "futures-intrusive", "futures-io", "futures-util", - "hashbrown 0.15.4", + "hashbrown 0.15.5", "hashlink", - "indexmap 2.10.0", + "indexmap 2.11.0", "log", "memchr", "once_cell", @@ -5498,7 +5530,7 @@ dependencies = [ "serde_json", "sha2", "smallvec", - "thiserror 2.0.14", + "thiserror 2.0.16", "tokio", "tokio-stream", "tracing", @@ -5515,7 +5547,7 @@ dependencies = [ "quote", "sqlx-core", "sqlx-macros-core", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] @@ -5538,7 +5570,7 @@ dependencies = [ "sqlx-mysql", "sqlx-postgres", "sqlx-sqlite", - "syn 2.0.105", + "syn 2.0.106", "tokio", "url", ] @@ -5551,7 +5583,7 @@ checksum = "aa003f0038df784eb8fecbbac13affe3da23b45194bd57dba231c8f48199c526" dependencies = [ "atoi", "base64 0.22.1", - "bitflags 2.9.1", + "bitflags 2.9.4", "byteorder", "bytes", "crc", @@ -5580,7 +5612,7 @@ dependencies = [ "smallvec", "sqlx-core", "stringprep", - "thiserror 2.0.14", + "thiserror 2.0.16", "tracing", "whoami", ] @@ -5593,7 +5625,7 @@ checksum = "db58fcd5a53cf07c184b154801ff91347e4c30d17a3562a635ff028ad5deda46" dependencies = [ "atoi", "base64 0.22.1", - "bitflags 2.9.1", + "bitflags 2.9.4", "byteorder", "crc", "dotenvy", @@ -5617,7 +5649,7 @@ dependencies = [ "smallvec", "sqlx-core", "stringprep", - "thiserror 2.0.14", + "thiserror 2.0.16", "tracing", "whoami", ] @@ -5641,7 +5673,7 @@ dependencies = [ "serde", "serde_urlencoded", "sqlx-core", - "thiserror 2.0.14", + "thiserror 2.0.16", "tracing", "url", ] @@ -5737,11 +5769,11 @@ dependencies = [ [[package]] name = "strum" -version = "0.27.1" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f64def088c51c9510a8579e3c5d67c65349dcf755e5479ad3d010aa6454e2c32" +checksum = "af23d6f6c1a224baef9d3f61e287d2761385a5b88fdab4eb4c6f11aeb54c4bcf" dependencies = [ - "strum_macros 0.27.1", + "strum_macros 0.27.2", ] [[package]] @@ -5754,20 +5786,19 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] name = "strum_macros" -version = "0.27.1" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c77a8c5abcaf0f9ce05d62342b7d298c346515365c36b673df4ebe3ced01fde8" +checksum = "7695ce3845ea4b33927c055a39dc438a45b059f7c1b3d91d38d10355fb8cbca7" dependencies = [ "heck", "proc-macro2", "quote", - "rustversion", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] @@ -5789,9 +5820,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.105" +version = "2.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bc3fcb250e53458e712715cf74285c1f889686520d79294a9ef3bd7aa1fc619" +checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6" dependencies = [ "proc-macro2", "quote", @@ -5833,7 +5864,7 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] @@ -5903,15 +5934,15 @@ checksum = "b5ff282c3f91797f0acb021f3af7fffa8a78601f0f2fd0a9f79ee7dcf9a9af9e" [[package]] name = "tempfile" -version = "3.20.0" +version = "3.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1" +checksum = "15b61f8f20e3a6f7e0649d825294eaf317edce30f82cf6026e7e4cb9222a7d1e" dependencies = [ "fastrand", "getrandom 0.3.3", "once_cell", - "rustix 1.0.7", - "windows-sys 0.59.0", + "rustix 1.0.8", + "windows-sys 0.60.2", ] [[package]] @@ -5931,11 +5962,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.14" +version = "2.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b0949c3a6c842cbde3f1686d6eea5a010516deb7085f79db747562d4102f41e" +checksum = "3467d614147380f2e4e374161426ff399c91084acd2363eaf549172b3d5e60c0" dependencies = [ - "thiserror-impl 2.0.14", + "thiserror-impl 2.0.16", ] [[package]] @@ -5946,18 +5977,18 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] name = "thiserror-impl" -version = "2.0.14" +version = "2.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc5b44b4ab9c2fdd0e0512e6bece8388e214c0749f5862b114cc5b7a25daf227" +checksum = "6c5e1be1c48b9172ee610da68fd9cd2770e7a4056cb3fc98710ee6906f0c7960" dependencies = [ "proc-macro2", "quote", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] @@ -5971,12 +6002,11 @@ dependencies = [ [[package]] name = "time" -version = "0.3.41" +version = "0.3.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a7619e19bc266e0f9c5e6686659d394bc57973859340060a69221e57dbc0c40" +checksum = "83bde6f1ec10e72d583d91623c939f623002284ef622b87de38cfd546cbf2031" dependencies = [ "deranged", - "itoa", "num-conv", "powerfmt", "serde", @@ -5986,15 +6016,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.4" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9e9a38711f559d9e3ce1cdb06dd7c5b8ea546bc90052da6d06bb76da74bb07c" +checksum = "40868e7c1d2f0b8d73e4a8c7f0ff63af4f6d19be117e90bd73eb1d62cf831c6b" [[package]] name = "time-macros" -version = "0.2.22" +version = "0.2.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3526739392ec93fd8b359c8e98514cb3e8e021beb4e5f597b00a0221f8ed8a49" +checksum = "30cfb0125f12d9c277f35663a0a33f8c30190f4e4574868a330595412d34ebf3" dependencies = [ "num-conv", "time-core", @@ -6021,9 +6051,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09b3661f17e86524eccd4371ab0429194e0d7c008abb45f7a7495b1719463c71" +checksum = "bfa5fdc3bce6191a1dbc8c02d5c8bffcf557bafa17c124c5264a458f1b0613fa" dependencies = [ "tinyvec_macros", ] @@ -6062,7 +6092,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] @@ -6081,7 +6111,7 @@ version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" dependencies = [ - "rustls 0.23.28", + "rustls 0.23.31", "tokio", ] @@ -6110,9 +6140,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.15" +version = "0.7.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66a539a9ad6d5d281510d5bd368c973d636c02dbf8a67300bfb6b950696ad7df" +checksum = "14307c986784f72ef81c89db7d9e28d6ac26d16213b109ea501696195e6e3ce5" dependencies = [ "bytes", "futures-core", @@ -6149,7 +6179,7 @@ version = "0.22.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ - "indexmap 2.10.0", + "indexmap 2.11.0", "serde", "serde_spanned", "toml_datetime", @@ -6187,7 +6217,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" dependencies = [ "base64 0.22.1", - "bitflags 2.9.1", + "bitflags 2.9.4", "bytes", "futures-util", "http 1.3.1", @@ -6233,7 +6263,7 @@ checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" dependencies = [ "proc-macro2", "quote", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] @@ -6258,9 +6288,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.19" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" +checksum = "2054a14f5307d601f88daf0553e1cbf472acc4f2c51afab632431cdcd72124d5" dependencies = [ "sharded-slab", "thread_local", @@ -6299,9 +6329,9 @@ dependencies = [ "http 1.3.1", "httparse", "log", - "rand 0.9.1", + "rand 0.9.2", "sha1", - "thiserror 2.0.14", + "thiserror 2.0.16", "utf-8", ] @@ -6415,9 +6445,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.5.4" +version = "2.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" +checksum = "08bc136a29a3d1758e07a9cca267be308aeebf5cfd5a10f3f67ab2097683ef5b" dependencies = [ "form_urlencoded", "idna", @@ -6445,26 +6475,26 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.17.0" +version = "1.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cf4199d1e5d15ddd86a694e4d0dffa9c323ce759fea589f00fef9d81cc1931d" +checksum = "2f87b8aa10b915a06587d0dec516c282ff295b475d94abf425d62b57710070a2" dependencies = [ "getrandom 0.3.3", "js-sys", - "rand 0.9.1", + "rand 0.9.2", "uuid-macro-internal", "wasm-bindgen", ] [[package]] name = "uuid-macro-internal" -version = "1.17.0" +version = "1.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b682e8c381995ea03130e381928e0e005b7c9eb483c6c8682f50e07b33c2b7" +checksum = "d9384a660318abfbd7f8932c34d67e4d1ec511095f95972ddc01e19d7ba8413f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] @@ -6511,11 +6541,11 @@ checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] name = "wasi" -version = "0.14.2+wasi-0.2.4" +version = "0.14.3+wasi-0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" +checksum = "6a51ae83037bdd272a9e28ce236db8c07016dd0d50c27038b3f407533c030c95" dependencies = [ - "wit-bindgen-rt", + "wit-bindgen", ] [[package]] @@ -6546,7 +6576,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.105", + "syn 2.0.106", "wasm-bindgen-shared", ] @@ -6581,7 +6611,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.105", + "syn 2.0.106", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -6722,7 +6752,7 @@ checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] @@ -6733,7 +6763,7 @@ checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] @@ -6793,7 +6823,7 @@ version = "0.60.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" dependencies = [ - "windows-targets 0.53.2", + "windows-targets 0.53.3", ] [[package]] @@ -6829,10 +6859,11 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.53.2" +version = "0.53.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c66f69fcc9ce11da9966ddb31a40968cad001c5bedeb5c2b82ede4253ab48aef" +checksum = "d5fe6031c4041849d7c496a8ded650796e7b6ecc19df1a431c1a363342e5dc91" dependencies = [ + "windows-link", "windows_aarch64_gnullvm 0.53.0", "windows_aarch64_msvc 0.53.0", "windows_i686_gnu 0.53.0", @@ -6983,9 +7014,9 @@ checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" [[package]] name = "winnow" -version = "0.7.11" +version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74c7b26e3480b707944fc872477815d29a8e429d2f93a1ce000f5fa84a15cbcd" +checksum = "21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf" dependencies = [ "memchr", ] @@ -7001,13 +7032,10 @@ dependencies = [ ] [[package]] -name = "wit-bindgen-rt" -version = "0.39.0" +name = "wit-bindgen" +version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" -dependencies = [ - "bitflags 2.9.1", -] +checksum = "052283831dbae3d879dc7f51f3d92703a316ca49f91540417d38591826127814" [[package]] name = "writeable" @@ -7031,14 +7059,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af3a19837351dc82ba89f8a125e22a3c475f05aba604acc023d62b2739ae2909" dependencies = [ "libc", - "rustix 1.0.7", + "rustix 1.0.8", ] [[package]] name = "xml-rs" -version = "0.8.26" +version = "0.8.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a62ce76d9b56901b19a74f19431b0d8b3bc7ca4ad685a746dfd78ca8f4fc6bda" +checksum = "6fd8403733700263c6eb89f192880191f1b83e332f7a20371ddcf421c4a337c7" [[package]] name = "yansi" @@ -7049,7 +7077,7 @@ checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" [[package]] name = "yaserde" version = "0.12.0" -source = "git+https://github.com/jggc/yaserde.git#c94ca32b6505f9c9a668702a1b1f1f88c6374301" +source = "git+https://github.com/jggc/yaserde.git#adfdb1c5f4d054f114e5bd0ea7bda9c07a369def" dependencies = [ "log", "xml-rs", @@ -7058,7 +7086,7 @@ dependencies = [ [[package]] name = "yaserde_derive" version = "0.12.0" -source = "git+https://github.com/jggc/yaserde.git#c94ca32b6505f9c9a668702a1b1f1f88c6374301" +source = "git+https://github.com/jggc/yaserde.git#adfdb1c5f4d054f114e5bd0ea7bda9c07a369def" dependencies = [ "heck", "log", @@ -7066,7 +7094,7 @@ dependencies = [ "quote", "serde", "serde_tokenstream", - "syn 2.0.105", + "syn 2.0.106", "xml-rs", ] @@ -7090,7 +7118,7 @@ checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.105", + "syn 2.0.106", "synstructure 0.13.2", ] @@ -7111,7 +7139,7 @@ checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181" dependencies = [ "proc-macro2", "quote", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] @@ -7131,7 +7159,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.105", + "syn 2.0.106", "synstructure 0.13.2", ] @@ -7154,9 +7182,9 @@ dependencies = [ [[package]] name = "zerovec" -version = "0.11.2" +version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a05eb080e015ba39cc9e23bbe5e7fb04d5fb040350f99f34e338d5fdd294428" +checksum = "e7aa2bd55086f1ab526693ecbe444205da57e25f4489879da80635a46d90e73b" dependencies = [ "yoke", "zerofrom", @@ -7171,7 +7199,7 @@ checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.105", + "syn 2.0.106", ] [[package]] diff --git a/adr/agent_discovery/mdns/src/discover.rs b/adr/agent_discovery/mdns/src/discover.rs index bf339de..276ca5c 100644 --- a/adr/agent_discovery/mdns/src/discover.rs +++ b/adr/agent_discovery/mdns/src/discover.rs @@ -1,4 +1,3 @@ -use log::debug; use mdns_sd::{ServiceDaemon, ServiceEvent}; use crate::SERVICE_TYPE; @@ -74,7 +73,7 @@ pub async fn discover() { // } } -async fn discover_example() { +async fn _discover_example() { use mdns_sd::{ServiceDaemon, ServiceEvent}; // Create a daemon diff --git a/data/okd/bin/kubectl b/data/okd/bin/kubectl new file mode 100755 index 0000000..e678ff0 --- /dev/null +++ b/data/okd/bin/kubectl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c00e6cf8aeec70327e3c3a6d6efbedae34742e64af7a6f4380e4325827c3eb2 +size 123112560 diff --git a/data/okd/bin/oc b/data/okd/bin/oc new file mode 100755 index 0000000..e678ff0 --- /dev/null +++ b/data/okd/bin/oc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c00e6cf8aeec70327e3c3a6d6efbedae34742e64af7a6f4380e4325827c3eb2 +size 123112560 diff --git a/data/okd/bin/oc_README.md b/data/okd/bin/oc_README.md new file mode 100644 index 0000000..e0934fd --- /dev/null +++ b/data/okd/bin/oc_README.md @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:75e9b59be7d37cdd5a9a5e7059831c7f728f092ca5fcd41bc36be5649bab5a9a +size 954 diff --git a/data/okd/bin/openshift-install b/data/okd/bin/openshift-install new file mode 100755 index 0000000..4837740 --- /dev/null +++ b/data/okd/bin/openshift-install @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:603d7920a886db2a7678fa8366a2964b5658ce153aaf6649a9b6772906dfc0ad +size 596820120 diff --git a/data/okd/bin/openshift-install_README.md b/data/okd/bin/openshift-install_README.md new file mode 100644 index 0000000..d082a4d --- /dev/null +++ b/data/okd/bin/openshift-install_README.md @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3885ee9469f6eb63c6c60b6b170d4c3766c4d255a677781418e3078e04601fd2 +size 706 diff --git a/data/okd/installer_image/scos-9.0.20250510-0-live-initramfs.x86_64.img b/data/okd/installer_image/scos-9.0.20250510-0-live-initramfs.x86_64.img new file mode 100644 index 0000000..6109433 --- /dev/null +++ b/data/okd/installer_image/scos-9.0.20250510-0-live-initramfs.x86_64.img @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c662e3b281ae4ce9d3a0b94c8286ef37ec7e452c1d3342d2b4dac734f8048d2e +size 101785184 diff --git a/data/okd/installer_image/scos-9.0.20250510-0-live-kernel.x86_64 b/data/okd/installer_image/scos-9.0.20250510-0-live-kernel.x86_64 new file mode 100644 index 0000000..86ccd1e --- /dev/null +++ b/data/okd/installer_image/scos-9.0.20250510-0-live-kernel.x86_64 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dceac0b7809536dea5ff109d231b487a2be4cad742e1152c1268cd800dd6450b +size 14968872 diff --git a/data/okd/installer_image/scos-9.0.20250510-0-live-rootfs.x86_64.img b/data/okd/installer_image/scos-9.0.20250510-0-live-rootfs.x86_64.img new file mode 100644 index 0000000..5b297fa --- /dev/null +++ b/data/okd/installer_image/scos-9.0.20250510-0-live-rootfs.x86_64.img @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:13469a5a76029ad4793e81ca3b7527c3ecce50e8783c7ecd88c08397b4729595 +size 1071223296 diff --git a/data/okd/installer_image/scos-live-initramfs.x86_64.img b/data/okd/installer_image/scos-live-initramfs.x86_64.img new file mode 120000 index 0000000..1bd01a0 --- /dev/null +++ b/data/okd/installer_image/scos-live-initramfs.x86_64.img @@ -0,0 +1 @@ +scos-9.0.20250510-0-live-initramfs.x86_64.img \ No newline at end of file diff --git a/data/okd/installer_image/scos-live-kernel.x86_64 b/data/okd/installer_image/scos-live-kernel.x86_64 new file mode 120000 index 0000000..8a83a5c --- /dev/null +++ b/data/okd/installer_image/scos-live-kernel.x86_64 @@ -0,0 +1 @@ +scos-9.0.20250510-0-live-kernel.x86_64 \ No newline at end of file diff --git a/data/okd/installer_image/scos-live-rootfs.x86_64.img b/data/okd/installer_image/scos-live-rootfs.x86_64.img new file mode 120000 index 0000000..ae3a74b --- /dev/null +++ b/data/okd/installer_image/scos-live-rootfs.x86_64.img @@ -0,0 +1 @@ +scos-9.0.20250510-0-live-rootfs.x86_64.img \ No newline at end of file diff --git a/docs/OKD_Host_preparation.md b/docs/OKD_Host_preparation.md new file mode 100644 index 0000000..7f03f04 --- /dev/null +++ b/docs/OKD_Host_preparation.md @@ -0,0 +1,8 @@ +## Bios settings + +1. CSM : Disabled (compatibility support to boot gpt formatted drives) +2. Secure boot : disabled +3. Boot order : + 1. Local Hard drive + 2. PXE IPv4 +4. System clock, make sure it is adjusted, otherwise you will get invalid certificates error diff --git a/examples/cli/src/main.rs b/examples/cli/src/main.rs index 524d69c..a8bc901 100644 --- a/examples/cli/src/main.rs +++ b/examples/cli/src/main.rs @@ -2,7 +2,7 @@ use harmony::{ inventory::Inventory, modules::{ dummy::{ErrorScore, PanicScore, SuccessScore}, - inventory::DiscoverInventoryAgentScore, + inventory::LaunchDiscoverInventoryAgentScore, }, topology::LocalhostTopology, }; @@ -16,7 +16,7 @@ async fn main() { Box::new(SuccessScore {}), Box::new(ErrorScore {}), Box::new(PanicScore {}), - Box::new(DiscoverInventoryAgentScore { + Box::new(LaunchDiscoverInventoryAgentScore { discovery_timeout: Some(10), }), ], diff --git a/examples/nanodc/Cargo.toml b/examples/nanodc/Cargo.toml index ccd3a3a..889c24d 100644 --- a/examples/nanodc/Cargo.toml +++ b/examples/nanodc/Cargo.toml @@ -13,6 +13,7 @@ harmony_types = { path = "../../harmony_types" } cidr = { workspace = true } tokio = { workspace = true } harmony_macros = { path = "../../harmony_macros" } +harmony_secret = { path = "../../harmony_secret" } log = { workspace = true } env_logger = { workspace = true } url = { workspace = true } diff --git a/examples/nanodc/src/main.rs b/examples/nanodc/src/main.rs index a6bb8e4..57574d2 100644 --- a/examples/nanodc/src/main.rs +++ b/examples/nanodc/src/main.rs @@ -5,22 +5,24 @@ use std::{ use cidr::Ipv4Cidr; use harmony::{ - hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup}, + config::secret::SshKeyPair, + data::{FileContent, FilePath}, + hardware::{HostCategory, Location, PhysicalHost, SwitchGroup}, infra::opnsense::OPNSenseManagementInterface, inventory::Inventory, modules::{ http::StaticFilesHttpScore, - ipxe::IpxeScore, okd::{ bootstrap_dhcp::OKDBootstrapDhcpScore, bootstrap_load_balancer::OKDBootstrapLoadBalancerScore, dhcp::OKDDhcpScore, - dns::OKDDnsScore, + dns::OKDDnsScore, ipxe::OKDIpxeScore, }, tftp::TftpScore, }, topology::{LogicalHost, UnmanagedRouter}, }; use harmony_macros::{ip, mac_address}; +use harmony_secret::SecretManager; use harmony_types::net::Url; #[tokio::main] @@ -124,14 +126,28 @@ async fn main() { let load_balancer_score = harmony::modules::okd::load_balancer::OKDLoadBalancerScore::new(&topology); + let ssh_key = SecretManager::get_or_prompt::().await.unwrap(); + let tftp_score = TftpScore::new(Url::LocalFolder("./data/watchguard/tftpboot".to_string())); let http_score = StaticFilesHttpScore { folder_to_serve: Some(Url::LocalFolder( "./data/watchguard/pxe-http-files".to_string(), )), files: vec![], + remote_path: None, + }; + + let kickstart_filename = "inventory.kickstart".to_string(); + let harmony_inventory_agent = "harmony_inventory_agent".to_string(); + + let ipxe_score = OKDIpxeScore { + kickstart_filename, + harmony_inventory_agent, + cluster_pubkey: FileContent { + path: FilePath::Relative("cluster_ssh_key.pub".to_string()), + content: ssh_key.public, + }, }; - let ipxe_score = IpxeScore::new(); harmony_tui::run( inventory, diff --git a/examples/okd_installation/Cargo.toml b/examples/okd_installation/Cargo.toml new file mode 100644 index 0000000..7314e4f --- /dev/null +++ b/examples/okd_installation/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "example-okd-install" +edition = "2024" +version.workspace = true +readme.workspace = true +license.workspace = true +publish = false + +[dependencies] +harmony = { path = "../../harmony" } +harmony_cli = { path = "../../harmony_cli" } +harmony_types = { path = "../../harmony_types" } +harmony_secret = { path = "../../harmony_secret" } +harmony_secret_derive = { path = "../../harmony_secret_derive" } +cidr = { workspace = true } +tokio = { workspace = true } +harmony_macros = { path = "../../harmony_macros" } +log = { workspace = true } +env_logger = { workspace = true } +url = { workspace = true } +serde.workspace = true diff --git a/examples/okd_installation/env.sh b/examples/okd_installation/env.sh new file mode 100644 index 0000000..2df3da6 --- /dev/null +++ b/examples/okd_installation/env.sh @@ -0,0 +1,4 @@ +export HARMONY_SECRET_NAMESPACE=example-vms +export HARMONY_SECRET_STORE=file +export HARMONY_DATABASE_URL=sqlite://harmony_vms.sqlite RUST_LOG=info +export RUST_LOG=info diff --git a/examples/okd_installation/src/main.rs b/examples/okd_installation/src/main.rs new file mode 100644 index 0000000..e581d5d --- /dev/null +++ b/examples/okd_installation/src/main.rs @@ -0,0 +1,34 @@ +mod topology; + +use crate::topology::{get_inventory, get_topology}; +use harmony::{ + config::secret::SshKeyPair, + data::{FileContent, FilePath}, + modules::okd::{installation::OKDInstallationPipeline, ipxe::OKDIpxeScore}, + score::Score, + topology::HAClusterTopology, +}; +use harmony_secret::SecretManager; + +#[tokio::main] +async fn main() { + let inventory = get_inventory(); + let topology = get_topology().await; + + let ssh_key = SecretManager::get_or_prompt::().await.unwrap(); + + let mut scores: Vec>> = vec![Box::new(OKDIpxeScore { + kickstart_filename: "inventory.kickstart".to_string(), + harmony_inventory_agent: "harmony_inventory_agent".to_string(), + cluster_pubkey: FileContent { + path: FilePath::Relative("cluster_ssh_key.pub".to_string()), + content: ssh_key.public, + }, + })]; + + scores.append(&mut OKDInstallationPipeline::get_all_scores().await); + + harmony_cli::run(inventory, topology, scores, None) + .await + .unwrap(); +} diff --git a/examples/okd_installation/src/topology.rs b/examples/okd_installation/src/topology.rs new file mode 100644 index 0000000..02553a5 --- /dev/null +++ b/examples/okd_installation/src/topology.rs @@ -0,0 +1,77 @@ +use cidr::Ipv4Cidr; +use harmony::{ + hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup}, + infra::opnsense::OPNSenseManagementInterface, + inventory::Inventory, + topology::{HAClusterTopology, LogicalHost, UnmanagedRouter}, +}; +use harmony_macros::{ip, ipv4}; +use harmony_secret::{Secret, SecretManager}; +use serde::{Deserialize, Serialize}; +use std::{net::IpAddr, sync::Arc}; + +#[derive(Secret, Serialize, Deserialize, Debug, PartialEq)] +struct OPNSenseFirewallConfig { + username: String, + password: String, +} + +pub async fn get_topology() -> HAClusterTopology { + let firewall = harmony::topology::LogicalHost { + ip: ip!("192.168.1.1"), + name: String::from("opnsense-1"), + }; + + let config = SecretManager::get_or_prompt::().await; + let config = config.unwrap(); + + let opnsense = Arc::new( + harmony::infra::opnsense::OPNSenseFirewall::new( + firewall, + None, + &config.username, + &config.password, + ) + .await, + ); + let lan_subnet = ipv4!("192.168.1.0"); + let gateway_ipv4 = ipv4!("192.168.1.1"); + let gateway_ip = IpAddr::V4(gateway_ipv4); + harmony::topology::HAClusterTopology { + domain_name: "demo.harmony.mcd".to_string(), + router: Arc::new(UnmanagedRouter::new( + gateway_ip, + Ipv4Cidr::new(lan_subnet, 24).unwrap(), + )), + load_balancer: opnsense.clone(), + firewall: opnsense.clone(), + tftp_server: opnsense.clone(), + http_server: opnsense.clone(), + dhcp_server: opnsense.clone(), + dns_server: opnsense.clone(), + control_plane: vec![LogicalHost { + ip: ip!("192.168.1.20"), + name: "master".to_string(), + }], + bootstrap_host: LogicalHost { + ip: ip!("192.168.1.10"), + name: "bootstrap".to_string(), + }, + workers: vec![], + switch: vec![], + } +} + +pub fn get_inventory() -> Inventory { + Inventory { + location: Location::new( + "Some virtual machine or maybe a physical machine if you're cool".to_string(), + "testopnsense".to_string(), + ), + switch: SwitchGroup::from([]), + firewall_mgmt: Box::new(OPNSenseManagementInterface::new()), + storage_host: vec![], + worker_host: vec![], + control_plane_host: vec![], + } +} diff --git a/examples/okd_installation/ssh_example_key b/examples/okd_installation/ssh_example_key new file mode 100644 index 0000000..272bfb3 --- /dev/null +++ b/examples/okd_installation/ssh_example_key @@ -0,0 +1,7 @@ +-----BEGIN OPENSSH PRIVATE KEY----- +b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW +QyNTUxOQAAACAcemw8pbwuvHFaYynxBbS0Cf3ThYuj1Utr7CDqjwySHAAAAJikacCNpGnA +jQAAAAtzc2gtZWQyNTUxOQAAACAcemw8pbwuvHFaYynxBbS0Cf3ThYuj1Utr7CDqjwySHA +AAAECiiKk4V6Q5cVs6axDM4sjAzZn/QCZLQekmYQXS9XbEYxx6bDylvC68cVpjKfEFtLQJ +/dOFi6PVS2vsIOqPDJIcAAAAEGplYW5nYWJAbGlsaWFuZTIBAgMEBQ== +-----END OPENSSH PRIVATE KEY----- diff --git a/examples/okd_installation/ssh_example_key.pub b/examples/okd_installation/ssh_example_key.pub new file mode 100644 index 0000000..8a68662 --- /dev/null +++ b/examples/okd_installation/ssh_example_key.pub @@ -0,0 +1 @@ +ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBx6bDylvC68cVpjKfEFtLQJ/dOFi6PVS2vsIOqPDJIc jeangab@liliane2 diff --git a/examples/okd_pxe/src/main.rs b/examples/okd_pxe/src/main.rs index 42e4729..bd638dd 100644 --- a/examples/okd_pxe/src/main.rs +++ b/examples/okd_pxe/src/main.rs @@ -1,7 +1,12 @@ mod topology; use crate::topology::{get_inventory, get_topology}; -use harmony::modules::okd::ipxe::OkdIpxeScore; +use harmony::{ + config::secret::SshKeyPair, + data::{FileContent, FilePath}, + modules::okd::ipxe::OKDIpxeScore, +}; +use harmony_secret::SecretManager; #[tokio::main] async fn main() { @@ -9,13 +14,16 @@ async fn main() { let topology = get_topology().await; let kickstart_filename = "inventory.kickstart".to_string(); - let cluster_pubkey_filename = "cluster_ssh_key.pub".to_string(); let harmony_inventory_agent = "harmony_inventory_agent".to_string(); + let ssh_key = SecretManager::get_or_prompt::().await.unwrap(); - let ipxe_score = OkdIpxeScore { + let ipxe_score = OKDIpxeScore { kickstart_filename, harmony_inventory_agent, - cluster_pubkey_filename, + cluster_pubkey: FileContent { + path: FilePath::Relative("cluster_ssh_key.pub".to_string()), + content: ssh_key.public, + }, }; harmony_cli::run(inventory, topology, vec![Box::new(ipxe_score)], None) diff --git a/examples/okd_pxe/src/topology.rs b/examples/okd_pxe/src/topology.rs index 27eb8c0..707969a 100644 --- a/examples/okd_pxe/src/topology.rs +++ b/examples/okd_pxe/src/topology.rs @@ -1,28 +1,22 @@ use cidr::Ipv4Cidr; use harmony::{ - hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup}, + config::secret::OPNSenseFirewallCredentials, + hardware::{Location, SwitchGroup}, infra::opnsense::OPNSenseManagementInterface, inventory::Inventory, topology::{HAClusterTopology, LogicalHost, UnmanagedRouter}, }; use harmony_macros::{ip, ipv4}; -use harmony_secret::{Secret, SecretManager}; -use serde::{Deserialize, Serialize}; +use harmony_secret::SecretManager; use std::{net::IpAddr, sync::Arc}; -#[derive(Secret, Serialize, Deserialize, Debug, PartialEq)] -struct OPNSenseFirewallConfig { - username: String, - password: String, -} - pub async fn get_topology() -> HAClusterTopology { let firewall = harmony::topology::LogicalHost { ip: ip!("192.168.1.1"), name: String::from("opnsense-1"), }; - let config = SecretManager::get::().await; + let config = SecretManager::get_or_prompt::().await; let config = config.unwrap(); let opnsense = Arc::new( diff --git a/examples/opnsense/src/main.rs b/examples/opnsense/src/main.rs index 465b0fa..fcfaf09 100644 --- a/examples/opnsense/src/main.rs +++ b/examples/opnsense/src/main.rs @@ -5,7 +5,7 @@ use std::{ use cidr::Ipv4Cidr; use harmony::{ - hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup}, + hardware::{HostCategory, Location, PhysicalHost, SwitchGroup}, infra::opnsense::OPNSenseManagementInterface, inventory::Inventory, modules::{ @@ -85,6 +85,7 @@ async fn main() { "./data/watchguard/pxe-http-files".to_string(), )), files: vec![], + remote_path: None, }; harmony_tui::run( diff --git a/examples/tui/src/main.rs b/examples/tui/src/main.rs index 4b1aabe..d9c85a6 100644 --- a/examples/tui/src/main.rs +++ b/examples/tui/src/main.rs @@ -9,6 +9,7 @@ use harmony::{ }, topology::{ BackendServer, DummyInfra, HealthCheck, HttpMethod, HttpStatusCode, LoadBalancerService, + SSL, }, }; use harmony_macros::ipv4; @@ -47,6 +48,7 @@ fn build_large_score() -> LoadBalancerScore { .to_string(), HttpMethod::GET, HttpStatusCode::Success2xx, + SSL::Disabled, )), }; LoadBalancerScore { diff --git a/harmony/Cargo.toml b/harmony/Cargo.toml index 07a2480..1a97be4 100644 --- a/harmony/Cargo.toml +++ b/harmony/Cargo.toml @@ -67,9 +67,11 @@ base64.workspace = true thiserror.workspace = true once_cell = "1.21.3" harmony_inventory_agent = { path = "../harmony_inventory_agent" } -harmony_secret_derive = { version = "0.1.0", path = "../harmony_secret_derive" } +harmony_secret_derive = { path = "../harmony_secret_derive" } +harmony_secret = { path = "../harmony_secret" } askama.workspace = true sqlx.workspace = true +inquire.workspace = true [dev-dependencies] pretty_assertions.workspace = true diff --git a/harmony/src/domain/config.rs b/harmony/src/domain/config/mod.rs similarity index 98% rename from harmony/src/domain/config.rs rename to harmony/src/domain/config/mod.rs index 1a91684..fbffbf3 100644 --- a/harmony/src/domain/config.rs +++ b/harmony/src/domain/config/mod.rs @@ -1,3 +1,5 @@ +pub mod secret; + use lazy_static::lazy_static; use std::path::PathBuf; diff --git a/harmony/src/domain/config/secret.rs b/harmony/src/domain/config/secret.rs new file mode 100644 index 0000000..0253869 --- /dev/null +++ b/harmony/src/domain/config/secret.rs @@ -0,0 +1,20 @@ +use harmony_secret_derive::Secret; +use serde::{Deserialize, Serialize}; + +#[derive(Secret, Serialize, Deserialize, Debug, PartialEq)] +pub struct OPNSenseFirewallCredentials { + pub username: String, + pub password: String, +} + +// TODO we need a better way to handle multiple "instances" of the same secret structure. +#[derive(Secret, Serialize, Deserialize, Debug, PartialEq)] +pub struct SshKeyPair { + pub private: String, + pub public: String, +} + +#[derive(Secret, Serialize, Deserialize, Debug, PartialEq)] +pub struct RedhatSecret { + pub pull_secret: String, +} diff --git a/harmony/src/domain/hardware/mod.rs b/harmony/src/domain/hardware/mod.rs index 3a14e1a..1b1a72c 100644 --- a/harmony/src/domain/hardware/mod.rs +++ b/harmony/src/domain/hardware/mod.rs @@ -1,5 +1,3 @@ -use std::sync::Arc; - use derive_new::new; use harmony_inventory_agent::hwinfo::{CPU, MemoryModule, NetworkInterface, StorageDrive}; use harmony_types::net::MacAddress; @@ -10,7 +8,7 @@ pub type HostGroup = Vec; pub type SwitchGroup = Vec; pub type FirewallGroup = Vec; -#[derive(Debug, Clone, Serialize)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub struct PhysicalHost { pub id: Id, pub category: HostCategory, @@ -151,6 +149,98 @@ impl PhysicalHost { parts.join(" | ") } + pub fn parts_list(&self) -> String { + let PhysicalHost { + id, + category, + network, + storage, + labels, + memory_modules, + cpus, + } = self; + + let mut parts_list = String::new(); + parts_list.push_str("\n\n====================="); + parts_list.push_str(&format!("\nHost ID {id}")); + parts_list.push_str("\n====================="); + parts_list.push_str("\n\n====================="); + parts_list.push_str(&format!("\nCPU count {}", cpus.len())); + parts_list.push_str("\n====================="); + cpus.iter().for_each(|c| { + let CPU { + model, + vendor, + cores, + threads, + frequency_mhz, + } = c; + parts_list.push_str(&format!( + "\n{vendor} {model}, {cores}/{threads} {}Ghz", + *frequency_mhz as f64 / 1000.0 + )); + }); + + parts_list.push_str("\n\n====================="); + parts_list.push_str(&format!("\nNetwork Interfaces count {}", network.len())); + parts_list.push_str("\n====================="); + network.iter().for_each(|nic| { + parts_list.push_str(&format!( + "\nNic({} {}Gbps mac({}) ipv4({}), ipv6({})", + nic.name, + nic.speed_mbps.unwrap_or(0) / 1000, + nic.mac_address, + nic.ipv4_addresses.join(","), + nic.ipv6_addresses.join(",") + )); + }); + + parts_list.push_str("\n\n====================="); + parts_list.push_str(&format!("\nStorage drives count {}", storage.len())); + parts_list.push_str("\n====================="); + storage.iter().for_each(|drive| { + let StorageDrive { + name, + model, + serial, + size_bytes, + logical_block_size: _, + physical_block_size: _, + rotational: _, + wwn: _, + interface_type, + smart_status, + } = drive; + parts_list.push_str(&format!( + "\n{name} {}Gb {model} {interface_type} smart({smart_status:?}) {serial}", + size_bytes / 1000 / 1000 / 1000 + )); + }); + + parts_list.push_str("\n\n====================="); + parts_list.push_str(&format!("\nMemory modules count {}", memory_modules.len())); + parts_list.push_str("\n====================="); + memory_modules.iter().for_each(|mem| { + let MemoryModule { + size_bytes, + speed_mhz, + manufacturer, + part_number, + serial_number, + rank, + } = mem; + parts_list.push_str(&format!( + "\n{}Gb, {}Mhz, Manufacturer ({}), Part Number ({})", + size_bytes / 1000 / 1000 / 1000, + speed_mhz.unwrap_or(0), + manufacturer.as_ref().unwrap_or(&String::new()), + part_number.as_ref().unwrap_or(&String::new()), + )); + }); + + parts_list + } + pub fn cluster_mac(&self) -> MacAddress { self.network .first() @@ -173,6 +263,10 @@ impl PhysicalHost { self } + pub fn get_mac_address(&self) -> Vec { + self.network.iter().map(|nic| nic.mac_address).collect() + } + pub fn label(mut self, name: String, value: String) -> Self { self.labels.push(Label { name, value }); self @@ -221,15 +315,6 @@ impl PhysicalHost { // } // } -impl<'de> Deserialize<'de> for PhysicalHost { - fn deserialize(_deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - todo!() - } -} - #[derive(new, Serialize)] pub struct ManualManagementInterface; @@ -273,16 +358,13 @@ where } } -#[derive(Debug, Clone, Serialize)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub enum HostCategory { Server, Firewall, Switch, } -#[cfg(test)] -use harmony_macros::mac_address; - use harmony_types::id::Id; #[derive(Debug, Clone, Serialize)] @@ -291,7 +373,7 @@ pub struct Switch { _management_interface: NetworkInterface, } -#[derive(Debug, new, Clone, Serialize)] +#[derive(Debug, new, Clone, Serialize, Deserialize)] pub struct Label { pub name: String, pub value: String, diff --git a/harmony/src/domain/interpret/mod.rs b/harmony/src/domain/interpret/mod.rs index 737bf28..4f1f209 100644 --- a/harmony/src/domain/interpret/mod.rs +++ b/harmony/src/domain/interpret/mod.rs @@ -142,6 +142,12 @@ impl From for InterpretError { } } +impl From for InterpretError { + fn from(value: harmony_secret::SecretStoreError) -> Self { + InterpretError::new(format!("Interpret error : {value}")) + } +} + impl From for InterpretError { fn from(value: ExecutorError) -> Self { Self { diff --git a/harmony/src/domain/inventory/mod.rs b/harmony/src/domain/inventory/mod.rs index ae8589d..7d160d7 100644 --- a/harmony/src/domain/inventory/mod.rs +++ b/harmony/src/domain/inventory/mod.rs @@ -17,12 +17,14 @@ impl InventoryFilter { use derive_new::new; use log::info; +use serde::{Deserialize, Serialize}; +use strum::EnumIter; use crate::hardware::{ManagementInterface, ManualManagementInterface}; use super::{ filter::Filter, - hardware::{FirewallGroup, HostGroup, Location, SwitchGroup}, + hardware::{HostGroup, Location, SwitchGroup}, }; #[derive(Debug)] @@ -61,3 +63,11 @@ impl Inventory { } } } + +#[derive(Debug, Serialize, Deserialize, sqlx::Type, Clone, EnumIter)] +pub enum HostRole { + Bootstrap, + ControlPlane, + Worker, + Storage, +} diff --git a/harmony/src/domain/inventory/repository.rs b/harmony/src/domain/inventory/repository.rs index e4e02a9..7b6d798 100644 --- a/harmony/src/domain/inventory/repository.rs +++ b/harmony/src/domain/inventory/repository.rs @@ -1,6 +1,6 @@ use async_trait::async_trait; -use crate::hardware::PhysicalHost; +use crate::{hardware::PhysicalHost, interpret::InterpretError, inventory::HostRole}; /// Errors that can occur within the repository layer. #[derive(thiserror::Error, Debug)] @@ -15,6 +15,12 @@ pub enum RepoError { ConnectionFailed(String), } +impl From for InterpretError { + fn from(value: RepoError) -> Self { + InterpretError::new(format!("Interpret error : {value}")) + } +} + // --- Trait and Implementation --- /// Defines the contract for inventory persistence. @@ -22,4 +28,11 @@ pub enum RepoError { pub trait InventoryRepository: Send + Sync + 'static { async fn save(&self, host: &PhysicalHost) -> Result<(), RepoError>; async fn get_latest_by_id(&self, host_id: &str) -> Result, RepoError>; + async fn get_all_hosts(&self) -> Result, RepoError>; + async fn get_host_for_role(&self, role: &HostRole) -> Result, RepoError>; + async fn save_role_mapping( + &self, + role: &HostRole, + host: &PhysicalHost, + ) -> Result<(), RepoError>; } diff --git a/harmony/src/domain/topology/ha_cluster.rs b/harmony/src/domain/topology/ha_cluster.rs index 707081a..c9f565e 100644 --- a/harmony/src/domain/topology/ha_cluster.rs +++ b/harmony/src/domain/topology/ha_cluster.rs @@ -69,6 +69,26 @@ impl K8sclient for HAClusterTopology { } impl HAClusterTopology { + // TODO this is a hack to avoid refactoring + pub fn get_cluster_name(&self) -> String { + self.domain_name + .split(".") + .next() + .expect("Cluster domain name must not be empty") + .to_string() + } + + pub fn get_cluster_base_domain(&self) -> String { + let base_domain = self + .domain_name + .strip_prefix(&self.get_cluster_name()) + .expect("cluster domain must start with cluster name"); + base_domain + .strip_prefix(".") + .unwrap_or(base_domain) + .to_string() + } + pub fn autoload() -> Self { let dummy_infra = Arc::new(DummyInfra {}); let dummy_host = LogicalHost { @@ -161,6 +181,14 @@ impl DhcpServer for HAClusterTopology { self.dhcp_server.set_pxe_options(options).await } + async fn set_dhcp_range( + &self, + start: &IpAddress, + end: &IpAddress, + ) -> Result<(), ExecutorError> { + self.dhcp_server.set_dhcp_range(start, end).await + } + fn get_ip(&self) -> IpAddress { self.dhcp_server.get_ip() } @@ -209,8 +237,12 @@ impl Router for HAClusterTopology { #[async_trait] impl HttpServer for HAClusterTopology { - async fn serve_files(&self, url: &Url) -> Result<(), ExecutorError> { - self.http_server.serve_files(url).await + async fn serve_files( + &self, + url: &Url, + remote_path: &Option, + ) -> Result<(), ExecutorError> { + self.http_server.serve_files(url, remote_path).await } async fn serve_file_content(&self, file: &FileContent) -> Result<(), ExecutorError> { @@ -298,6 +330,13 @@ impl DhcpServer for DummyInfra { async fn set_pxe_options(&self, _options: PxeOptions) -> Result<(), ExecutorError> { unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) } + async fn set_dhcp_range( + &self, + start: &IpAddress, + end: &IpAddress, + ) -> Result<(), ExecutorError> { + unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) + } fn get_ip(&self) -> IpAddress { unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) } @@ -362,7 +401,11 @@ impl TftpServer for DummyInfra { #[async_trait] impl HttpServer for DummyInfra { - async fn serve_files(&self, _url: &Url) -> Result<(), ExecutorError> { + async fn serve_files( + &self, + _url: &Url, + _remote_path: &Option, + ) -> Result<(), ExecutorError> { unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) } async fn serve_file_content(&self, _file: &FileContent) -> Result<(), ExecutorError> { diff --git a/harmony/src/domain/topology/http.rs b/harmony/src/domain/topology/http.rs index cc6c1f0..2459206 100644 --- a/harmony/src/domain/topology/http.rs +++ b/harmony/src/domain/topology/http.rs @@ -5,7 +5,11 @@ use harmony_types::net::IpAddress; use harmony_types::net::Url; #[async_trait] pub trait HttpServer: Send + Sync { - async fn serve_files(&self, url: &Url) -> Result<(), ExecutorError>; + async fn serve_files( + &self, + url: &Url, + remote_path: &Option, + ) -> Result<(), ExecutorError>; async fn serve_file_content(&self, file: &FileContent) -> Result<(), ExecutorError>; fn get_ip(&self) -> IpAddress; diff --git a/harmony/src/domain/topology/load_balancer.rs b/harmony/src/domain/topology/load_balancer.rs index 3a38103..901602b 100644 --- a/harmony/src/domain/topology/load_balancer.rs +++ b/harmony/src/domain/topology/load_balancer.rs @@ -102,8 +102,17 @@ pub enum HttpStatusCode { ServerError5xx, } +#[derive(Debug, Clone, PartialEq, Serialize)] +pub enum SSL { + SSL, + Disabled, + Default, + SNI, + Other(String), +} + #[derive(Debug, Clone, PartialEq, Serialize)] pub enum HealthCheck { - HTTP(String, HttpMethod, HttpStatusCode), + HTTP(String, HttpMethod, HttpStatusCode, SSL), TCP(Option), } diff --git a/harmony/src/domain/topology/network.rs b/harmony/src/domain/topology/network.rs index 7773ae1..c7ab5cc 100644 --- a/harmony/src/domain/topology/network.rs +++ b/harmony/src/domain/topology/network.rs @@ -11,15 +11,21 @@ use super::{LogicalHost, k8s::K8sClient}; #[derive(Debug)] pub struct DHCPStaticEntry { pub name: String, - pub mac: MacAddress, + pub mac: Vec, pub ip: Ipv4Addr, } impl std::fmt::Display for DHCPStaticEntry { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let mac = self + .mac + .iter() + .map(|m| m.to_string()) + .collect::>() + .join(","); f.write_fmt(format_args!( "DHCPStaticEntry : name {}, mac {}, ip {}", - self.name, self.mac, self.ip + self.name, mac, self.ip )) } } @@ -41,6 +47,7 @@ impl std::fmt::Debug for dyn Firewall { pub struct NetworkDomain { pub name: String, } + #[async_trait] pub trait K8sclient: Send + Sync { async fn k8s_client(&self) -> Result, String>; @@ -59,6 +66,8 @@ pub trait DhcpServer: Send + Sync + std::fmt::Debug { async fn remove_static_mapping(&self, mac: &MacAddress) -> Result<(), ExecutorError>; async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)>; async fn set_pxe_options(&self, pxe_options: PxeOptions) -> Result<(), ExecutorError>; + async fn set_dhcp_range(&self, start: &IpAddress, end: &IpAddress) + -> Result<(), ExecutorError>; fn get_ip(&self) -> IpAddress; fn get_host(&self) -> LogicalHost; async fn commit_config(&self) -> Result<(), ExecutorError>; diff --git a/harmony/src/infra/inventory/sqlite.rs b/harmony/src/infra/inventory/sqlite.rs index 967bcb9..f772f72 100644 --- a/harmony/src/infra/inventory/sqlite.rs +++ b/harmony/src/infra/inventory/sqlite.rs @@ -1,6 +1,6 @@ use crate::{ hardware::PhysicalHost, - inventory::{InventoryRepository, RepoError}, + inventory::{HostRole, InventoryRepository, RepoError}, }; use async_trait::async_trait; use harmony_types::id::Id; @@ -46,20 +46,104 @@ impl InventoryRepository for SqliteInventoryRepository { } async fn get_latest_by_id(&self, host_id: &str) -> Result, RepoError> { - let _row = sqlx::query_as!( + let row = sqlx::query_as!( DbHost, r#"SELECT id, version_id, data as "data: Json" FROM physical_hosts WHERE id = ? ORDER BY version_id DESC LIMIT 1"#, host_id ) .fetch_optional(&self.pool) .await?; - todo!() + + Ok(row.map(|r| r.data.0)) + } + + async fn get_all_hosts(&self) -> Result, RepoError> { + let db_hosts = sqlx::query_as!( + DbHost, + r#" + SELECT + p1.id, + p1.version_id, + p1.data as "data: Json" + FROM + physical_hosts p1 + INNER JOIN ( + SELECT + id, + MAX(version_id) AS max_version + FROM + physical_hosts + GROUP BY + id + ) p2 ON p1.id = p2.id AND p1.version_id = p2.max_version + "# + ) + .fetch_all(&self.pool) + .await?; + + let hosts = db_hosts.into_iter().map(|row| row.data.0).collect(); + + Ok(hosts) + } + + async fn save_role_mapping( + &self, + role: &HostRole, + host: &PhysicalHost, + ) -> Result<(), RepoError> { + let host_id = host.id.to_string(); + + sqlx::query!( + r#" + INSERT INTO host_role_mapping (host_id, role) + VALUES (?, ?) + "#, + host_id, + role + ) + .execute(&self.pool) + .await?; + + info!("Saved role mapping for host '{}' as '{:?}'", host.id, role); + + Ok(()) + } + + async fn get_host_for_role(&self, role: &HostRole) -> Result, RepoError> { + struct HostIdRow { + host_id: String, + } + + let role_str = format!("{:?}", role); + + let host_id_rows = sqlx::query_as!( + HostIdRow, + "SELECT host_id FROM host_role_mapping WHERE role = ?", + role_str + ) + .fetch_all(&self.pool) + .await?; + + let mut hosts = Vec::with_capacity(host_id_rows.len()); + for row in host_id_rows { + match self.get_latest_by_id(&row.host_id).await? { + Some(host) => hosts.push(host), + None => { + log::warn!( + "Found a role mapping for host_id '{}', but the host does not exist in the physical_hosts table. This may indicate a data integrity issue.", + row.host_id + ); + } + } + } + + Ok(hosts) } } use sqlx::types::Json; struct DbHost { data: Json, - id: Id, - version_id: Id, + id: String, + version_id: String, } diff --git a/harmony/src/infra/opnsense/dhcp.rs b/harmony/src/infra/opnsense/dhcp.rs index 272ffc2..ce918a8 100644 --- a/harmony/src/infra/opnsense/dhcp.rs +++ b/harmony/src/infra/opnsense/dhcp.rs @@ -17,13 +17,13 @@ impl DhcpServer for OPNSenseFirewall { } async fn add_static_mapping(&self, entry: &DHCPStaticEntry) -> Result<(), ExecutorError> { - let mac: String = String::from(&entry.mac); + let mac: Vec = entry.mac.iter().map(MacAddress::to_string).collect(); { let mut writable_opnsense = self.opnsense_config.write().await; writable_opnsense .dhcp() - .add_static_mapping(&mac, entry.ip, &entry.name) + .add_static_mapping(&mac, &entry.ip, &entry.name) .unwrap(); } @@ -68,4 +68,19 @@ impl DhcpServer for OPNSenseFirewall { ExecutorError::UnexpectedError(format!("Failed to set_pxe_options : {dhcp_error}")) }) } + + async fn set_dhcp_range( + &self, + start: &IpAddress, + end: &IpAddress, + ) -> Result<(), ExecutorError> { + let mut writable_opnsense = self.opnsense_config.write().await; + writable_opnsense + .dhcp() + .set_dhcp_range(&start.to_string(), &end.to_string()) + .await + .map_err(|dhcp_error| { + ExecutorError::UnexpectedError(format!("Failed to set_dhcp_range : {dhcp_error}")) + }) + } } diff --git a/harmony/src/infra/opnsense/dns.rs b/harmony/src/infra/opnsense/dns.rs index 7a58b64..4571db3 100644 --- a/harmony/src/infra/opnsense/dns.rs +++ b/harmony/src/infra/opnsense/dns.rs @@ -1,4 +1,3 @@ -use crate::infra::opnsense::Host; use crate::infra::opnsense::LogicalHost; use crate::{ executors::ExecutorError, @@ -12,21 +11,22 @@ use super::OPNSenseFirewall; #[async_trait] impl DnsServer for OPNSenseFirewall { async fn register_hosts(&self, hosts: Vec) -> Result<(), ExecutorError> { - let mut writable_opnsense = self.opnsense_config.write().await; - let mut dns = writable_opnsense.dns(); - let hosts = hosts - .iter() - .map(|h| { - Host::new( - h.host.clone(), - h.domain.clone(), - h.record_type.to_string(), - h.value.to_string(), - ) - }) - .collect(); - dns.register_hosts(hosts); - Ok(()) + todo!("Refactor this to use dnsmasq") + // let mut writable_opnsense = self.opnsense_config.write().await; + // let mut dns = writable_opnsense.dns(); + // let hosts = hosts + // .iter() + // .map(|h| { + // Host::new( + // h.host.clone(), + // h.domain.clone(), + // h.record_type.to_string(), + // h.value.to_string(), + // ) + // }) + // .collect(); + // dns.add_static_mapping(hosts); + // Ok(()) } fn remove_record( @@ -38,25 +38,26 @@ impl DnsServer for OPNSenseFirewall { } async fn list_records(&self) -> Vec { - self.opnsense_config - .write() - .await - .dns() - .get_hosts() - .iter() - .map(|h| DnsRecord { - host: h.hostname.clone(), - domain: h.domain.clone(), - record_type: h - .rr - .parse() - .expect("received invalid record type {h.rr} from opnsense"), - value: h - .server - .parse() - .expect("received invalid ipv4 record from opnsense {h.server}"), - }) - .collect() + todo!("Refactor this to use dnsmasq") + // self.opnsense_config + // .write() + // .await + // .dns() + // .get_hosts() + // .iter() + // .map(|h| DnsRecord { + // host: h.hostname.clone(), + // domain: h.domain.clone(), + // record_type: h + // .rr + // .parse() + // .expect("received invalid record type {h.rr} from opnsense"), + // value: h + // .server + // .parse() + // .expect("received invalid ipv4 record from opnsense {h.server}"), + // }) + // .collect() } fn get_ip(&self) -> IpAddress { @@ -68,11 +69,12 @@ impl DnsServer for OPNSenseFirewall { } async fn register_dhcp_leases(&self, register: bool) -> Result<(), ExecutorError> { - let mut writable_opnsense = self.opnsense_config.write().await; - let mut dns = writable_opnsense.dns(); - dns.register_dhcp_leases(register); - - Ok(()) + todo!("Refactor this to use dnsmasq") + // let mut writable_opnsense = self.opnsense_config.write().await; + // let mut dns = writable_opnsense.dns(); + // dns.register_dhcp_leases(register); + // + // Ok(()) } async fn commit_config(&self) -> Result<(), ExecutorError> { diff --git a/harmony/src/infra/opnsense/http.rs b/harmony/src/infra/opnsense/http.rs index fa6fe7d..70bbee1 100644 --- a/harmony/src/infra/opnsense/http.rs +++ b/harmony/src/infra/opnsense/http.rs @@ -10,13 +10,21 @@ const OPNSENSE_HTTP_ROOT_PATH: &str = "/usr/local/http"; #[async_trait] impl HttpServer for OPNSenseFirewall { - async fn serve_files(&self, url: &Url) -> Result<(), ExecutorError> { + async fn serve_files( + &self, + url: &Url, + remote_path: &Option, + ) -> Result<(), ExecutorError> { let config = self.opnsense_config.read().await; info!("Uploading files from url {url} to {OPNSENSE_HTTP_ROOT_PATH}"); + let remote_upload_path = remote_path + .clone() + .map(|r| format!("{OPNSENSE_HTTP_ROOT_PATH}/{r}")) + .unwrap_or(OPNSENSE_HTTP_ROOT_PATH.to_string()); match url { Url::LocalFolder(path) => { config - .upload_files(path, OPNSENSE_HTTP_ROOT_PATH) + .upload_files(path, &remote_upload_path) .await .map_err(|e| ExecutorError::UnexpectedError(e.to_string()))?; } diff --git a/harmony/src/infra/opnsense/load_balancer.rs b/harmony/src/infra/opnsense/load_balancer.rs index 9414faf..ce47f05 100644 --- a/harmony/src/infra/opnsense/load_balancer.rs +++ b/harmony/src/infra/opnsense/load_balancer.rs @@ -1,13 +1,15 @@ use async_trait::async_trait; -use log::{debug, info, warn}; -use opnsense_config_xml::{Frontend, HAProxy, HAProxyBackend, HAProxyHealthCheck, HAProxyServer}; +use log::{debug, error, info, warn}; +use opnsense_config_xml::{ + Frontend, HAProxy, HAProxyBackend, HAProxyHealthCheck, HAProxyServer, MaybeString, +}; use uuid::Uuid; use crate::{ executors::ExecutorError, topology::{ BackendServer, HealthCheck, HttpMethod, HttpStatusCode, LoadBalancer, LoadBalancerService, - LogicalHost, + LogicalHost, SSL, }, }; use harmony_types::net::IpAddress; @@ -206,7 +208,22 @@ pub(crate) fn get_health_check_for_backend( .unwrap_or_default() .into(); let status_code: HttpStatusCode = HttpStatusCode::Success2xx; - Some(HealthCheck::HTTP(path, method, status_code)) + let ssl = match haproxy_health_check + .ssl + .content_string() + .to_uppercase() + .as_str() + { + "SSL" => SSL::SSL, + "SSLNI" => SSL::SNI, + "NOSSL" => SSL::Disabled, + "" => SSL::Default, + other => { + error!("Unknown haproxy health check ssl config {other}"); + SSL::Other(other.to_string()) + } + }; + Some(HealthCheck::HTTP(path, method, status_code, ssl)) } _ => panic!("Received unsupported health check type {}", uppercase), } @@ -241,7 +258,14 @@ pub(crate) fn harmony_load_balancer_service_to_haproxy_xml( // frontend points to backend let healthcheck = if let Some(health_check) = &service.health_check { match health_check { - HealthCheck::HTTP(path, http_method, _http_status_code) => { + HealthCheck::HTTP(path, http_method, _http_status_code, ssl) => { + let ssl: MaybeString = match ssl { + SSL::SSL => "ssl".into(), + SSL::SNI => "sslni".into(), + SSL::Disabled => "nossl".into(), + SSL::Default => "".into(), + SSL::Other(other) => other.as_str().into(), + }; let haproxy_check = HAProxyHealthCheck { name: format!("HTTP_{http_method}_{path}"), uuid: Uuid::new_v4().to_string(), @@ -249,6 +273,7 @@ pub(crate) fn harmony_load_balancer_service_to_haproxy_xml( health_check_type: "http".to_string(), http_uri: path.clone().into(), interval: "2s".to_string(), + ssl, ..Default::default() }; diff --git a/harmony/src/modules/dhcp.rs b/harmony/src/modules/dhcp.rs index 9ef45d3..eff2912 100644 --- a/harmony/src/modules/dhcp.rs +++ b/harmony/src/modules/dhcp.rs @@ -1,7 +1,7 @@ use async_trait::async_trait; use derive_new::new; use harmony_types::id::Id; -use log::info; +use log::{info, trace}; use serde::Serialize; use crate::{ @@ -22,6 +22,8 @@ pub struct DhcpScore { pub filename: Option, pub filename64: Option, pub filenameipxe: Option, + pub dhcp_range: (IpAddress, IpAddress), + pub domain: Option, } impl Score for DhcpScore { @@ -52,48 +54,6 @@ impl DhcpInterpret { status: InterpretStatus::QUEUED, } } - async fn add_static_entries( - &self, - _inventory: &Inventory, - dhcp_server: &D, - ) -> Result { - let dhcp_entries: Vec = self - .score - .host_binding - .iter() - .map(|binding| { - let ip = match binding.logical_host.ip { - std::net::IpAddr::V4(ipv4) => ipv4, - std::net::IpAddr::V6(_) => { - unimplemented!("DHCPStaticEntry only supports ipv4 at the moment") - } - }; - - DHCPStaticEntry { - name: binding.logical_host.name.clone(), - mac: binding.physical_host.cluster_mac(), - ip, - } - }) - .collect(); - info!("DHCPStaticEntry : {:?}", dhcp_entries); - - info!("DHCP server : {:?}", dhcp_server); - - let number_new_entries = dhcp_entries.len(); - - for entry in dhcp_entries.into_iter() { - match dhcp_server.add_static_mapping(&entry).await { - Ok(_) => info!("Successfully registered DHCPStaticEntry {}", entry), - Err(_) => todo!(), - } - } - - Ok(Outcome::new( - InterpretStatus::SUCCESS, - format!("Dhcp Interpret registered {} entries", number_new_entries), - )) - } async fn set_pxe_options( &self, @@ -124,7 +84,7 @@ impl DhcpInterpret { } #[async_trait] -impl Interpret for DhcpInterpret { +impl Interpret for DhcpInterpret { fn get_name(&self) -> InterpretName { InterpretName::OPNSenseDHCP } @@ -149,8 +109,16 @@ impl Interpret for DhcpInterpret { info!("Executing DhcpInterpret on inventory {inventory:?}"); self.set_pxe_options(inventory, topology).await?; + topology + .set_dhcp_range(&self.score.dhcp_range.0, &self.score.dhcp_range.1) + .await?; - self.add_static_entries(inventory, topology).await?; + DhcpHostBindingScore { + host_binding: self.score.host_binding.clone(), + domain: self.score.domain.clone(), + } + .interpret(inventory, topology) + .await?; topology.commit_config().await?; @@ -160,3 +128,120 @@ impl Interpret for DhcpInterpret { )) } } + +#[derive(Debug, new, Clone, Serialize)] +pub struct DhcpHostBindingScore { + pub host_binding: Vec, + pub domain: Option, +} + +impl Score for DhcpHostBindingScore { + fn create_interpret(&self) -> Box> { + Box::new(DhcpHostBindingInterpret { + score: self.clone(), + }) + } + + fn name(&self) -> String { + "DhcpHostBindingScore".to_string() + } +} + +// https://docs.opnsense.org/manual/dhcp.html#advanced-settings +#[derive(Debug, Clone)] +pub struct DhcpHostBindingInterpret { + score: DhcpHostBindingScore, +} + +impl DhcpHostBindingInterpret { + async fn add_static_entries( + &self, + _inventory: &Inventory, + dhcp_server: &D, + ) -> Result { + let dhcp_entries: Vec = self + .score + .host_binding + .iter() + .map(|binding| { + let ip = match binding.logical_host.ip { + std::net::IpAddr::V4(ipv4) => ipv4, + std::net::IpAddr::V6(_) => { + unimplemented!("DHCPStaticEntry only supports ipv4 at the moment") + } + }; + + let name = if let Some(domain) = self.score.domain.as_ref() { + format!("{}.{}", binding.logical_host.name, domain) + } else { + binding.logical_host.name.clone() + }; + + DHCPStaticEntry { + name, + mac: binding.physical_host.get_mac_address(), + ip, + } + }) + .collect(); + info!("DHCPStaticEntry : {:?}", dhcp_entries); + + trace!("DHCP server : {:?}", dhcp_server); + + let number_new_entries = dhcp_entries.len(); + + for entry in dhcp_entries.into_iter() { + match dhcp_server.add_static_mapping(&entry).await { + Ok(_) => info!("Successfully registered DHCPStaticEntry {}", entry), + Err(_) => todo!(), + } + } + + Ok(Outcome::new( + InterpretStatus::SUCCESS, + format!("Dhcp Interpret registered {} entries", number_new_entries), + )) + } +} + +#[async_trait] +impl Interpret for DhcpHostBindingInterpret { + fn get_name(&self) -> InterpretName { + InterpretName::Custom("DhcpHostBindingInterpret") + } + + fn get_version(&self) -> crate::domain::data::Version { + Version::from("1.0.0").unwrap() + } + + fn get_status(&self) -> InterpretStatus { + todo!() + } + + fn get_children(&self) -> Vec { + todo!() + } + + async fn execute( + &self, + inventory: &Inventory, + topology: &T, + ) -> Result { + info!( + "Executing DhcpHostBindingInterpret on {} bindings", + self.score.host_binding.len() + ); + + self.add_static_entries(inventory, topology).await?; + + topology.commit_config().await?; + + Ok(Outcome::new( + InterpretStatus::SUCCESS, + format!( + "Dhcp Host Binding Interpret execution successful on {} hosts", + self.score.host_binding.len() + ), + )) + } +} diff --git a/harmony/src/modules/http.rs b/harmony/src/modules/http.rs index fd7a5c8..c654e20 100644 --- a/harmony/src/modules/http.rs +++ b/harmony/src/modules/http.rs @@ -3,14 +3,14 @@ use derive_new::new; use serde::Serialize; use crate::{ - data::{FileContent, Version}, + data::{FileContent, FilePath, Version}, interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, inventory::Inventory, score::Score, topology::{HttpServer, Topology}, }; -use harmony_types::id::Id; use harmony_types::net::Url; +use harmony_types::{id::Id, net::MacAddress}; /// Configure an HTTP server that is provided by the Topology /// @@ -25,8 +25,11 @@ use harmony_types::net::Url; /// ``` #[derive(Debug, new, Clone, Serialize)] pub struct StaticFilesHttpScore { + // TODO this should be split in two scores, one for folder and + // other for files pub folder_to_serve: Option, pub files: Vec, + pub remote_path: Option, } impl Score for StaticFilesHttpScore { @@ -54,7 +57,9 @@ impl Interpret for StaticFilesHttpInterpret { http_server.ensure_initialized().await?; // http_server.set_ip(topology.router.get_gateway()).await?; if let Some(folder) = self.score.folder_to_serve.as_ref() { - http_server.serve_files(folder).await?; + http_server + .serve_files(folder, &self.score.remote_path) + .await?; } for f in self.score.files.iter() { @@ -91,3 +96,34 @@ impl Interpret for StaticFilesHttpInterpret { todo!() } } + +#[derive(Debug, new, Clone, Serialize)] +pub struct IPxeMacBootFileScore { + pub content: String, + pub mac_address: Vec, +} + +impl Score for IPxeMacBootFileScore { + fn name(&self) -> String { + "IPxeMacBootFileScore".to_string() + } + + fn create_interpret(&self) -> Box> { + StaticFilesHttpScore { + remote_path: None, + folder_to_serve: None, + files: self + .mac_address + .iter() + .map(|mac| FileContent { + path: FilePath::Relative(format!( + "byMAC/01-{}.ipxe", + mac.to_string().replace(":", "-") + )), + content: self.content.clone(), + }) + .collect(), + } + .create_interpret() + } +} diff --git a/harmony/src/modules/inventory/discovery.rs b/harmony/src/modules/inventory/discovery.rs new file mode 100644 index 0000000..143c56a --- /dev/null +++ b/harmony/src/modules/inventory/discovery.rs @@ -0,0 +1,122 @@ +use async_trait::async_trait; +use harmony_types::id::Id; +use log::{error, info}; +use serde::{Deserialize, Serialize}; + +use crate::{ + data::Version, + hardware::PhysicalHost, + infra::inventory::InventoryRepositoryFactory, + interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, + inventory::{HostRole, Inventory}, + modules::inventory::LaunchDiscoverInventoryAgentScore, + score::Score, + topology::Topology, +}; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct DiscoverHostForRoleScore { + pub role: HostRole, +} + +impl Score for DiscoverHostForRoleScore { + fn name(&self) -> String { + "DiscoverInventoryAgentScore".to_string() + } + + fn create_interpret(&self) -> Box> { + Box::new(DiscoverHostForRoleInterpret { + score: self.clone(), + }) + } +} + +#[derive(Debug)] +pub struct DiscoverHostForRoleInterpret { + score: DiscoverHostForRoleScore, +} + +#[async_trait] +impl Interpret for DiscoverHostForRoleInterpret { + async fn execute( + &self, + inventory: &Inventory, + topology: &T, + ) -> Result { + info!( + "Launching discovery agent, make sure that your nodes are successfully PXE booted and running inventory agent. They should answer on `http://:8080/inventory`" + ); + LaunchDiscoverInventoryAgentScore { + discovery_timeout: None, + } + .interpret(inventory, topology) + .await?; + + let host: PhysicalHost; + let host_repo = InventoryRepositoryFactory::build().await?; + + loop { + let all_hosts = host_repo.get_all_hosts().await?; + + if all_hosts.is_empty() { + info!("No discovered hosts found yet. Waiting for hosts to appear..."); + // Sleep to avoid spamming the user and logs while waiting for nodes. + tokio::time::sleep(std::time::Duration::from_secs(3)).await; + continue; + } + + let ans = inquire::Select::new( + &format!("Select the node to be used for role {:?}:", self.score.role), + all_hosts, + ) + .with_help_message("Press Esc to refresh the list of discovered hosts") + .prompt(); + + match ans { + Ok(choice) => { + info!("Selected {} as the bootstrap node.", choice.summary()); + host_repo + .save_role_mapping(&self.score.role, &choice) + .await?; + host = choice; + break; + } + Err(inquire::InquireError::OperationCanceled) => { + info!("Refresh requested. Fetching list of discovered hosts again..."); + continue; + } + Err(e) => { + error!( + "Failed to select node for role {:?} : {}", + self.score.role, e + ); + return Err(InterpretError::new(format!( + "Could not select host : {}", + e.to_string() + ))); + } + } + } + + Ok(Outcome::success(format!( + "Successfully discovered host {} for role {:?}", + host.summary(), + self.score.role + ))) + } + fn get_name(&self) -> InterpretName { + InterpretName::Custom("DiscoverHostForRoleScore") + } + + fn get_version(&self) -> Version { + todo!() + } + + fn get_status(&self) -> InterpretStatus { + todo!() + } + + fn get_children(&self) -> Vec { + todo!() + } +} diff --git a/harmony/src/modules/inventory/inspect.rs b/harmony/src/modules/inventory/inspect.rs new file mode 100644 index 0000000..aa40a42 --- /dev/null +++ b/harmony/src/modules/inventory/inspect.rs @@ -0,0 +1,72 @@ +use async_trait::async_trait; +use harmony_types::id::Id; +use log::info; +use serde::{Deserialize, Serialize}; +use strum::IntoEnumIterator; + +use crate::{ + data::Version, + infra::inventory::InventoryRepositoryFactory, + interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, + inventory::{HostRole, Inventory}, + score::Score, + topology::Topology, +}; + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct InspectInventoryScore {} + +impl Score for InspectInventoryScore { + fn name(&self) -> String { + "InspectInventoryScore".to_string() + } + + #[doc(hidden)] + fn create_interpret(&self) -> Box> { + Box::new(InspectInventoryInterpret {}) + } +} + +#[derive(Debug)] +pub struct InspectInventoryInterpret; + +#[async_trait] +impl Interpret for InspectInventoryInterpret { + async fn execute( + &self, + _inventory: &Inventory, + _topology: &T, + ) -> Result { + let repo = InventoryRepositoryFactory::build().await?; + for role in HostRole::iter() { + info!("Inspecting hosts for role {role:?}"); + let hosts = repo.get_host_for_role(&role).await?; + info!("Hosts with role {role:?} : {}", hosts.len()); + hosts.iter().enumerate().for_each(|(idx, h)| { + info!( + "Found host index {idx} with role {role:?} => \n{}\n{}", + h.summary(), + h.parts_list() + ) + }); + } + Ok(Outcome::success( + "Inventory inspection complete".to_string(), + )) + } + fn get_name(&self) -> InterpretName { + InterpretName::Custom("InspectInventoryInterpret") + } + + fn get_version(&self) -> Version { + todo!() + } + + fn get_status(&self) -> InterpretStatus { + todo!() + } + + fn get_children(&self) -> Vec { + todo!() + } +} diff --git a/harmony/src/modules/inventory/mod.rs b/harmony/src/modules/inventory/mod.rs index 67d7489..0274dc4 100644 --- a/harmony/src/modules/inventory/mod.rs +++ b/harmony/src/modules/inventory/mod.rs @@ -1,3 +1,7 @@ +mod discovery; +pub mod inspect; +pub use discovery::*; + use async_trait::async_trait; use harmony_inventory_agent::local_presence::DiscoveryEvent; use log::{debug, info, trace}; @@ -18,11 +22,11 @@ use harmony_types::id::Id; /// This will allow us to register/update hosts running harmony_inventory_agent /// from LAN in the Harmony inventory #[derive(Debug, Clone, Serialize, Deserialize)] -pub struct DiscoverInventoryAgentScore { +pub struct LaunchDiscoverInventoryAgentScore { pub discovery_timeout: Option, } -impl Score for DiscoverInventoryAgentScore { +impl Score for LaunchDiscoverInventoryAgentScore { fn name(&self) -> String { "DiscoverInventoryAgentScore".to_string() } @@ -36,7 +40,7 @@ impl Score for DiscoverInventoryAgentScore { #[derive(Debug)] struct DiscoverInventoryAgentInterpret { - score: DiscoverInventoryAgentScore, + score: LaunchDiscoverInventoryAgentScore, } #[async_trait] @@ -46,6 +50,13 @@ impl Interpret for DiscoverInventoryAgentInterpret { _inventory: &Inventory, _topology: &T, ) -> Result { + match self.score.discovery_timeout { + Some(timeout) => info!("Discovery agent will wait for {timeout} seconds"), + None => info!( + "Discovery agent will wait forever in the background, go on and enjoy this delicious inventory." + ), + }; + harmony_inventory_agent::local_presence::discover_agents( self.score.discovery_timeout, |event: DiscoveryEvent| -> Result<(), String> { diff --git a/harmony/src/modules/ipxe.rs b/harmony/src/modules/ipxe.rs deleted file mode 100644 index a7aa472..0000000 --- a/harmony/src/modules/ipxe.rs +++ /dev/null @@ -1,67 +0,0 @@ -use async_trait::async_trait; -use derive_new::new; -use serde::Serialize; - -use crate::{ - data::Version, - interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, - inventory::Inventory, - score::Score, - topology::Topology, -}; -use harmony_types::id::Id; - -#[derive(Debug, new, Clone, Serialize)] -pub struct IpxeScore { - //files_to_serve: Url, -} - -impl Score for IpxeScore { - fn create_interpret(&self) -> Box> { - Box::new(IpxeInterpret::new(self.clone())) - } - - fn name(&self) -> String { - "IpxeScore".to_string() - } -} - -#[derive(Debug, new, Clone)] -pub struct IpxeInterpret { - _score: IpxeScore, -} - -#[async_trait] -impl Interpret for IpxeInterpret { - async fn execute( - &self, - _inventory: &Inventory, - _topology: &T, - ) -> Result { - /* - let http_server = &topology.http_server; - http_server.ensure_initialized().await?; - Ok(Outcome::success(format!( - "Http Server running and serving files from {}", - self.score.files_to_serve - ))) - */ - todo!(); - } - - fn get_name(&self) -> InterpretName { - InterpretName::Ipxe - } - - fn get_version(&self) -> Version { - todo!() - } - - fn get_status(&self) -> InterpretStatus { - todo!() - } - - fn get_children(&self) -> Vec { - todo!() - } -} diff --git a/harmony/src/modules/mod.rs b/harmony/src/modules/mod.rs index 8935278..682e16b 100644 --- a/harmony/src/modules/mod.rs +++ b/harmony/src/modules/mod.rs @@ -6,7 +6,6 @@ pub mod dummy; pub mod helm; pub mod http; pub mod inventory; -pub mod ipxe; pub mod k3d; pub mod k8s; pub mod lamp; diff --git a/harmony/src/modules/okd/bootstrap_01_prepare.rs b/harmony/src/modules/okd/bootstrap_01_prepare.rs new file mode 100644 index 0000000..d3409e2 --- /dev/null +++ b/harmony/src/modules/okd/bootstrap_01_prepare.rs @@ -0,0 +1,120 @@ +use async_trait::async_trait; +use derive_new::new; +use harmony_types::id::Id; +use log::{error, info, warn}; +use serde::Serialize; + +use crate::{ + data::Version, + hardware::PhysicalHost, + infra::inventory::InventoryRepositoryFactory, + interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, + inventory::{HostRole, Inventory}, + modules::inventory::{DiscoverHostForRoleScore, LaunchDiscoverInventoryAgentScore}, + score::Score, + topology::HAClusterTopology, +}; +// ------------------------------------------------------------------------------------------------- +// Step 01: Inventory (default PXE + Kickstart in RAM + Rust agent) +// - This score exposes/ensures the default inventory assets and waits for discoveries. +// - No early bonding. Simple access DHCP. +// ------------------------------------------------------------------------------------------------- + +#[derive(Debug, Clone, Serialize, new)] +pub struct OKDSetup01InventoryScore {} + +impl Score for OKDSetup01InventoryScore { + fn create_interpret(&self) -> Box> { + Box::new(OKDSetup01InventoryInterpret::new(self.clone())) + } + + fn name(&self) -> String { + "OKDSetup01InventoryScore".to_string() + } +} + +#[derive(Debug, Clone)] +pub struct OKDSetup01InventoryInterpret { + score: OKDSetup01InventoryScore, + version: Version, + status: InterpretStatus, +} + +impl OKDSetup01InventoryInterpret { + pub fn new(score: OKDSetup01InventoryScore) -> Self { + let version = Version::from("1.0.0").unwrap(); + Self { + version, + score, + status: InterpretStatus::QUEUED, + } + } +} + +#[async_trait] +impl Interpret for OKDSetup01InventoryInterpret { + fn get_name(&self) -> InterpretName { + InterpretName::Custom("OKDSetup01Inventory") + } + + fn get_version(&self) -> Version { + self.version.clone() + } + + fn get_status(&self) -> InterpretStatus { + self.status.clone() + } + + fn get_children(&self) -> Vec { + vec![] + } + + async fn execute( + &self, + inventory: &Inventory, + topology: &HAClusterTopology, + ) -> Result { + info!("Setting up base DNS config for OKD"); + let cluster_domain = &topology.domain_name; + let load_balancer_ip = &topology.load_balancer.get_ip(); + inquire::Confirm::new(&format!( + "Set hostnames manually in your opnsense dnsmasq config : +*.apps.{cluster_domain} -> {load_balancer_ip} +api.{cluster_domain} -> {load_balancer_ip} +api-int.{cluster_domain} -> {load_balancer_ip} + +When you can dig them, confirm to continue. +" + )) + .prompt() + .expect("Prompt error"); + // TODO reactivate automatic dns config when migration from unbound to dnsmasq is done + // OKDDnsScore::new(topology) + // .interpret(inventory, topology) + // .await?; + + // TODO refactor this section into a function discover_hosts_for_role(...) that can be used + // from anywhere in the project, not a member of this struct + + let mut bootstrap_host: Option = None; + let repo = InventoryRepositoryFactory::build().await?; + + while bootstrap_host.is_none() { + let hosts = repo.get_host_for_role(&HostRole::Bootstrap).await?; + bootstrap_host = hosts.into_iter().next().to_owned(); + DiscoverHostForRoleScore { + role: HostRole::Bootstrap, + } + .interpret(inventory, topology) + .await?; + } + + Ok(Outcome::new( + InterpretStatus::SUCCESS, + format!( + "Found and assigned bootstrap node: {}", + bootstrap_host.unwrap().summary() + ), + )) + } +} diff --git a/harmony/src/modules/okd/bootstrap_02_bootstrap.rs b/harmony/src/modules/okd/bootstrap_02_bootstrap.rs new file mode 100644 index 0000000..5b940fb --- /dev/null +++ b/harmony/src/modules/okd/bootstrap_02_bootstrap.rs @@ -0,0 +1,387 @@ +use std::{fmt::Write, path::PathBuf}; + +use async_trait::async_trait; +use derive_new::new; +use harmony_secret::SecretManager; +use harmony_types::id::Id; +use log::{debug, error, info, warn}; +use serde::{Deserialize, Serialize}; +use tokio::{fs::File, io::AsyncWriteExt, process::Command}; + +use crate::{ + config::secret::{RedhatSecret, SshKeyPair}, + data::{FileContent, FilePath, Version}, + hardware::PhysicalHost, + infra::inventory::InventoryRepositoryFactory, + instrumentation::{HarmonyEvent, instrument}, + interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, + inventory::{HostRole, Inventory}, + modules::{ + dhcp::DhcpHostBindingScore, + http::{IPxeMacBootFileScore, StaticFilesHttpScore}, + inventory::LaunchDiscoverInventoryAgentScore, + okd::{ + bootstrap_load_balancer::OKDBootstrapLoadBalancerScore, + templates::{BootstrapIpxeTpl, InstallConfigYaml}, + }, + }, + score::Score, + topology::{HAClusterTopology, HostBinding}, +}; +// ------------------------------------------------------------------------------------------------- +// Step 02: Bootstrap +// - Select bootstrap node (from discovered set). +// - Render per-MAC iPXE pointing to OKD 4.19 SCOS live assets + bootstrap ignition. +// - Reboot the host via SSH and wait for bootstrap-complete. +// - No bonding at this stage unless absolutely required; prefer persistence via MC later. +// ------------------------------------------------------------------------------------------------- + +#[derive(Debug, Clone, Serialize, new)] +pub struct OKDSetup02BootstrapScore {} + +impl Score for OKDSetup02BootstrapScore { + fn create_interpret(&self) -> Box> { + Box::new(OKDSetup02BootstrapInterpret::new()) + } + + fn name(&self) -> String { + "OKDSetup02BootstrapScore".to_string() + } +} + +#[derive(Debug, Clone)] +pub struct OKDSetup02BootstrapInterpret { + version: Version, + status: InterpretStatus, +} + +impl OKDSetup02BootstrapInterpret { + pub fn new() -> Self { + let version = Version::from("1.0.0").unwrap(); + Self { + version, + status: InterpretStatus::QUEUED, + } + } + + async fn get_bootstrap_node(&self) -> Result { + let repo = InventoryRepositoryFactory::build().await?; + match repo + .get_host_for_role(&HostRole::Bootstrap) + .await? + .into_iter() + .next() + { + Some(host) => Ok(host), + None => Err(InterpretError::new( + "No bootstrap node available".to_string(), + )), + } + } + + async fn prepare_ignition_files( + &self, + inventory: &Inventory, + topology: &HAClusterTopology, + ) -> Result<(), InterpretError> { + let okd_bin_path = PathBuf::from("./data/okd/bin"); + let okd_installation_path_str = + format!("./data/okd/installation_files_{}", inventory.location.name); + let okd_images_path = &PathBuf::from("./data/okd/installer_image/"); + let okd_installation_path = &PathBuf::from(okd_installation_path_str); + + let exit_status = Command::new("mkdir") + .arg("-p") + .arg(okd_installation_path) + .spawn() + .expect("Command failed to start") + .wait() + .await + .map_err(|e| { + InterpretError::new(format!("Failed to create okd installation directory : {e}")) + })?; + if !exit_status.success() { + return Err(InterpretError::new(format!( + "Failed to create okd installation directory" + ))); + } else { + info!( + "Created OKD installation directory {}", + okd_installation_path.to_string_lossy() + ); + } + + let redhat_secret = SecretManager::get_or_prompt::().await?; + let ssh_key = SecretManager::get_or_prompt::().await?; + + let install_config_yaml = InstallConfigYaml { + cluster_name: &topology.get_cluster_name(), + cluster_domain: &topology.get_cluster_base_domain(), + pull_secret: &redhat_secret.pull_secret, + ssh_public_key: &ssh_key.public, + } + .to_string(); + + let install_config_file_path = &okd_installation_path.join("install-config.yaml"); + + self.create_file(install_config_file_path, install_config_yaml.as_bytes()) + .await?; + + let install_config_backup_extension = install_config_file_path + .extension() + .map(|e| format!("{}.bak", e.to_string_lossy())) + .unwrap_or("bak".to_string()); + + let mut install_config_backup = install_config_file_path.clone(); + install_config_backup.set_extension(install_config_backup_extension); + + self.create_file(&install_config_backup, install_config_yaml.as_bytes()) + .await?; + + info!("Creating manifest files with openshift-install"); + let output = Command::new(okd_bin_path.join("openshift-install")) + .args([ + "create", + "manifests", + "--dir", + okd_installation_path.to_str().unwrap(), + ]) + .output() + .await + .map_err(|e| InterpretError::new(format!("Failed to create okd manifest : {e}")))?; + let stdout = String::from_utf8(output.stdout).unwrap(); + info!("openshift-install stdout :\n\n{}", stdout); + let stderr = String::from_utf8(output.stderr).unwrap(); + info!("openshift-install stderr :\n\n{}", stderr); + info!("openshift-install exit status : {}", output.status); + if !output.status.success() { + return Err(InterpretError::new(format!( + "Failed to create okd manifest, exit code {} : {}", + output.status, stderr + ))); + } + + info!("Creating ignition files with openshift-install"); + let output = Command::new(okd_bin_path.join("openshift-install")) + .args([ + "create", + "ignition-configs", + "--dir", + okd_installation_path.to_str().unwrap(), + ]) + .output() + .await + .map_err(|e| { + InterpretError::new(format!("Failed to create okd ignition config : {e}")) + })?; + let stdout = String::from_utf8(output.stdout).unwrap(); + info!("openshift-install stdout :\n\n{}", stdout); + let stderr = String::from_utf8(output.stderr).unwrap(); + info!("openshift-install stderr :\n\n{}", stderr); + info!("openshift-install exit status : {}", output.status); + if !output.status.success() { + return Err(InterpretError::new(format!( + "Failed to create okd manifest, exit code {} : {}", + output.status, stderr + ))); + } + + let ignition_files_http_path = PathBuf::from("okd_ignition_files"); + let prepare_file_content = async |filename: &str| -> Result { + let local_path = okd_installation_path.join(filename); + let remote_path = ignition_files_http_path.join(filename); + + info!( + "Preparing file content for local file : {} to remote : {}", + local_path.to_string_lossy(), + remote_path.to_string_lossy() + ); + + let content = tokio::fs::read_to_string(&local_path).await.map_err(|e| { + InterpretError::new(format!( + "Could not read file content {} : {e}", + local_path.to_string_lossy() + )) + })?; + + Ok(FileContent { + path: FilePath::Relative(remote_path.to_string_lossy().to_string()), + content, + }) + }; + + StaticFilesHttpScore { + remote_path: None, + folder_to_serve: None, + files: vec![ + prepare_file_content("bootstrap.ign").await?, + prepare_file_content("master.ign").await?, + prepare_file_content("worker.ign").await?, + prepare_file_content("metadata.json").await?, + ], + } + .interpret(inventory, topology) + .await?; + + info!("Successfully prepared ignition files for OKD installation"); + // ignition_files_http_path // = PathBuf::from("okd_ignition_files"); + info!( + r#"Uploading images, they can be refreshed with a command similar to this one: openshift-install coreos print-stream-json | grep -Eo '"https.*(kernel.|initramfs.|rootfs.)\w+(\.img)?"' | grep x86_64 | xargs -n 1 curl -LO"# + ); + + inquire::Confirm::new( + &format!("push installer image files with `scp -r {}/* root@{}:/usr/local/http/scos/` until performance issue is resolved", okd_images_path.to_string_lossy(), topology.http_server.get_ip())).prompt().expect("Prompt error"); + + // let scos_http_path = PathBuf::from("scos"); + // StaticFilesHttpScore { + // folder_to_serve: Some(Url::LocalFolder( + // okd_images_path.to_string_lossy().to_string(), + // )), + // remote_path: Some(scos_http_path.to_string_lossy().to_string()), + // files: vec![], + // } + // .interpret(inventory, topology) + // .await?; + + Ok(()) + } + + async fn configure_host_binding( + &self, + inventory: &Inventory, + topology: &HAClusterTopology, + ) -> Result<(), InterpretError> { + let binding = HostBinding { + logical_host: topology.bootstrap_host.clone(), + physical_host: self.get_bootstrap_node().await?, + }; + info!("Configuring host binding for bootstrap node {binding:?}"); + + DhcpHostBindingScore { + host_binding: vec![binding], + domain: Some(topology.domain_name.clone()), + } + .interpret(inventory, topology) + .await?; + Ok(()) + } + + async fn render_per_mac_pxe( + &self, + inventory: &Inventory, + topology: &HAClusterTopology, + ) -> Result<(), InterpretError> { + let content = BootstrapIpxeTpl { + http_ip: &topology.http_server.get_ip().to_string(), + scos_path: "scos", // TODO use some constant + ignition_http_path: "okd_ignition_files", // TODO use proper variable + installation_device: "/dev/sda", + ignition_file_name: "bootstrap.ign", + } + .to_string(); + + let bootstrap_node = self.get_bootstrap_node().await?; + let mac_address = bootstrap_node.get_mac_address(); + + info!("[Bootstrap] Rendering per-MAC PXE for bootstrap node"); + debug!("bootstrap ipxe content : {content}"); + debug!("bootstrap mac addresses : {mac_address:?}"); + + IPxeMacBootFileScore { + mac_address, + content, + } + .interpret(inventory, topology) + .await?; + Ok(()) + } + + async fn setup_bootstrap_load_balancer( + &self, + inventory: &Inventory, + topology: &HAClusterTopology, + ) -> Result<(), InterpretError> { + let outcome = OKDBootstrapLoadBalancerScore::new(topology) + .interpret(inventory, topology) + .await?; + info!("Successfully executed OKDBootstrapLoadBalancerScore : {outcome:?}"); + Ok(()) + } + + async fn reboot_target(&self) -> Result<(), InterpretError> { + // Placeholder: ssh reboot using the inventory ephemeral key + info!("[Bootstrap] Rebooting bootstrap node via SSH"); + // TODO reboot programatically, there are some logical checks and refactoring to do such as + // accessing the bootstrap node config (ip address) from the inventory + let confirmation = inquire::Confirm::new( + "Now reboot the bootstrap node so it picks up its pxe boot file. Press enter when ready.", + ) + .prompt() + .expect("Unexpected prompt error"); + Ok(()) + } + + async fn wait_for_bootstrap_complete(&self) -> Result<(), InterpretError> { + // Placeholder: wait-for bootstrap-complete + info!("[Bootstrap] Waiting for bootstrap-complete …"); + todo!("[Bootstrap] Waiting for bootstrap-complete …") + } + + async fn create_file(&self, path: &PathBuf, content: &[u8]) -> Result<(), InterpretError> { + let mut install_config_file = File::create(path).await.map_err(|e| { + InterpretError::new(format!( + "Could not create file {} : {e}", + path.to_string_lossy() + )) + })?; + install_config_file.write(content).await.map_err(|e| { + InterpretError::new(format!( + "Could not write file {} : {e}", + path.to_string_lossy() + )) + })?; + Ok(()) + } +} + +#[async_trait] +impl Interpret for OKDSetup02BootstrapInterpret { + fn get_name(&self) -> InterpretName { + InterpretName::Custom("OKDSetup02Bootstrap") + } + + fn get_version(&self) -> Version { + self.version.clone() + } + + fn get_status(&self) -> InterpretStatus { + self.status.clone() + } + + fn get_children(&self) -> Vec { + vec![] + } + + async fn execute( + &self, + inventory: &Inventory, + topology: &HAClusterTopology, + ) -> Result { + self.configure_host_binding(inventory, topology).await?; + self.prepare_ignition_files(inventory, topology).await?; + self.render_per_mac_pxe(inventory, topology).await?; + self.setup_bootstrap_load_balancer(inventory, topology) + .await?; + + // TODO https://docs.okd.io/latest/installing/installing_bare_metal/upi/installing-bare-metal.html#installation-user-provisioned-validating-dns_installing-bare-metal + // self.validate_dns_config(inventory, topology).await?; + + self.reboot_target().await?; + self.wait_for_bootstrap_complete().await?; + + Ok(Outcome::new( + InterpretStatus::SUCCESS, + "Bootstrap phase complete".into(), + )) + } +} diff --git a/harmony/src/modules/okd/bootstrap_03_control_plane.rs b/harmony/src/modules/okd/bootstrap_03_control_plane.rs new file mode 100644 index 0000000..a387e1e --- /dev/null +++ b/harmony/src/modules/okd/bootstrap_03_control_plane.rs @@ -0,0 +1,277 @@ +use std::{fmt::Write, path::PathBuf}; + +use async_trait::async_trait; +use derive_new::new; +use harmony_types::id::Id; +use log::{debug, info}; +use serde::Serialize; + +use crate::{ + data::Version, + hardware::PhysicalHost, + infra::inventory::InventoryRepositoryFactory, + interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, + inventory::{HostRole, Inventory}, + modules::{ + dhcp::DhcpHostBindingScore, http::IPxeMacBootFileScore, + inventory::DiscoverHostForRoleScore, okd::templates::BootstrapIpxeTpl, + }, + score::Score, + topology::{HAClusterTopology, HostBinding}, +}; +// ------------------------------------------------------------------------------------------------- +// Step 03: Control Plane +// - Render per-MAC PXE & ignition for cp0/cp1/cp2. +// - Persist bonding via MachineConfigs (or NNCP) once SCOS is active. +// ------------------------------------------------------------------------------------------------- + +#[derive(Debug, Clone, Serialize, new)] +pub struct OKDSetup03ControlPlaneScore {} + +impl Score for OKDSetup03ControlPlaneScore { + fn create_interpret(&self) -> Box> { + Box::new(OKDSetup03ControlPlaneInterpret::new(self.clone())) + } + + fn name(&self) -> String { + "OKDSetup03ControlPlaneScore".to_string() + } +} + +#[derive(Debug, Clone)] +pub struct OKDSetup03ControlPlaneInterpret { + score: OKDSetup03ControlPlaneScore, + version: Version, + status: InterpretStatus, +} + +impl OKDSetup03ControlPlaneInterpret { + pub fn new(score: OKDSetup03ControlPlaneScore) -> Self { + let version = Version::from("1.0.0").unwrap(); + Self { + version, + score, + status: InterpretStatus::QUEUED, + } + } + + /// Ensures that three physical hosts are discovered and available for the ControlPlane role. + /// It will trigger discovery if not enough hosts are found. + async fn get_nodes( + &self, + inventory: &Inventory, + topology: &HAClusterTopology, + ) -> Result, InterpretError> { + const REQUIRED_HOSTS: usize = 3; + let repo = InventoryRepositoryFactory::build().await?; + let mut control_plane_hosts = repo.get_host_for_role(&HostRole::ControlPlane).await?; + + while control_plane_hosts.len() < REQUIRED_HOSTS { + info!( + "Discovery of {} control plane hosts in progress, current number {}", + REQUIRED_HOSTS, + control_plane_hosts.len() + ); + // This score triggers the discovery agent for a specific role. + DiscoverHostForRoleScore { + role: HostRole::ControlPlane, + } + .interpret(inventory, topology) + .await?; + control_plane_hosts = repo.get_host_for_role(&HostRole::ControlPlane).await?; + } + + if control_plane_hosts.len() < REQUIRED_HOSTS { + Err(InterpretError::new(format!( + "OKD Requires at least {} control plane hosts, but only found {}. Cannot proceed.", + REQUIRED_HOSTS, + control_plane_hosts.len() + ))) + } else { + // Take exactly the number of required hosts to ensure consistency. + Ok(control_plane_hosts + .into_iter() + .take(REQUIRED_HOSTS) + .collect()) + } + } + + /// Configures DHCP host bindings for all control plane nodes. + async fn configure_host_binding( + &self, + inventory: &Inventory, + topology: &HAClusterTopology, + nodes: &Vec, + ) -> Result<(), InterpretError> { + info!("[ControlPlane] Configuring host bindings for control plane nodes."); + + // Ensure the topology definition matches the number of physical nodes found. + if topology.control_plane.len() != nodes.len() { + return Err(InterpretError::new(format!( + "Mismatch between logical control plane hosts defined in topology ({}) and physical nodes found ({}).", + topology.control_plane.len(), + nodes.len() + ))); + } + + // Create a binding for each physical host to its corresponding logical host. + let bindings: Vec = topology + .control_plane + .iter() + .zip(nodes.iter()) + .map(|(logical_host, physical_host)| { + info!( + "Creating binding: Logical Host '{}' -> Physical Host ID '{}'", + logical_host.name, physical_host.id + ); + HostBinding { + logical_host: logical_host.clone(), + physical_host: physical_host.clone(), + } + }) + .collect(); + + DhcpHostBindingScore { + host_binding: bindings, + domain: Some(topology.domain_name.clone()), + } + .interpret(inventory, topology) + .await?; + + Ok(()) + } + + /// Renders and deploys a per-MAC iPXE boot file for each control plane node. + async fn configure_ipxe( + &self, + inventory: &Inventory, + topology: &HAClusterTopology, + nodes: &Vec, + ) -> Result<(), InterpretError> { + info!("[ControlPlane] Rendering per-MAC iPXE configurations."); + + // The iPXE script content is the same for all control plane nodes, + // pointing to the 'master.ign' ignition file. + let content = BootstrapIpxeTpl { + http_ip: &topology.http_server.get_ip().to_string(), + scos_path: "scos", + ignition_http_path: "okd_ignition_files", + installation_device: "/dev/sda", // This might need to be configurable per-host in the future + ignition_file_name: "master.ign", // Control plane nodes use the master ignition file + } + .to_string(); + + debug!("[ControlPlane] iPXE content template:\n{}", content); + + // Create and apply an iPXE boot file for each node. + for node in nodes { + let mac_address = node.get_mac_address(); + if mac_address.is_empty() { + return Err(InterpretError::new(format!( + "Physical host with ID '{}' has no MAC addresses defined.", + node.id + ))); + } + info!( + "[ControlPlane] Applying iPXE config for node ID '{}' with MACs: {:?}", + node.id, mac_address + ); + + IPxeMacBootFileScore { + mac_address, + content: content.clone(), + } + .interpret(inventory, topology) + .await?; + } + + Ok(()) + } + + /// Prompts the user to reboot the target control plane nodes. + async fn reboot_targets(&self, nodes: &Vec) -> Result<(), InterpretError> { + let node_ids: Vec = nodes.iter().map(|n| n.id.to_string()).collect(); + info!( + "[ControlPlane] Requesting reboot for control plane nodes: {:?}", + node_ids + ); + + let confirmation = inquire::Confirm::new( + &format!("Please reboot the {} control plane nodes ({}) to apply their PXE configuration. Press enter when ready.", nodes.len(), node_ids.join(", ")), + ) + .prompt() + .map_err(|e| InterpretError::new(format!("User prompt failed: {}", e)))?; + + if !confirmation { + return Err(InterpretError::new( + "User aborted the operation.".to_string(), + )); + } + + Ok(()) + } + + /// Placeholder for automating network bonding configuration. + async fn persist_network_bond(&self) -> Result<(), InterpretError> { + // Generate MC or NNCP from inventory NIC data; apply via ignition or post-join. + info!("[ControlPlane] Ensuring persistent bonding via MachineConfig/NNCP"); + inquire::Confirm::new( + "Network configuration for control plane nodes is not automated yet. Configure it manually if needed.", + ) + .prompt() + .map_err(|e| InterpretError::new(format!("User prompt failed: {}", e)))?; + + Ok(()) + } +} + +#[async_trait] +impl Interpret for OKDSetup03ControlPlaneInterpret { + fn get_name(&self) -> InterpretName { + InterpretName::Custom("OKDSetup03ControlPlane") + } + + fn get_version(&self) -> Version { + self.version.clone() + } + + fn get_status(&self) -> InterpretStatus { + self.status.clone() + } + + fn get_children(&self) -> Vec { + vec![] + } + + async fn execute( + &self, + inventory: &Inventory, + topology: &HAClusterTopology, + ) -> Result { + // 1. Ensure we have 3 physical hosts for the control plane. + let nodes = self.get_nodes(inventory, topology).await?; + + // 2. Create DHCP reservations for the control plane nodes. + self.configure_host_binding(inventory, topology, &nodes) + .await?; + + // 3. Create iPXE files for each control plane node to boot from the master ignition. + self.configure_ipxe(inventory, topology, &nodes).await?; + + // 4. Reboot the nodes to start the OS installation. + self.reboot_targets(&nodes).await?; + + // 5. Placeholder for post-boot network configuration (e.g., bonding). + self.persist_network_bond().await?; + + // TODO: Implement a step to wait for the control plane nodes to join the cluster + // and for the cluster operators to become available. This would be similar to + // the `wait-for bootstrap-complete` command. + info!("[ControlPlane] Provisioning initiated. Monitor the cluster convergence manually."); + + Ok(Outcome::new( + InterpretStatus::SUCCESS, + "Control plane provisioning has been successfully initiated.".into(), + )) + } +} diff --git a/harmony/src/modules/okd/bootstrap_04_workers.rs b/harmony/src/modules/okd/bootstrap_04_workers.rs new file mode 100644 index 0000000..d5ed87c --- /dev/null +++ b/harmony/src/modules/okd/bootstrap_04_workers.rs @@ -0,0 +1,102 @@ +use std::{fmt::Write, path::PathBuf}; + +use async_trait::async_trait; +use derive_new::new; +use harmony_secret::SecretManager; +use harmony_types::id::Id; +use log::{debug, error, info, warn}; +use serde::{Deserialize, Serialize}; +use tokio::{fs::File, io::AsyncWriteExt, process::Command}; + +use crate::{ + config::secret::{RedhatSecret, SshKeyPair}, + data::{FileContent, FilePath, Version}, + hardware::PhysicalHost, + infra::inventory::InventoryRepositoryFactory, + instrumentation::{HarmonyEvent, instrument}, + interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, + inventory::{HostRole, Inventory}, + modules::{ + dhcp::DhcpHostBindingScore, + http::{IPxeMacBootFileScore, StaticFilesHttpScore}, + inventory::LaunchDiscoverInventoryAgentScore, + okd::{ + bootstrap_load_balancer::OKDBootstrapLoadBalancerScore, + templates::{BootstrapIpxeTpl, InstallConfigYaml}, + }, + }, + score::Score, + topology::{HAClusterTopology, HostBinding}, +}; +// ------------------------------------------------------------------------------------------------- +// Step 04: Workers +// - Render per-MAC PXE & ignition for workers; join nodes. +// - Persist bonding via MC/NNCP as required (same approach as masters). +// ------------------------------------------------------------------------------------------------- + +#[derive(Debug, Clone, Serialize, new)] +pub struct OKDSetup04WorkersScore {} + +impl Score for OKDSetup04WorkersScore { + fn create_interpret(&self) -> Box> { + Box::new(OKDSetup04WorkersInterpret::new(self.clone())) + } + + fn name(&self) -> String { + "OKDSetup04WorkersScore".to_string() + } +} + +#[derive(Debug, Clone)] +pub struct OKDSetup04WorkersInterpret { + score: OKDSetup04WorkersScore, + version: Version, + status: InterpretStatus, +} + +impl OKDSetup04WorkersInterpret { + pub fn new(score: OKDSetup04WorkersScore) -> Self { + let version = Version::from("1.0.0").unwrap(); + Self { + version, + score, + status: InterpretStatus::QUEUED, + } + } + + async fn render_and_reboot(&self) -> Result<(), InterpretError> { + info!("[Workers] Rendering per-MAC PXE for workers and rebooting"); + Ok(()) + } +} + +#[async_trait] +impl Interpret for OKDSetup04WorkersInterpret { + fn get_name(&self) -> InterpretName { + InterpretName::Custom("OKDSetup04Workers") + } + + fn get_version(&self) -> Version { + self.version.clone() + } + + fn get_status(&self) -> InterpretStatus { + self.status.clone() + } + + fn get_children(&self) -> Vec { + vec![] + } + + async fn execute( + &self, + _inventory: &Inventory, + _topology: &HAClusterTopology, + ) -> Result { + self.render_and_reboot().await?; + Ok(Outcome::new( + InterpretStatus::SUCCESS, + "Workers provisioned".into(), + )) + } +} diff --git a/harmony/src/modules/okd/bootstrap_05_sanity_check.rs b/harmony/src/modules/okd/bootstrap_05_sanity_check.rs new file mode 100644 index 0000000..f1a4c2a --- /dev/null +++ b/harmony/src/modules/okd/bootstrap_05_sanity_check.rs @@ -0,0 +1,101 @@ +use std::{fmt::Write, path::PathBuf}; + +use async_trait::async_trait; +use derive_new::new; +use harmony_secret::SecretManager; +use harmony_types::id::Id; +use log::{debug, error, info, warn}; +use serde::{Deserialize, Serialize}; +use tokio::{fs::File, io::AsyncWriteExt, process::Command}; + +use crate::{ + config::secret::{RedhatSecret, SshKeyPair}, + data::{FileContent, FilePath, Version}, + hardware::PhysicalHost, + infra::inventory::InventoryRepositoryFactory, + instrumentation::{HarmonyEvent, instrument}, + interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, + inventory::{HostRole, Inventory}, + modules::{ + dhcp::DhcpHostBindingScore, + http::{IPxeMacBootFileScore, StaticFilesHttpScore}, + inventory::LaunchDiscoverInventoryAgentScore, + okd::{ + bootstrap_load_balancer::OKDBootstrapLoadBalancerScore, + templates::{BootstrapIpxeTpl, InstallConfigYaml}, + }, + }, + score::Score, + topology::{HAClusterTopology, HostBinding}, +}; +// ------------------------------------------------------------------------------------------------- +// Step 05: Sanity Check +// - Validate API reachability, ClusterOperators, ingress, and SDN status. +// ------------------------------------------------------------------------------------------------- + +#[derive(Debug, Clone, Serialize, new)] +pub struct OKDSetup05SanityCheckScore {} + +impl Score for OKDSetup05SanityCheckScore { + fn create_interpret(&self) -> Box> { + Box::new(OKDSetup05SanityCheckInterpret::new(self.clone())) + } + + fn name(&self) -> String { + "OKDSetup05SanityCheckScore".to_string() + } +} + +#[derive(Debug, Clone)] +pub struct OKDSetup05SanityCheckInterpret { + score: OKDSetup05SanityCheckScore, + version: Version, + status: InterpretStatus, +} + +impl OKDSetup05SanityCheckInterpret { + pub fn new(score: OKDSetup05SanityCheckScore) -> Self { + let version = Version::from("1.0.0").unwrap(); + Self { + version, + score, + status: InterpretStatus::QUEUED, + } + } + + async fn run_checks(&self) -> Result<(), InterpretError> { + info!("[Sanity] Checking API, COs, Ingress, and SDN health …"); + Ok(()) + } +} + +#[async_trait] +impl Interpret for OKDSetup05SanityCheckInterpret { + fn get_name(&self) -> InterpretName { + InterpretName::Custom("OKDSetup05SanityCheck") + } + + fn get_version(&self) -> Version { + self.version.clone() + } + + fn get_status(&self) -> InterpretStatus { + self.status.clone() + } + + fn get_children(&self) -> Vec { + vec![] + } + + async fn execute( + &self, + _inventory: &Inventory, + _topology: &HAClusterTopology, + ) -> Result { + self.run_checks().await?; + Ok(Outcome::new( + InterpretStatus::SUCCESS, + "Sanity checks passed".into(), + )) + } +} diff --git a/harmony/src/modules/okd/bootstrap_06_installation_report.rs b/harmony/src/modules/okd/bootstrap_06_installation_report.rs new file mode 100644 index 0000000..2713bd2 --- /dev/null +++ b/harmony/src/modules/okd/bootstrap_06_installation_report.rs @@ -0,0 +1,101 @@ +// ------------------------------------------------------------------------------------------------- +use async_trait::async_trait; +use derive_new::new; +use harmony_secret::SecretManager; +use harmony_types::id::Id; +use log::{debug, error, info, warn}; +use serde::{Deserialize, Serialize}; +use std::{fmt::Write, path::PathBuf}; +use tokio::{fs::File, io::AsyncWriteExt, process::Command}; + +use crate::{ + config::secret::{RedhatSecret, SshKeyPair}, + data::{FileContent, FilePath, Version}, + hardware::PhysicalHost, + infra::inventory::InventoryRepositoryFactory, + instrumentation::{HarmonyEvent, instrument}, + interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, + inventory::{HostRole, Inventory}, + modules::{ + dhcp::DhcpHostBindingScore, + http::{IPxeMacBootFileScore, StaticFilesHttpScore}, + inventory::LaunchDiscoverInventoryAgentScore, + okd::{ + bootstrap_load_balancer::OKDBootstrapLoadBalancerScore, + templates::{BootstrapIpxeTpl, InstallConfigYaml}, + }, + }, + score::Score, + topology::{HAClusterTopology, HostBinding}, +}; + +// Step 06: Installation Report +// - Emit JSON and concise human summary of nodes, roles, versions, and health. +// ------------------------------------------------------------------------------------------------- + +#[derive(Debug, Clone, Serialize, new)] +pub struct OKDSetup06InstallationReportScore {} + +impl Score for OKDSetup06InstallationReportScore { + fn create_interpret(&self) -> Box> { + Box::new(OKDSetup06InstallationReportInterpret::new(self.clone())) + } + + fn name(&self) -> String { + "OKDSetup06InstallationReportScore".to_string() + } +} + +#[derive(Debug, Clone)] +pub struct OKDSetup06InstallationReportInterpret { + score: OKDSetup06InstallationReportScore, + version: Version, + status: InterpretStatus, +} + +impl OKDSetup06InstallationReportInterpret { + pub fn new(score: OKDSetup06InstallationReportScore) -> Self { + let version = Version::from("1.0.0").unwrap(); + Self { + version, + score, + status: InterpretStatus::QUEUED, + } + } + + async fn generate(&self) -> Result<(), InterpretError> { + info!("[Report] Generating OKD installation report",); + Ok(()) + } +} + +#[async_trait] +impl Interpret for OKDSetup06InstallationReportInterpret { + fn get_name(&self) -> InterpretName { + InterpretName::Custom("OKDSetup06InstallationReport") + } + + fn get_version(&self) -> Version { + self.version.clone() + } + + fn get_status(&self) -> InterpretStatus { + self.status.clone() + } + + fn get_children(&self) -> Vec { + vec![] + } + + async fn execute( + &self, + _inventory: &Inventory, + _topology: &HAClusterTopology, + ) -> Result { + self.generate().await?; + Ok(Outcome::new( + InterpretStatus::SUCCESS, + "Installation report generated".into(), + )) + } +} diff --git a/harmony/src/modules/okd/bootstrap_dhcp.rs b/harmony/src/modules/okd/bootstrap_dhcp.rs index c7ffe7d..c8f323d 100644 --- a/harmony/src/modules/okd/bootstrap_dhcp.rs +++ b/harmony/src/modules/okd/bootstrap_dhcp.rs @@ -37,21 +37,23 @@ impl OKDBootstrapDhcpScore { .clone(), }); // TODO refactor this so it is not copy pasted from dhcp.rs - Self { - dhcp_score: DhcpScore::new( - host_binding, - // TODO : we should add a tftp server to the topology instead of relying on the - // router address, this is leaking implementation details - Some(topology.router.get_gateway()), - None, // To allow UEFI boot we cannot provide a legacy file - Some("undionly.kpxe".to_string()), - Some("ipxe.efi".to_string()), - Some(format!( - "http://{}:8080/boot.ipxe", - topology.router.get_gateway() - )), - ), - } + todo!("Add dhcp range") + // Self { + // dhcp_score: DhcpScore::new( + // host_binding, + // // TODO : we should add a tftp server to the topology instead of relying on the + // // router address, this is leaking implementation details + // Some(topology.router.get_gateway()), + // None, // To allow UEFI boot we cannot provide a legacy file + // Some("undionly.kpxe".to_string()), + // Some("ipxe.efi".to_string()), + // Some(format!( + // "http://{}:8080/boot.ipxe", + // topology.router.get_gateway() + // )), + // (self.), + // ), + // } } } diff --git a/harmony/src/modules/okd/bootstrap_load_balancer.rs b/harmony/src/modules/okd/bootstrap_load_balancer.rs index d6cd2f3..ccc69c9 100644 --- a/harmony/src/modules/okd/bootstrap_load_balancer.rs +++ b/harmony/src/modules/okd/bootstrap_load_balancer.rs @@ -8,7 +8,7 @@ use crate::{ score::Score, topology::{ BackendServer, HAClusterTopology, HealthCheck, HttpMethod, HttpStatusCode, LoadBalancer, - LoadBalancerService, Topology, + LoadBalancerService, SSL, Topology, }, }; @@ -44,6 +44,7 @@ impl OKDBootstrapLoadBalancerScore { "/readyz".to_string(), HttpMethod::GET, HttpStatusCode::Success2xx, + SSL::SSL, )), }, ]; @@ -54,6 +55,7 @@ impl OKDBootstrapLoadBalancerScore { }, } } + fn topology_to_backend_server(topology: &HAClusterTopology, port: u16) -> Vec { let mut backend: Vec<_> = topology .control_plane @@ -63,6 +65,14 @@ impl OKDBootstrapLoadBalancerScore { port, }) .collect(); + + topology.workers.iter().for_each(|worker| { + backend.push(BackendServer { + address: worker.ip.to_string(), + port, + }) + }); + backend.push(BackendServer { address: topology.bootstrap_host.ip.to_string(), port, diff --git a/harmony/src/modules/okd/dhcp.rs b/harmony/src/modules/okd/dhcp.rs index 3386592..94e7e55 100644 --- a/harmony/src/modules/okd/dhcp.rs +++ b/harmony/src/modules/okd/dhcp.rs @@ -1,3 +1,6 @@ +use std::net::Ipv4Addr; + +use harmony_types::net::IpAddress; use serde::Serialize; use crate::{ @@ -44,6 +47,16 @@ impl OKDDhcpScore { }) }); + let dhcp_server_ip = match topology.dhcp_server.get_ip() { + std::net::IpAddr::V4(ipv4_addr) => ipv4_addr, + std::net::IpAddr::V6(_ipv6_addr) => todo!("Support ipv6 someday"), + }; + + // TODO this could overflow, we should use proper subnet maths here instead of an ip + // address and guessing the subnet size from there + let start = Ipv4Addr::from(u32::from(dhcp_server_ip) + 100); + let end = Ipv4Addr::from(u32::from(dhcp_server_ip) + 150); + Self { // TODO : we should add a tftp server to the topology instead of relying on the // router address, this is leaking implementation details @@ -57,6 +70,8 @@ impl OKDDhcpScore { "http://{}:8080/boot.ipxe", topology.router.get_gateway() )), + dhcp_range: (IpAddress::from(start), IpAddress::from(end)), + domain: Some(topology.domain_name.clone()), }, } } diff --git a/harmony/src/modules/okd/installation.rs b/harmony/src/modules/okd/installation.rs index 58b6942..72603c8 100644 --- a/harmony/src/modules/okd/installation.rs +++ b/harmony/src/modules/okd/installation.rs @@ -44,795 +44,30 @@ //! which must be configured on host AND switch to connect properly. //! //! Configuration knobs -//! - lan_cidr: CIDR to scan/allow for discovery endpoints. //! - public_domain: External wildcard/apps domain (e.g., apps.example.com). //! - internal_domain: Internal cluster domain (e.g., cluster.local or harmony.mcd). -use async_trait::async_trait; -use derive_new::new; -use harmony_macros::ip; -use harmony_types::id::Id; -use log::info; -use serde::{Deserialize, Serialize}; - use crate::{ - data::Version, - instrumentation::{HarmonyEvent, instrument}, - interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, - inventory::Inventory, + modules::okd::{ + OKDSetup01InventoryScore, OKDSetup02BootstrapScore, OKDSetup03ControlPlaneScore, + OKDSetup04WorkersScore, OKDSetup05SanityCheckScore, + bootstrap_06_installation_report::OKDSetup06InstallationReportScore, + }, score::Score, - topology::{DnsRecord, DnsRecordType, DnsServer, Topology}, + topology::HAClusterTopology, }; -// ------------------------------------------------------------------------------------------------- -// Public Orchestrator Score -// ------------------------------------------------------------------------------------------------- +pub struct OKDInstallationPipeline; -#[derive(Debug, Clone, Serialize, Deserialize, new)] -pub struct OKDInstallationScore { - /// The LAN CIDR where discovery endpoints live (e.g., 192.168.10.0/24) - pub lan_cidr: String, - /// Public external domain (e.g., example.com). Used for api/apps wildcard, etc. - pub public_domain: String, - /// Internal cluster domain (e.g., harmony.mcd). Used for internal svc/ingress and DNS. - pub internal_domain: String, -} - -impl Score for OKDInstallationScore { - fn create_interpret(&self) -> Box> { - Box::new(OKDInstallationInterpret::new(self.clone())) - } - - fn name(&self) -> String { - "OKDInstallationScore".to_string() - } -} - -// ------------------------------------------------------------------------------------------------- -// Orchestrator Interpret -// ------------------------------------------------------------------------------------------------- - -#[derive(Debug, Clone)] -pub struct OKDInstallationInterpret { - score: OKDInstallationScore, - version: Version, - status: InterpretStatus, -} - -impl OKDInstallationInterpret { - pub fn new(score: OKDInstallationScore) -> Self { - let version = Version::from("0.1.0").expect("valid version"); - Self { - score, - version, - status: InterpretStatus::QUEUED, - } - } - - async fn run_inventory_phase( - &self, - inventory: &Inventory, - topology: &T, - ) -> Result<(), InterpretError> { - // 1) Prepare DNS and DHCP lease registration (optional) - let dns_score = OKDSetup01InventoryDnsScore::new( - self.score.internal_domain.clone(), - self.score.public_domain.clone(), - Some(true), // register_dhcp_leases - ); - dns_score.interpret(inventory, topology).await?; - - // 2) Serve default iPXE + Kickstart and poll discovery - let discovery_score = OKDSetup01InventoryScore::new(self.score.lan_cidr.clone()); - discovery_score.interpret(inventory, topology).await?; - - Ok(()) - } - - async fn run_bootstrap_phase( - &self, - inventory: &Inventory, - topology: &T, - ) -> Result<(), InterpretError> { - // Select and provision bootstrap - let bootstrap_score = OKDSetup02BootstrapScore::new( - self.score.public_domain.clone(), - self.score.internal_domain.clone(), - ); - bootstrap_score.interpret(inventory, topology).await?; - Ok(()) - } - - async fn run_control_plane_phase( - &self, - inventory: &Inventory, - topology: &T, - ) -> Result<(), InterpretError> { - let control_plane_score = OKDSetup03ControlPlaneScore::new(); - control_plane_score.interpret(inventory, topology).await?; - Ok(()) - } - - async fn run_workers_phase( - &self, - inventory: &Inventory, - topology: &T, - ) -> Result<(), InterpretError> { - let workers_score = OKDSetup04WorkersScore::new(); - workers_score.interpret(inventory, topology).await?; - Ok(()) - } - - async fn run_sanity_phase( - &self, - inventory: &Inventory, - topology: &T, - ) -> Result<(), InterpretError> { - let sanity_score = OKDSetup05SanityCheckScore::new(); - sanity_score.interpret(inventory, topology).await?; - Ok(()) - } - - async fn run_report_phase( - &self, - inventory: &Inventory, - topology: &T, - ) -> Result<(), InterpretError> { - let report_score = OKDSetup06InstallationReportScore::new( - self.score.public_domain.clone(), - self.score.internal_domain.clone(), - ); - report_score.interpret(inventory, topology).await?; - Ok(()) - } -} - -#[async_trait] -impl Interpret for OKDInstallationInterpret { - fn get_name(&self) -> InterpretName { - InterpretName::Custom("OKDInstallationInterpret") - } - - fn get_version(&self) -> Version { - self.version.clone() - } - - fn get_status(&self) -> InterpretStatus { - self.status.clone() - } - - fn get_children(&self) -> Vec { - vec![] - } - - async fn execute( - &self, - inventory: &Inventory, - topology: &T, - ) -> Result { - instrument(HarmonyEvent::HarmonyStarted).ok(); - - info!( - "Starting OKD installation pipeline for public_domain={} internal_domain={} lan_cidr={}", - self.score.public_domain, self.score.internal_domain, self.score.lan_cidr - ); - - self.run_inventory_phase(inventory, topology).await?; - - self.run_bootstrap_phase(inventory, topology).await?; - - self.run_control_plane_phase(inventory, topology).await?; - - self.run_workers_phase(inventory, topology).await?; - - self.run_sanity_phase(inventory, topology).await?; - - self.run_report_phase(inventory, topology).await?; - - instrument(HarmonyEvent::HarmonyFinished).ok(); - - Ok(Outcome::new( - InterpretStatus::SUCCESS, - "OKD installation pipeline completed".into(), - )) - } -} - -// ------------------------------------------------------------------------------------------------- -// Step 01: Inventory DNS setup -// - Keep DHCP simple; optionally register dynamic leases into DNS. -// - Ensure base records for internal/public domains (api/api-int/apps wildcard). -// ------------------------------------------------------------------------------------------------- - -#[derive(Debug, Clone, Serialize, new)] -struct OKDSetup01InventoryDnsScore { - internal_domain: String, - public_domain: String, - register_dhcp_leases: Option, -} - -impl Score for OKDSetup01InventoryDnsScore { - fn create_interpret(&self) -> Box> { - Box::new(OKDSetup01InventoryDnsInterpret::new(self.clone())) - } - - fn name(&self) -> String { - "OKDSetup01InventoryDnsScore".to_string() - } -} - -#[derive(Debug, Clone)] -struct OKDSetup01InventoryDnsInterpret { - score: OKDSetup01InventoryDnsScore, - version: Version, - status: InterpretStatus, -} - -impl OKDSetup01InventoryDnsInterpret { - pub fn new(score: OKDSetup01InventoryDnsScore) -> Self { - let version = Version::from("1.0.0").unwrap(); - Self { - version, - score, - status: InterpretStatus::QUEUED, - } - } - - async fn ensure_dns(&self, dns: &T) -> Result<(), InterpretError> { - // Minimal records placeholders; real IPs are set elsewhere in the flow. - // We register the names early to ensure resolvability for clients relying on DNS. - let mut records: Vec = vec![ - DnsRecord { - value: ip!("0.0.0.0"), - host: "api".to_string(), - domain: self.score.internal_domain.clone(), - record_type: DnsRecordType::A, - }, - DnsRecord { - value: ip!("0.0.0.0"), - host: "api-int".to_string(), - domain: self.score.internal_domain.clone(), - record_type: DnsRecordType::A, - }, - DnsRecord { - value: ip!("0.0.0.0"), - host: "*.apps.".to_string(), - domain: self.score.internal_domain.clone(), - record_type: DnsRecordType::A, - }, - ]; - dns.ensure_hosts_registered(records.drain(..).collect()) - .await?; - if let Some(register) = self.score.register_dhcp_leases { - dns.register_dhcp_leases(register).await?; - } - dns.commit_config().await?; - Ok(()) - } -} - -#[async_trait] -impl Interpret for OKDSetup01InventoryDnsInterpret { - fn get_name(&self) -> InterpretName { - InterpretName::Custom("OKDSetup01InventoryDns") - } - - fn get_version(&self) -> Version { - self.version.clone() - } - - fn get_status(&self) -> InterpretStatus { - self.status.clone() - } - - fn get_children(&self) -> Vec { - vec![] - } - - async fn execute( - &self, - _inventory: &Inventory, - topology: &T, - ) -> Result { - info!("Ensuring base DNS and DHCP lease registration for discovery phase"); - self.ensure_dns(topology).await?; - Ok(Outcome::new( - InterpretStatus::SUCCESS, - "Inventory DNS prepared".into(), - )) - } -} - -// ------------------------------------------------------------------------------------------------- -// Step 01: Inventory (default PXE + Kickstart in RAM + Rust agent) -// - This score exposes/ensures the default inventory assets and waits for discoveries. -// - No early bonding. Simple access DHCP. -// ------------------------------------------------------------------------------------------------- - -#[derive(Debug, Clone, Serialize, new)] -struct OKDSetup01InventoryScore { - lan_cidr: String, -} - -impl Score for OKDSetup01InventoryScore { - fn create_interpret(&self) -> Box> { - Box::new(OKDSetup01InventoryInterpret::new(self.clone())) - } - - fn name(&self) -> String { - "OKDSetup01InventoryScore".to_string() - } -} - -#[derive(Debug, Clone)] -struct OKDSetup01InventoryInterpret { - score: OKDSetup01InventoryScore, - version: Version, - status: InterpretStatus, -} - -impl OKDSetup01InventoryInterpret { - pub fn new(score: OKDSetup01InventoryScore) -> Self { - let version = Version::from("1.0.0").unwrap(); - Self { - version, - score, - status: InterpretStatus::QUEUED, - } - } - - async fn ensure_inventory_assets( - &self, - topology: &T, - ) -> Result<(), InterpretError> { - // Placeholder: push or verify iPXE default, Kickstart, and Rust inventory agent are hosted. - // Real implementation: publish to the PXE/HTTP server via the topology. - info!( - "[Inventory] Ensuring default iPXE, Kickstart, and inventory agent are available for LAN {}", - self.score.lan_cidr - ); - // topology.publish_http_asset(…) ? - Ok(()) - } - - async fn discover_nodes(&self) -> Result { - // Placeholder: implement Harmony discovery logic (scan/pull/push mode). - // Returns number of newly discovered nodes. - info!( - "[Inventory] Scanning for inventory agents in {}", - self.score.lan_cidr - ); - // In practice, this would query harmony_composer or a local registry store. - Ok(3) - } -} - -#[async_trait] -impl Interpret for OKDSetup01InventoryInterpret { - fn get_name(&self) -> InterpretName { - InterpretName::Custom("OKDSetup01Inventory") - } - - fn get_version(&self) -> Version { - self.version.clone() - } - - fn get_status(&self) -> InterpretStatus { - self.status.clone() - } - - fn get_children(&self) -> Vec { - vec![] - } - - async fn execute( - &self, - _inventory: &Inventory, - topology: &T, - ) -> Result { - self.ensure_inventory_assets(topology).await?; - let count = self.discover_nodes().await?; - info!("[Inventory] Discovered {count} nodes"); - Ok(Outcome::new( - InterpretStatus::SUCCESS, - format!("Inventory phase complete. Nodes discovered: {count}"), - )) - } -} - -// ------------------------------------------------------------------------------------------------- -// Step 02: Bootstrap -// - Select bootstrap node (from discovered set). -// - Render per-MAC iPXE pointing to OKD 4.19 SCOS live assets + bootstrap ignition. -// - Reboot the host via SSH and wait for bootstrap-complete. -// - No bonding at this stage unless absolutely required; prefer persistence via MC later. -// ------------------------------------------------------------------------------------------------- - -#[derive(Debug, Clone, Serialize, new)] -struct OKDSetup02BootstrapScore { - public_domain: String, - internal_domain: String, -} - -impl Score for OKDSetup02BootstrapScore { - fn create_interpret(&self) -> Box> { - Box::new(OKDSetup02BootstrapInterpret::new(self.clone())) - } - - fn name(&self) -> String { - "OKDSetup02BootstrapScore".to_string() - } -} - -#[derive(Debug, Clone)] -struct OKDSetup02BootstrapInterpret { - score: OKDSetup02BootstrapScore, - version: Version, - status: InterpretStatus, -} - -impl OKDSetup02BootstrapInterpret { - pub fn new(score: OKDSetup02BootstrapScore) -> Self { - let version = Version::from("1.0.0").unwrap(); - Self { - version, - score, - status: InterpretStatus::QUEUED, - } - } - - async fn render_per_mac_pxe(&self) -> Result<(), InterpretError> { - // Placeholder: use Harmony templates to emit {MAC}.ipxe selecting SCOS live + bootstrap ignition. - info!("[Bootstrap] Rendering per-MAC PXE for bootstrap node"); - Ok(()) - } - - async fn reboot_target(&self) -> Result<(), InterpretError> { - // Placeholder: ssh reboot using the inventory ephemeral key - info!("[Bootstrap] Rebooting bootstrap node via SSH"); - Ok(()) - } - - async fn wait_for_bootstrap_complete(&self) -> Result<(), InterpretError> { - // Placeholder: wait-for bootstrap-complete - info!("[Bootstrap] Waiting for bootstrap-complete …"); - Ok(()) - } -} - -#[async_trait] -impl Interpret for OKDSetup02BootstrapInterpret { - fn get_name(&self) -> InterpretName { - InterpretName::Custom("OKDSetup02Bootstrap") - } - - fn get_version(&self) -> Version { - self.version.clone() - } - - fn get_status(&self) -> InterpretStatus { - self.status.clone() - } - - fn get_children(&self) -> Vec { - vec![] - } - - async fn execute( - &self, - _inventory: &Inventory, - _topology: &T, - ) -> Result { - self.render_per_mac_pxe().await?; - self.reboot_target().await?; - self.wait_for_bootstrap_complete().await?; - - Ok(Outcome::new( - InterpretStatus::SUCCESS, - "Bootstrap phase complete".into(), - )) - } -} - -// ------------------------------------------------------------------------------------------------- -// Step 03: Control Plane -// - Render per-MAC PXE & ignition for cp0/cp1/cp2. -// - Persist bonding via MachineConfigs (or NNCP) once SCOS is active. -// ------------------------------------------------------------------------------------------------- - -#[derive(Debug, Clone, Serialize, new)] -struct OKDSetup03ControlPlaneScore {} - -impl Score for OKDSetup03ControlPlaneScore { - fn create_interpret(&self) -> Box> { - Box::new(OKDSetup03ControlPlaneInterpret::new(self.clone())) - } - - fn name(&self) -> String { - "OKDSetup03ControlPlaneScore".to_string() - } -} - -#[derive(Debug, Clone)] -struct OKDSetup03ControlPlaneInterpret { - score: OKDSetup03ControlPlaneScore, - version: Version, - status: InterpretStatus, -} - -impl OKDSetup03ControlPlaneInterpret { - pub fn new(score: OKDSetup03ControlPlaneScore) -> Self { - let version = Version::from("1.0.0").unwrap(); - Self { - version, - score, - status: InterpretStatus::QUEUED, - } - } - - async fn render_and_reboot(&self) -> Result<(), InterpretError> { - info!("[ControlPlane] Rendering per-MAC PXE for masters and rebooting"); - Ok(()) - } - - async fn persist_network_bond(&self) -> Result<(), InterpretError> { - // Generate MC or NNCP from inventory NIC data; apply via ignition or post-join. - info!("[ControlPlane] Ensuring persistent bonding via MachineConfig/NNCP"); - Ok(()) - } -} - -#[async_trait] -impl Interpret for OKDSetup03ControlPlaneInterpret { - fn get_name(&self) -> InterpretName { - InterpretName::Custom("OKDSetup03ControlPlane") - } - - fn get_version(&self) -> Version { - self.version.clone() - } - - fn get_status(&self) -> InterpretStatus { - self.status.clone() - } - - fn get_children(&self) -> Vec { - vec![] - } - - async fn execute( - &self, - _inventory: &Inventory, - _topology: &T, - ) -> Result { - self.render_and_reboot().await?; - self.persist_network_bond().await?; - Ok(Outcome::new( - InterpretStatus::SUCCESS, - "Control plane provisioned".into(), - )) - } -} - -// ------------------------------------------------------------------------------------------------- -// Step 04: Workers -// - Render per-MAC PXE & ignition for workers; join nodes. -// - Persist bonding via MC/NNCP as required (same approach as masters). -// ------------------------------------------------------------------------------------------------- - -#[derive(Debug, Clone, Serialize, new)] -struct OKDSetup04WorkersScore {} - -impl Score for OKDSetup04WorkersScore { - fn create_interpret(&self) -> Box> { - Box::new(OKDSetup04WorkersInterpret::new(self.clone())) - } - - fn name(&self) -> String { - "OKDSetup04WorkersScore".to_string() - } -} - -#[derive(Debug, Clone)] -struct OKDSetup04WorkersInterpret { - score: OKDSetup04WorkersScore, - version: Version, - status: InterpretStatus, -} - -impl OKDSetup04WorkersInterpret { - pub fn new(score: OKDSetup04WorkersScore) -> Self { - let version = Version::from("1.0.0").unwrap(); - Self { - version, - score, - status: InterpretStatus::QUEUED, - } - } - - async fn render_and_reboot(&self) -> Result<(), InterpretError> { - info!("[Workers] Rendering per-MAC PXE for workers and rebooting"); - Ok(()) - } -} - -#[async_trait] -impl Interpret for OKDSetup04WorkersInterpret { - fn get_name(&self) -> InterpretName { - InterpretName::Custom("OKDSetup04Workers") - } - - fn get_version(&self) -> Version { - self.version.clone() - } - - fn get_status(&self) -> InterpretStatus { - self.status.clone() - } - - fn get_children(&self) -> Vec { - vec![] - } - - async fn execute( - &self, - _inventory: &Inventory, - _topology: &T, - ) -> Result { - self.render_and_reboot().await?; - Ok(Outcome::new( - InterpretStatus::SUCCESS, - "Workers provisioned".into(), - )) - } -} - -// ------------------------------------------------------------------------------------------------- -// Step 05: Sanity Check -// - Validate API reachability, ClusterOperators, ingress, and SDN status. -// ------------------------------------------------------------------------------------------------- - -#[derive(Debug, Clone, Serialize, new)] -struct OKDSetup05SanityCheckScore {} - -impl Score for OKDSetup05SanityCheckScore { - fn create_interpret(&self) -> Box> { - Box::new(OKDSetup05SanityCheckInterpret::new(self.clone())) - } - - fn name(&self) -> String { - "OKDSetup05SanityCheckScore".to_string() - } -} - -#[derive(Debug, Clone)] -struct OKDSetup05SanityCheckInterpret { - score: OKDSetup05SanityCheckScore, - version: Version, - status: InterpretStatus, -} - -impl OKDSetup05SanityCheckInterpret { - pub fn new(score: OKDSetup05SanityCheckScore) -> Self { - let version = Version::from("1.0.0").unwrap(); - Self { - version, - score, - status: InterpretStatus::QUEUED, - } - } - - async fn run_checks(&self) -> Result<(), InterpretError> { - info!("[Sanity] Checking API, COs, Ingress, and SDN health …"); - Ok(()) - } -} - -#[async_trait] -impl Interpret for OKDSetup05SanityCheckInterpret { - fn get_name(&self) -> InterpretName { - InterpretName::Custom("OKDSetup05SanityCheck") - } - - fn get_version(&self) -> Version { - self.version.clone() - } - - fn get_status(&self) -> InterpretStatus { - self.status.clone() - } - - fn get_children(&self) -> Vec { - vec![] - } - - async fn execute( - &self, - _inventory: &Inventory, - _topology: &T, - ) -> Result { - self.run_checks().await?; - Ok(Outcome::new( - InterpretStatus::SUCCESS, - "Sanity checks passed".into(), - )) - } -} - -// ------------------------------------------------------------------------------------------------- -// Step 06: Installation Report -// - Emit JSON and concise human summary of nodes, roles, versions, and health. -// ------------------------------------------------------------------------------------------------- - -#[derive(Debug, Clone, Serialize, new)] -struct OKDSetup06InstallationReportScore { - public_domain: String, - internal_domain: String, -} - -impl Score for OKDSetup06InstallationReportScore { - fn create_interpret(&self) -> Box> { - Box::new(OKDSetup06InstallationReportInterpret::new(self.clone())) - } - - fn name(&self) -> String { - "OKDSetup06InstallationReportScore".to_string() - } -} - -#[derive(Debug, Clone)] -struct OKDSetup06InstallationReportInterpret { - score: OKDSetup06InstallationReportScore, - version: Version, - status: InterpretStatus, -} - -impl OKDSetup06InstallationReportInterpret { - pub fn new(score: OKDSetup06InstallationReportScore) -> Self { - let version = Version::from("1.0.0").unwrap(); - Self { - version, - score, - status: InterpretStatus::QUEUED, - } - } - - async fn generate(&self) -> Result<(), InterpretError> { - info!( - "[Report] Generating installation report for {} / {}", - self.score.public_domain, self.score.internal_domain - ); - Ok(()) - } -} - -#[async_trait] -impl Interpret for OKDSetup06InstallationReportInterpret { - fn get_name(&self) -> InterpretName { - InterpretName::Custom("OKDSetup06InstallationReport") - } - - fn get_version(&self) -> Version { - self.version.clone() - } - - fn get_status(&self) -> InterpretStatus { - self.status.clone() - } - - fn get_children(&self) -> Vec { - vec![] - } - - async fn execute( - &self, - _inventory: &Inventory, - _topology: &T, - ) -> Result { - self.generate().await?; - Ok(Outcome::new( - InterpretStatus::SUCCESS, - "Installation report generated".into(), - )) +impl OKDInstallationPipeline { + pub async fn get_all_scores() -> Vec>> { + vec![ + Box::new(OKDSetup01InventoryScore::new()), + Box::new(OKDSetup02BootstrapScore::new()), + Box::new(OKDSetup03ControlPlaneScore::new()), + Box::new(OKDSetup04WorkersScore::new()), + Box::new(OKDSetup05SanityCheckScore::new()), + Box::new(OKDSetup06InstallationReportScore::new()), + ] } } diff --git a/harmony/src/modules/okd/ipxe.rs b/harmony/src/modules/okd/ipxe.rs index 38de035..81987aa 100644 --- a/harmony/src/modules/okd/ipxe.rs +++ b/harmony/src/modules/okd/ipxe.rs @@ -1,9 +1,9 @@ use askama::Template; use async_trait::async_trait; use derive_new::new; -use harmony_types::net::Url; +use harmony_types::net::{IpAddress, Url}; use serde::Serialize; -use std::net::IpAddr; +use std::net::{IpAddr, Ipv4Addr}; use crate::{ data::{FileContent, FilePath, Version}, @@ -16,29 +16,31 @@ use crate::{ use harmony_types::id::Id; #[derive(Debug, new, Clone, Serialize)] -pub struct OkdIpxeScore { +pub struct OKDIpxeScore { pub kickstart_filename: String, pub harmony_inventory_agent: String, - pub cluster_pubkey_filename: String, + pub cluster_pubkey: FileContent, } -impl Score for OkdIpxeScore { +impl Score for OKDIpxeScore { fn create_interpret(&self) -> Box> { - Box::new(IpxeInterpret::new(self.clone())) + Box::new(OKDIpxeInterpret::new(self.clone())) } fn name(&self) -> String { - "OkdIpxeScore".to_string() + "OKDipxeScore".to_string() } } #[derive(Debug, new, Clone)] -pub struct IpxeInterpret { - score: OkdIpxeScore, +pub struct OKDIpxeInterpret { + score: OKDIpxeScore, } #[async_trait] -impl Interpret for IpxeInterpret { +impl Interpret + for OKDIpxeInterpret +{ async fn execute( &self, inventory: &Inventory, @@ -46,19 +48,32 @@ impl Interpret f ) -> Result { let gateway_ip = topology.get_gateway(); + let dhcp_server_ip = match DhcpServer::get_ip(topology) { + std::net::IpAddr::V4(ipv4_addr) => ipv4_addr, + std::net::IpAddr::V6(_ipv6_addr) => todo!("Support ipv6 someday"), + }; + + // TODO this could overflow, we should use proper subnet maths here instead of an ip + // address and guessing the subnet size from there + let start = Ipv4Addr::from(u32::from(dhcp_server_ip) + 100); + let end = Ipv4Addr::from(u32::from(dhcp_server_ip) + 150); + let scores: Vec>> = vec![ Box::new(DhcpScore { host_binding: vec![], + domain: None, next_server: Some(topology.get_gateway()), boot_filename: None, filename: Some("undionly.kpxe".to_string()), filename64: Some("ipxe.efi".to_string()), filenameipxe: Some(format!("http://{gateway_ip}:8080/boot.ipxe").to_string()), + dhcp_range: (IpAddress::from(start), IpAddress::from(end)), }), Box::new(TftpScore { files_to_serve: Url::LocalFolder("./data/pxe/okd/tftpboot/".to_string()), }), Box::new(StaticFilesHttpScore { + remote_path: None, // TODO The current russh based copy is way too slow, check for a lib update or use scp // when available // @@ -80,7 +95,7 @@ impl Interpret f content: InventoryKickstartTpl { gateway_ip: &gateway_ip, harmony_inventory_agent: &self.score.harmony_inventory_agent, - cluster_pubkey_filename: &self.score.cluster_pubkey_filename, + cluster_pubkey_filename: &self.score.cluster_pubkey.path.to_string(), } .to_string(), }, @@ -92,6 +107,7 @@ impl Interpret f } .to_string(), }, + self.score.cluster_pubkey.clone(), ], }), ]; @@ -107,6 +123,7 @@ impl Interpret f Err(e) => return Err(e), }; } + inquire::Confirm::new(&format!("Execute the copy : `scp -r data/pxe/okd/http_files/* root@{}:/usr/local/http/` and confirm when done to continue", HttpServer::get_ip(topology))).prompt().expect("Prompt error"); Ok(Outcome::success("Ipxe installed".to_string())) } diff --git a/harmony/src/modules/okd/load_balancer.rs b/harmony/src/modules/okd/load_balancer.rs index eb1ed44..8a2b57f 100644 --- a/harmony/src/modules/okd/load_balancer.rs +++ b/harmony/src/modules/okd/load_balancer.rs @@ -8,7 +8,7 @@ use crate::{ score::Score, topology::{ BackendServer, HAClusterTopology, HealthCheck, HttpMethod, HttpStatusCode, LoadBalancer, - LoadBalancerService, Topology, + LoadBalancerService, SSL, Topology, }, }; @@ -62,6 +62,7 @@ impl OKDLoadBalancerScore { "/readyz".to_string(), HttpMethod::GET, HttpStatusCode::Success2xx, + SSL::SSL, )), }, ]; diff --git a/harmony/src/modules/okd/mod.rs b/harmony/src/modules/okd/mod.rs index b5ba462..1bd4514 100644 --- a/harmony/src/modules/okd/mod.rs +++ b/harmony/src/modules/okd/mod.rs @@ -1,3 +1,9 @@ +mod bootstrap_01_prepare; +mod bootstrap_02_bootstrap; +mod bootstrap_03_control_plane; +mod bootstrap_04_workers; +mod bootstrap_05_sanity_check; +mod bootstrap_06_installation_report; pub mod bootstrap_dhcp; pub mod bootstrap_load_balancer; pub mod dhcp; @@ -5,4 +11,11 @@ pub mod dns; pub mod installation; pub mod ipxe; pub mod load_balancer; +pub mod templates; pub mod upgrade; +pub use bootstrap_01_prepare::*; +pub use bootstrap_02_bootstrap::*; +pub use bootstrap_03_control_plane::*; +pub use bootstrap_04_workers::*; +pub use bootstrap_05_sanity_check::*; +pub use bootstrap_06_installation_report::*; diff --git a/harmony/src/modules/okd/templates.rs b/harmony/src/modules/okd/templates.rs new file mode 100644 index 0000000..2e1494e --- /dev/null +++ b/harmony/src/modules/okd/templates.rs @@ -0,0 +1,20 @@ +use askama::Template; + +#[derive(Template)] +#[template(path = "okd/install-config.yaml.j2")] +pub struct InstallConfigYaml<'a> { + pub cluster_domain: &'a str, + pub pull_secret: &'a str, + pub ssh_public_key: &'a str, + pub cluster_name: &'a str, +} + +#[derive(Template)] +#[template(path = "okd/bootstrap.ipxe.j2")] +pub struct BootstrapIpxeTpl<'a> { + pub http_ip: &'a str, + pub scos_path: &'a str, + pub installation_device: &'a str, + pub ignition_http_path: &'a str, + pub ignition_file_name: &'static str, +} diff --git a/harmony/src/modules/storage/ceph/ceph_validate_health_score.rs b/harmony/src/modules/storage/ceph/ceph_validate_health_score.rs index f6b43ec..ee331bc 100644 --- a/harmony/src/modules/storage/ceph/ceph_validate_health_score.rs +++ b/harmony/src/modules/storage/ceph/ceph_validate_health_score.rs @@ -1,4 +1,4 @@ -use std::{sync::Arc, time::Duration}; +use std::sync::Arc; use async_trait::async_trait; use log::debug; diff --git a/harmony/templates/boot.ipxe.j2 b/harmony/templates/boot.ipxe.j2 index 94ea07b..55b74c6 100644 --- a/harmony/templates/boot.ipxe.j2 +++ b/harmony/templates/boot.ipxe.j2 @@ -1,6 +1,63 @@ #!ipxe +# iPXE Chainloading Script +# +# Attempts to load a host-specific configuration file. If that fails, +# it logs the failure, waits for a few seconds, and then attempts to +# load a generic fallback configuration. + +# --- Configuration --- set base-url http://{{ gateway_ip }}:8080 set hostfile ${base-url}/byMAC/01-${mac:hexhyp}.ipxe +set fallbackfile ${base-url}/fallback.ipxe -chain ${hostfile} || chain ${base-url}/fallback.ipxe +# --- Script Logic --- + +echo +echo "========================================" +echo " iPXE Network Boot Initiated" +echo "========================================" +echo "Client MAC Address: ${mac}" +echo "Boot Server URL: ${base-url}" +echo + +# --- Primary Boot Attempt --- +echo "--> Attempting to load host-specific script..." +echo " Location: ${hostfile}" + +sleep 2 + +# The "&& exit ||" pattern works as follows: +# 1. iPXE attempts to 'chain' the hostfile. +# 2. If successful (returns 0), the "&& exit" part is executed, and this script terminates. +# 3. If it fails (returns non-zero), the "||" part is triggered, and execution continues below. +chain --autofree --replace ${hostfile} && exit || + +# --- Fallback Boot Attempt --- +# This part of the script is only reached if the 'chain ${hostfile}' command above failed. +echo +echo "--> Host-specific script not found or failed to load." +echo + +echo +echo "--> Attempting to load fallback script..." +echo " Location: ${fallbackfile}" + +sleep 8 + +chain --autofree --replace ${fallbackfile} && exit || + +# --- Final Failure --- +# This part is only reached if BOTH chain commands have failed. +echo +echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" +echo " FATAL: All boot scripts failed!" +echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" +echo "Could not load either the host-specific script or the fallback script." +echo "Dropping to iPXE shell for manual troubleshooting in 10 seconds." +sleep 8 + +shell + +# A final exit is good practice, though 'shell' is a blocking command. +exit diff --git a/harmony/templates/okd/bootstrap.ipxe.j2 b/harmony/templates/okd/bootstrap.ipxe.j2 new file mode 100644 index 0000000..79b6fa6 --- /dev/null +++ b/harmony/templates/okd/bootstrap.ipxe.j2 @@ -0,0 +1,52 @@ +#!ipxe + +# ================================================================== +# MAC-Specific Boot Script for CoreOS/FCOS Installation +# ================================================================== + +# --- Configuration --- +set http_ip {{ http_ip }} +set scos_path {{ scos_path }} +set inst_dev {{ installation_device }} +set ign_path {{ ignition_http_path }} +set ign_file {{ ignition_file_name }} + +# --- Derived Variables --- +set base-url http://${http_ip}:8080 +set scos-base-url ${base-url}/${scos_path} +set ignition-url ${base-url}/${ign_path}/${ign_file} + +# --- Pre-boot Logging & Verification --- +echo +echo "Starting MAC-specific installation..." +echo "--------------------------------------------------" +echo " Installation Device: ${inst_dev}" +echo " CoreOS Kernel URL: ${scos-base-url}/scos-live-kernel.x86_64" +echo " Ignition URL: ${ignition-url}" +echo "--------------------------------------------------" +echo "Waiting for 3 seconds before loading boot assets..." +sleep 3 + +# --- Load Boot Assets with Failure Checks --- +# The '|| goto failure' pattern provides a clean exit if any asset fails to load. +echo "Loading kernel..." +kernel ${scos-base-url}/scos-live-kernel.x86_64 initrd=main coreos.live.rootfs_url=${scos-base-url}/scos-live-rootfs.x86_64.img coreos.inst.install_dev=${inst_dev} coreos.inst.ignition_url=${ignition-url} || goto failure + +echo "Loading initramfs..." +initrd --name main ${scos-base-url}/scos-live-initramfs.x86_64.img || goto failure + +# --- Boot --- +echo "All assets loaded successfully. Starting boot process..." +boot || goto failure + +# This part is never reached on successful boot. + +# --- Failure Handling --- +:failure +echo +echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" +echo " ERROR: A boot component failed to load." +echo " Dropping to iPXE shell for manual debugging." +echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" +sleep 10 +shell diff --git a/harmony/templates/okd/install-config.yaml.j2 b/harmony/templates/okd/install-config.yaml.j2 new file mode 100644 index 0000000..91ce3e7 --- /dev/null +++ b/harmony/templates/okd/install-config.yaml.j2 @@ -0,0 +1,24 @@ +# Built from https://docs.okd.io/latest/installing/installing_bare_metal/upi/installing-bare-metal.html#installation-bare-metal-config-yaml_installing-bare-metal +apiVersion: v1 +baseDomain: {{ cluster_domain }} +compute: +- hyperthreading: Enabled + name: worker + replicas: 0 +controlPlane: + hyperthreading: Enabled + name: master + replicas: 3 +metadata: + name: {{ cluster_name }} +networking: + clusterNetwork: + - cidr: 10.128.0.0/14 + hostPrefix: 23 + networkType: OVNKubernetes + serviceNetwork: + - 172.30.0.0/16 +platform: + none: {} +pullSecret: '{{ pull_secret|safe }}' +sshKey: '{{ ssh_public_key }}' diff --git a/harmony_secret/Cargo.toml b/harmony_secret/Cargo.toml index 88f93ac..36bfb31 100644 --- a/harmony_secret/Cargo.toml +++ b/harmony_secret/Cargo.toml @@ -18,6 +18,7 @@ infisical = { git = "https://github.com/jggc/rust-sdk.git", branch = "patch-1" } tokio.workspace = true async-trait.workspace = true http.workspace = true +inquire.workspace = true [dev-dependencies] pretty_assertions.workspace = true diff --git a/harmony_secret/src/lib.rs b/harmony_secret/src/lib.rs index 4f54d95..a4f636f 100644 --- a/harmony_secret/src/lib.rs +++ b/harmony_secret/src/lib.rs @@ -9,6 +9,7 @@ use config::INFISICAL_ENVIRONMENT; use config::INFISICAL_PROJECT_ID; use config::INFISICAL_URL; use config::SECRET_STORE; +use log::debug; use serde::{Serialize, de::DeserializeOwned}; use std::fmt; use store::InfisicalSecretStore; @@ -101,6 +102,7 @@ impl SecretManager { /// Retrieves and deserializes a secret. pub async fn get() -> Result { let manager = get_secret_manager().await; + debug!("Getting secret ns {} key {}", &manager.namespace, T::KEY); let raw_value = manager.store.get_raw(&manager.namespace, T::KEY).await?; serde_json::from_slice(&raw_value).map_err(|e| SecretStoreError::Deserialization { key: T::KEY.to_string(), @@ -108,6 +110,42 @@ impl SecretManager { }) } + pub async fn get_or_prompt() -> Result { + let secret = Self::get::().await; + let manager = get_secret_manager().await; + let prompted = secret.is_err(); + + let secret = secret.or_else(|e| -> Result { + debug!("Could not get secret : {e}"); + + let ns = &manager.namespace; + let key = T::KEY; + let secret_json = inquire::Text::new(&format!( + "Secret not found for {} {}, paste the JSON here :", + ns, key + )) + .prompt() + .map_err(|e| { + SecretStoreError::Store(format!("Failed to prompt secret {ns} {key} : {e}").into()) + })?; + + let secret: T = serde_json::from_str(&secret_json).map_err(|e| { + SecretStoreError::Deserialization { + key: T::KEY.to_string(), + source: e, + } + })?; + + Ok(secret) + })?; + + if prompted { + Self::set(&secret).await?; + } + + Ok(secret) + } + /// Serializes and stores a secret. pub async fn set(secret: &T) -> Result<(), SecretStoreError> { let manager = get_secret_manager().await; diff --git a/harmony_secret/src/store/local_file.rs b/harmony_secret/src/store/local_file.rs index ed81c65..c277335 100644 --- a/harmony_secret/src/store/local_file.rs +++ b/harmony_secret/src/store/local_file.rs @@ -1,5 +1,5 @@ use async_trait::async_trait; -use log::info; +use log::{debug, info}; use std::path::{Path, PathBuf}; use crate::{SecretStore, SecretStoreError}; @@ -24,7 +24,7 @@ impl SecretStore for LocalFileSecretStore { .join("secrets"); let file_path = Self::get_file_path(&data_dir, ns, key); - info!( + debug!( "LOCAL_STORE: Getting key '{key}' from namespace '{ns}' at {}", file_path.display() ); diff --git a/harmony_types/src/id.rs b/harmony_types/src/id.rs index 98cf1b9..2cb2674 100644 --- a/harmony_types/src/id.rs +++ b/harmony_types/src/id.rs @@ -48,6 +48,12 @@ impl From for Id { } } +impl From for String { + fn from(value: Id) -> Self { + value.to_string() + } +} + impl std::fmt::Display for Id { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.write_str(&self.value) diff --git a/harmony_types/src/net.rs b/harmony_types/src/net.rs index caf023f..f8d9e0e 100644 --- a/harmony_types/src/net.rs +++ b/harmony_types/src/net.rs @@ -21,7 +21,7 @@ impl From<&MacAddress> for String { impl std::fmt::Display for MacAddress { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.write_fmt(format_args!("MacAddress {}", String::from(self))) + f.write_str(&String::from(self)) } } diff --git a/migrations/20250902035357_Host_role_mapping.sql b/migrations/20250902035357_Host_role_mapping.sql new file mode 100644 index 0000000..dce122d --- /dev/null +++ b/migrations/20250902035357_Host_role_mapping.sql @@ -0,0 +1,5 @@ +CREATE TABLE IF NOT EXISTS host_role_mapping ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + host_id TEXT NOT NULL, + role TEXT NOT NULL +); diff --git a/opnsense-config-xml/src/data/dnsmasq.rs b/opnsense-config-xml/src/data/dnsmasq.rs index db2b8c1..fe76b66 100644 --- a/opnsense-config-xml/src/data/dnsmasq.rs +++ b/opnsense-config-xml/src/data/dnsmasq.rs @@ -36,6 +36,27 @@ pub struct DnsMasq { pub dhcp_options: Vec, pub dhcp_boot: Vec, pub dhcp_tags: Vec, + pub hosts: Vec, +} + +#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize, Clone)] +#[yaserde(rename = "hosts")] +pub struct DnsmasqHost { + #[yaserde(attribute = true)] + pub uuid: String, + pub host: String, + pub domain: MaybeString, + pub local: MaybeString, + pub ip: MaybeString, + pub cnames: MaybeString, + pub client_id: MaybeString, + pub hwaddr: MaybeString, + pub lease_time: MaybeString, + pub ignore: Option, + pub set_tag: MaybeString, + pub descr: MaybeString, + pub comments: MaybeString, + pub aliases: MaybeString, } // Represents the element and its nested fields. diff --git a/opnsense-config-xml/src/data/opnsense.rs b/opnsense-config-xml/src/data/opnsense.rs index c39f1c5..fa5f985 100644 --- a/opnsense-config-xml/src/data/opnsense.rs +++ b/opnsense-config-xml/src/data/opnsense.rs @@ -189,7 +189,7 @@ pub struct System { pub timeservers: String, pub webgui: WebGui, pub usevirtualterminal: u8, - pub disablenatreflection: String, + pub disablenatreflection: Option, pub disableconsolemenu: u8, pub disablevlanhwfilter: u8, pub disablechecksumoffloading: u8, @@ -256,7 +256,7 @@ pub struct Firmware { #[yaserde(rename = "type")] pub firmware_type: MaybeString, pub subscription: MaybeString, - pub reboot: MaybeString, + pub reboot: Option, } #[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)] @@ -267,12 +267,12 @@ pub struct Bogons { #[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)] pub struct Group { pub name: String, - pub description: String, + pub description: Option, pub scope: String, pub gid: u32, - pub member: Vec, + pub member: String, #[yaserde(rename = "priv")] - pub priv_field: String, + pub priv_field: Option, pub source_networks: Option, } @@ -1449,6 +1449,9 @@ pub struct Vip { pub advbase: Option, pub advskew: Option, pub descr: Option, + pub peer: Option, + pub peer6: Option, + pub nosync: Option, } #[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)] diff --git a/opnsense-config/Cargo.toml b/opnsense-config/Cargo.toml index e70bc12..0580cb2 100644 --- a/opnsense-config/Cargo.toml +++ b/opnsense-config/Cargo.toml @@ -21,6 +21,7 @@ serde_json = "1.0.133" tokio-util = { version = "0.7.13", features = ["codec"] } tokio-stream = "0.1.17" uuid.workspace = true +sha2 = "0.10.9" [dev-dependencies] pretty_assertions.workspace = true diff --git a/opnsense-config/src/config/config.rs b/opnsense-config/src/config/config.rs index 56cd503..c2d0f60 100644 --- a/opnsense-config/src/config/config.rs +++ b/opnsense-config/src/config/config.rs @@ -1,10 +1,10 @@ use std::sync::Arc; use crate::{ - config::{SshConfigManager, SshCredentials, SshOPNSenseShell}, + config::{check_hash, get_hash, SshConfigManager, SshCredentials, SshOPNSenseShell}, error::Error, modules::{ - caddy::CaddyConfig, dhcp_legacy::DhcpConfigLegacyISC, dns::DnsConfig, + caddy::CaddyConfig, dhcp_legacy::DhcpConfigLegacyISC, dns::UnboundDnsConfig, dnsmasq::DhcpConfigDnsMasq, load_balancer::LoadBalancerConfig, tftp::TftpConfig, }, }; @@ -12,6 +12,7 @@ use log::{debug, info, trace, warn}; use opnsense_config_xml::OPNsense; use russh::client; use serde::Serialize; +use sha2::Digest; use super::{ConfigManager, OPNsenseShell}; @@ -20,6 +21,7 @@ pub struct Config { opnsense: OPNsense, repository: Arc, shell: Arc, + hash: String, } impl Serialize for Config { @@ -36,8 +38,10 @@ impl Config { repository: Arc, shell: Arc, ) -> Result { + let (opnsense, hash) = Self::get_opnsense_instance(repository.clone()).await?; Ok(Self { - opnsense: Self::get_opnsense_instance(repository.clone()).await?, + opnsense, + hash, repository, shell, }) @@ -51,8 +55,8 @@ impl Config { DhcpConfigDnsMasq::new(&mut self.opnsense, self.shell.clone()) } - pub fn dns(&mut self) -> DnsConfig<'_> { - DnsConfig::new(&mut self.opnsense) + pub fn dns(&mut self) -> DhcpConfigDnsMasq<'_> { + DhcpConfigDnsMasq::new(&mut self.opnsense, self.shell.clone()) } pub fn tftp(&mut self) -> TftpConfig<'_> { @@ -146,7 +150,7 @@ impl Config { async fn reload_config(&mut self) -> Result<(), Error> { info!("Reloading opnsense live config"); - self.opnsense = Self::get_opnsense_instance(self.repository.clone()).await?; + let (opnsense, sha2) = Self::get_opnsense_instance(self.repository.clone()).await?; Ok(()) } @@ -158,14 +162,15 @@ impl Config { /// Save the config to the repository. This method is meant NOT to reload services, only save /// the config to the live file/database and perhaps take a backup when relevant. pub async fn save(&self) -> Result<(), Error> { - self.repository.save_config(&self.opnsense.to_xml()).await + let xml = &self.opnsense.to_xml(); + self.repository.save_config(xml, &self.hash).await } /// Save the configuration and reload all services. Be careful with this one as it will cause /// downtime in many cases, such as a PPPoE renegociation pub async fn apply(&self) -> Result<(), Error> { self.repository - .apply_new_config(&self.opnsense.to_xml()) + .apply_new_config(&self.opnsense.to_xml(), &self.hash) .await } @@ -193,11 +198,14 @@ impl Config { Config::new(manager, shell).await.unwrap() } - async fn get_opnsense_instance(repository: Arc) -> Result { + async fn get_opnsense_instance( + repository: Arc, + ) -> Result<(OPNsense, String), Error> { let xml = repository.load_as_str().await?; trace!("xml {}", xml); - Ok(OPNsense::from(xml)) + let hash = get_hash(&xml); + Ok((OPNsense::from(xml), hash)) } pub async fn run_command(&self, command: &str) -> Result { @@ -219,13 +227,14 @@ mod tests { #[tokio::test] async fn test_load_config_from_local_file() { for path in [ - "src/tests/data/config-opnsense-25.1.xml", - "src/tests/data/config-vm-test.xml", + // "src/tests/data/config-opnsense-25.1.xml", + // "src/tests/data/config-vm-test.xml", "src/tests/data/config-structure.xml", "src/tests/data/config-full-1.xml", - "src/tests/data/config-full-ncd0.xml", - "src/tests/data/config-full-25.7.xml", - "src/tests/data/config-full-25.7-dummy-dnsmasq-options.xml", + // "src/tests/data/config-full-ncd0.xml", + // "src/tests/data/config-full-25.7.xml", + // "src/tests/data/config-full-25.7-dummy-dnsmasq-options.xml", + "src/tests/data/config-25.7-dnsmasq-static-host.xml", ] { let mut test_file_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); test_file_path.push(path); @@ -243,13 +252,13 @@ mod tests { let serialized = config.opnsense.to_xml(); - fs::write("/tmp/serialized.xml", &serialized).unwrap(); - // Since the order of all fields is not always the same in opnsense config files // I think it is good enough to have exactly the same amount of the same lines - [config_file_str.lines().collect::>()].sort(); - [config_file_str.lines().collect::>()].sort(); - assert_eq!((), ()); + let mut before = config_file_str.lines().collect::>(); + let mut after = serialized.lines().collect::>(); + before.sort(); + after.sort(); + assert_eq!(before, after); } } @@ -279,8 +288,6 @@ mod tests { let serialized = config.opnsense.to_xml(); - fs::write("/tmp/serialized.xml", &serialized).unwrap(); - let mut test_file_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); test_file_path.push("src/tests/data/config-structure-with-dhcp-staticmap-entry.xml"); diff --git a/opnsense-config/src/config/manager/local_file.rs b/opnsense-config/src/config/manager/local_file.rs index 804b74c..e48f284 100644 --- a/opnsense-config/src/config/manager/local_file.rs +++ b/opnsense-config/src/config/manager/local_file.rs @@ -1,3 +1,4 @@ +use crate::config::check_hash; use crate::config::manager::ConfigManager; use crate::error::Error; use async_trait::async_trait; @@ -20,11 +21,17 @@ impl ConfigManager for LocalFileConfigManager { Ok(fs::read_to_string(&self.file_path)?) } - async fn save_config(&self, content: &str) -> Result<(), Error> { + async fn save_config(&self, content: &str, hash: &str) -> Result<(), Error> { + let current_content = self.load_as_str().await?; + if !check_hash(¤t_content, hash) { + return Err(Error::Config(format!( + "OPNSense config file changed since loading it! Hash when loading : {hash}" + ))); + } Ok(fs::write(&self.file_path, content)?) } - async fn apply_new_config(&self, content: &str) -> Result<(), Error> { - self.save_config(content).await + async fn apply_new_config(&self, content: &str, hash: &str) -> Result<(), Error> { + self.save_config(content, hash).await } } diff --git a/opnsense-config/src/config/manager/mod.rs b/opnsense-config/src/config/manager/mod.rs index ad3a6b9..70831a4 100644 --- a/opnsense-config/src/config/manager/mod.rs +++ b/opnsense-config/src/config/manager/mod.rs @@ -9,6 +9,8 @@ use crate::Error; #[async_trait] pub trait ConfigManager: std::fmt::Debug + Send + Sync { async fn load_as_str(&self) -> Result; - async fn save_config(&self, content: &str) -> Result<(), Error>; - async fn apply_new_config(&self, content: &str) -> Result<(), Error>; + /// Save a new version of the config file, making sure that the hash still represents the file + /// currently stored in /conf/config.xml + async fn save_config(&self, content: &str, hash: &str) -> Result<(), Error>; + async fn apply_new_config(&self, content: &str, hash: &str) -> Result<(), Error>; } diff --git a/opnsense-config/src/config/manager/ssh.rs b/opnsense-config/src/config/manager/ssh.rs index fb525ea..4b2fe64 100644 --- a/opnsense-config/src/config/manager/ssh.rs +++ b/opnsense-config/src/config/manager/ssh.rs @@ -1,8 +1,9 @@ use crate::config::{manager::ConfigManager, OPNsenseShell}; use crate::error::Error; use async_trait::async_trait; -use log::info; +use log::{info, warn}; use russh_keys::key::KeyPair; +use sha2::Digest; use std::sync::Arc; #[derive(Debug)] @@ -35,10 +36,10 @@ impl SshConfigManager { .await } - async fn move_to_live_config(&self, new_config_path: &str) -> Result { + async fn copy_to_live_config(&self, new_config_path: &str) -> Result { info!("Overwriting OPNSense /conf/config.xml with {new_config_path}"); self.opnsense_shell - .exec(&format!("mv {new_config_path} /conf/config.xml")) + .exec(&format!("cp {new_config_path} /conf/config.xml")) .await } @@ -56,19 +57,41 @@ impl ConfigManager for SshConfigManager { self.opnsense_shell.exec("cat /conf/config.xml").await } - async fn save_config(&self, content: &str) -> Result<(), Error> { + async fn save_config(&self, content: &str, hash: &str) -> Result<(), Error> { + let current_content = self.load_as_str().await?; + + if !check_hash(¤t_content, hash) { + warn!("OPNSense config file changed since loading it! Hash when loading : {hash}"); + // return Err(Error::Config(format!( + // "OPNSense config file changed since loading it! Hash when loading : {hash}" + // ))); + } + let temp_filename = self .opnsense_shell .write_content_to_temp_file(content) .await?; self.backup_config_remote().await?; - self.move_to_live_config(&temp_filename).await?; + self.copy_to_live_config(&temp_filename).await?; Ok(()) } - async fn apply_new_config(&self, content: &str) -> Result<(), Error> { - self.save_config(content).await?; + async fn apply_new_config(&self, content: &str, hash: &str) -> Result<(), Error> { + self.save_config(content, &hash).await?; self.reload_all_services().await?; Ok(()) } } + +pub fn get_hash(content: &str) -> String { + let mut hasher = sha2::Sha256::new(); + hasher.update(content.as_bytes()); + let hash_bytes = hasher.finalize(); + let hash_string = format!("{:x}", hash_bytes); + info!("Loaded OPNSense config.xml with hash {hash_string:?}"); + hash_string +} + +pub fn check_hash(content: &str, source_hash: &str) -> bool { + get_hash(content) == source_hash +} diff --git a/opnsense-config/src/config/shell/ssh.rs b/opnsense-config/src/config/shell/ssh.rs index 95069fc..1f82c63 100644 --- a/opnsense-config/src/config/shell/ssh.rs +++ b/opnsense-config/src/config/shell/ssh.rs @@ -39,7 +39,7 @@ impl OPNsenseShell for SshOPNSenseShell { async fn write_content_to_temp_file(&self, content: &str) -> Result { let temp_filename = format!( - "/tmp/opnsense-config-tmp-config_{}", + "/conf/harmony/opnsense-config-{}", SystemTime::now() .duration_since(UNIX_EPOCH) .unwrap() diff --git a/opnsense-config/src/modules/dhcp.rs b/opnsense-config/src/modules/dhcp.rs index 8ec3519..a59b1a9 100644 --- a/opnsense-config/src/modules/dhcp.rs +++ b/opnsense-config/src/modules/dhcp.rs @@ -1,4 +1,4 @@ -#[derive(Debug)] +#[derive(Debug, PartialEq)] pub enum DhcpError { InvalidMacAddress(String), InvalidIpAddress(String), diff --git a/opnsense-config/src/modules/dns.rs b/opnsense-config/src/modules/dns.rs index 3bf045e..517b5ea 100644 --- a/opnsense-config/src/modules/dns.rs +++ b/opnsense-config/src/modules/dns.rs @@ -1,10 +1,10 @@ use opnsense_config_xml::{Host, OPNsense}; -pub struct DnsConfig<'a> { +pub struct UnboundDnsConfig<'a> { opnsense: &'a mut OPNsense, } -impl<'a> DnsConfig<'a> { +impl<'a> UnboundDnsConfig<'a> { pub fn new(opnsense: &'a mut OPNsense) -> Self { Self { opnsense } } diff --git a/opnsense-config/src/modules/dnsmasq.rs b/opnsense-config/src/modules/dnsmasq.rs index 1430ba6..c762f82 100644 --- a/opnsense-config/src/modules/dnsmasq.rs +++ b/opnsense-config/src/modules/dnsmasq.rs @@ -1,9 +1,12 @@ // dnsmasq.rs use crate::modules::dhcp::DhcpError; -use log::{debug, info}; +use log::{debug, info, warn}; +use opnsense_config_xml::dnsmasq::{DhcpRange, DnsMasq, DnsmasqHost}; // Assuming DhcpRange is defined in opnsense_config_xml::dnsmasq use opnsense_config_xml::{MaybeString, StaticMap}; +use std::collections::HashSet; use std::net::Ipv4Addr; use std::sync::Arc; +use uuid::Uuid; use opnsense_config_xml::OPNsense; @@ -25,74 +28,167 @@ impl<'a> DhcpConfigDnsMasq<'a> { } } - /// Removes a static mapping by its MAC address. - /// Static mappings are stored in the section of the config, shared with the ISC module. - pub fn remove_static_mapping(&mut self, mac: &str) { - let lan_dhcpd = self.get_lan_dhcpd(); - lan_dhcpd - .staticmaps - .retain(|static_entry| static_entry.mac != mac); + /// Removes a MAC address from a static mapping. + /// If the mapping has no other MAC addresses associated with it, the entire host entry is removed. + pub fn remove_static_mapping(&mut self, mac_to_remove: &str) { + let dnsmasq = self.get_dnsmasq(); + + // Update hwaddr fields for hosts that contain the MAC, removing it from the comma-separated list. + for host in dnsmasq.hosts.iter_mut() { + let mac = host.hwaddr.content_string(); + let original_macs: Vec<&str> = mac.split(',').collect(); + if original_macs + .iter() + .any(|m| m.eq_ignore_ascii_case(mac_to_remove)) + { + let updated_macs: Vec<&str> = original_macs + .into_iter() + .filter(|m| !m.eq_ignore_ascii_case(mac_to_remove)) + .collect(); + host.hwaddr = updated_macs.join(",").into(); + } + } + + // Remove any host entries that no longer have any MAC addresses. + dnsmasq + .hosts + .retain(|host_entry| !host_entry.hwaddr.content_string().is_empty()); } - /// Retrieves a mutable reference to the LAN interface's DHCP configuration. - /// This is located in the shared section of the config. - fn get_lan_dhcpd(&mut self) -> &mut opnsense_config_xml::DhcpInterface { - &mut self - .opnsense - .dhcpd - .elements - .iter_mut() - .find(|(name, _config)| name == "lan") - .expect("Interface lan should have dhcpd activated") - .1 + /// Retrieves a mutable reference to the DnsMasq configuration. + /// This is located in the section of the OPNsense config. + fn get_dnsmasq(&mut self) -> &mut DnsMasq { + self.opnsense + .dnsmasq + .as_mut() + .expect("Dnsmasq config must be initialized") } - /// Adds a new static DHCP mapping. - /// Validates the MAC address and checks for existing mappings to prevent conflicts. + /// Adds or updates a static DHCP mapping. + /// + /// This function implements specific logic to handle existing entries: + /// - If no host exists for the given IP or hostname, a new entry is created. + /// - If exactly one host exists for the IP and/or hostname, the new MAC is appended to it. + /// - It will error if the IP and hostname exist but point to two different host entries, + /// as this represents an unresolvable conflict. + /// - It will also error if multiple entries are found for the IP or hostname, indicating an + /// ambiguous state. pub fn add_static_mapping( &mut self, - mac: &str, - ipaddr: Ipv4Addr, + mac: &Vec, + ipaddr: &Ipv4Addr, hostname: &str, ) -> Result<(), DhcpError> { - let mac = mac.to_string(); - let hostname = hostname.to_string(); - let lan_dhcpd = self.get_lan_dhcpd(); - let existing_mappings: &mut Vec = &mut lan_dhcpd.staticmaps; + let mut hostname_split = hostname.split("."); + let hostname = hostname_split.next().expect("hostname cannot be empty"); + let domain_name = hostname_split.collect::>().join("."); - if !Self::is_valid_mac(&mac) { - return Err(DhcpError::InvalidMacAddress(mac)); + if let Some(m) = mac.iter().find(|m| !Self::is_valid_mac(m)) { + return Err(DhcpError::InvalidMacAddress(m.to_string())); } - // TODO: Validate that the IP address is within a configured DHCP range. + let ip_str = ipaddr.to_string(); + let hosts = &mut self.get_dnsmasq().hosts; - if existing_mappings + let ip_indices: Vec = hosts .iter() - .any(|m| m.ipaddr == ipaddr.to_string() && m.mac == mac) - { - info!("Mapping already exists for {} [{}], skipping", ipaddr, mac); - return Ok(()); - } + .enumerate() + .filter(|(_, h)| h.ip.content_string() == ip_str) + .map(|(i, _)| i) + .collect(); - if existing_mappings + let hostname_indices: Vec = hosts .iter() - .any(|m| m.ipaddr == ipaddr.to_string()) + .enumerate() + .filter(|(_, h)| h.host == hostname) + .map(|(i, _)| i) + .collect(); + + let ip_set: HashSet = ip_indices.iter().cloned().collect(); + let hostname_set: HashSet = hostname_indices.iter().cloned().collect(); + + if !ip_indices.is_empty() + && !hostname_indices.is_empty() + && ip_set.intersection(&hostname_set).count() == 0 { - return Err(DhcpError::IpAddressAlreadyMapped(ipaddr.to_string())); + return Err(DhcpError::Configuration(format!( + "Configuration conflict: IP {} and hostname '{}' exist, but in different static host entries.", + ipaddr, hostname + ))); } - if existing_mappings.iter().any(|m| m.mac == mac) { - return Err(DhcpError::MacAddressAlreadyMapped(mac)); + let mut all_indices: Vec<&usize> = ip_set.union(&hostname_set).collect(); + all_indices.sort(); + + let mac_list = mac.join(","); + + match all_indices.len() { + 0 => { + info!( + "Creating new static host for {} ({}) with MAC {}", + hostname, ipaddr, mac_list + ); + let new_host = DnsmasqHost { + uuid: Uuid::new_v4().to_string(), + host: hostname.to_string(), + ip: ip_str.into(), + hwaddr: mac_list.into(), + local: MaybeString::from("1"), + ignore: Some(0), + domain: domain_name.into(), + ..Default::default() + }; + hosts.push(new_host); + } + 1 => { + let host_index = *all_indices[0]; + let host_to_modify = &mut hosts[host_index]; + let host_to_modify_ip = host_to_modify.ip.content_string(); + if host_to_modify_ip != ip_str { + warn!( + "Hostname '{}' already exists with a different IP ({}). Setting new IP {ip_str}. Appending MAC {}.", + hostname, host_to_modify_ip, mac_list + ); + host_to_modify.ip.content = Some(ip_str); + } else if host_to_modify.host != hostname { + warn!( + "IP {} already exists with a different hostname ('{}'). Setting hostname to {hostname}. Appending MAC {}.", + ipaddr, host_to_modify.host, mac_list + ); + host_to_modify.host = hostname.to_string(); + } + + for single_mac in mac.iter() { + if !host_to_modify + .hwaddr + .content_string() + .split(',') + .any(|m| m.eq_ignore_ascii_case(single_mac)) + { + info!( + "Appending MAC {} to existing static host for {} ({})", + single_mac, host_to_modify.host, host_to_modify_ip + ); + let mut updated_macs = host_to_modify.hwaddr.content_string().to_string(); + updated_macs.push(','); + updated_macs.push_str(single_mac); + host_to_modify.hwaddr.content = updated_macs.into(); + } else { + debug!( + "MAC {} already present in static host entry for {} ({}). No changes made.", + single_mac, host_to_modify.host, host_to_modify_ip + ); + } + } + } + _ => { + return Err(DhcpError::Configuration(format!( + "Configuration conflict: Found multiple host entries matching IP {} and/or hostname '{}'. Cannot resolve automatically.", + ipaddr, hostname + ))); + } } - let static_map = StaticMap { - mac, - ipaddr: ipaddr.to_string(), - hostname: hostname, - ..Default::default() - }; - - existing_mappings.push(static_map); Ok(()) } @@ -110,13 +206,20 @@ impl<'a> DhcpConfigDnsMasq<'a> { /// Retrieves the list of current static mappings by shelling out to `configctl`. /// This provides the real-time state from the running system. pub async fn get_static_mappings(&self) -> Result, Error> { + // Note: This command is for the 'dhcpd' service. If dnsmasq uses a different command + // or key, this will need to be adjusted. let list_static_output = self .opnsense_shell .exec("configctl dhcpd list static") .await?; - let value: serde_json::Value = serde_json::from_str(&list_static_output) - .unwrap_or_else(|_| panic!("Got invalid json from configctl {list_static_output}")); + let value: serde_json::Value = serde_json::from_str(&list_static_output).map_err(|e| { + Error::Command(format!( + "Got invalid json from configctl {list_static_output} : {e}" + )) + })?; + + // The JSON output key might be 'dhcpd' even when dnsmasq is the backend. let static_maps = value["dhcpd"] .as_array() .ok_or(Error::Command(format!( @@ -135,6 +238,36 @@ impl<'a> DhcpConfigDnsMasq<'a> { Ok(static_maps) } + pub async fn set_dhcp_range(&mut self, start: &str, end: &str) -> Result<(), DhcpError> { + let dnsmasq = self.get_dnsmasq(); + let ranges = &mut dnsmasq.dhcp_ranges; + + // Assuming DnsMasq has dhcp_ranges: Vec + // Find existing range for "lan" interface + if let Some(range) = ranges + .iter_mut() + .find(|r| r.interface == Some("lan".to_string())) + { + // Update existing range + range.start_addr = Some(start.to_string()); + range.end_addr = Some(end.to_string()); + } else { + // Create new range + let new_range = DhcpRange { + uuid: Some(Uuid::new_v4().to_string()), + interface: Some("lan".to_string()), + start_addr: Some(start.to_string()), + end_addr: Some(end.to_string()), + domain_type: Some("range".to_string()), + nosync: Some(0), + ..Default::default() + }; + ranges.push(new_range); + } + + Ok(()) + } + pub async fn set_pxe_options( &self, tftp_ip: Option, @@ -142,9 +275,9 @@ impl<'a> DhcpConfigDnsMasq<'a> { efi_filename: String, ipxe_filename: String, ) -> Result<(), DhcpError> { - // As of writing this opnsense does not support negative tags, and the dnsmasq config is a - // bit complicated anyways. So we are writing directly a dnsmasq config file to - // /usr/local/etc/dnsmasq.conf.d + // OPNsense does not support negative tags via its API for dnsmasq, and the required + // logic is complex. Therefore, we write a configuration file directly to the + // dnsmasq.conf.d directory to achieve the desired PXE boot behavior. let tftp_str = tftp_ip.map_or(String::new(), |i| format!(",{i},{i}")); let config = format!( @@ -163,7 +296,7 @@ dhcp-boot=tag:efi,tag:!ipxe,{efi_filename}{tftp_str} dhcp-boot=tag:ipxe,{ipxe_filename}{tftp_str} # Provide undionly to legacy bios clients -dhcp-boot=tag:bios,{bios_filename}{tftp_str} +dhcp-boot=tag:bios,tag:!ipxe,{bios_filename}{tftp_str} " ); info!("Writing configuration file to {DNS_MASQ_PXE_CONFIG_FILE}"); @@ -185,3 +318,302 @@ dhcp-boot=tag:bios,{bios_filename}{tftp_str} Ok(()) } } + +#[cfg(test)] +mod test { + use crate::config::DummyOPNSenseShell; + + use super::*; + use opnsense_config_xml::OPNsense; + use std::net::Ipv4Addr; + use std::sync::Arc; + + /// Helper function to create a DnsmasqHost with minimal boilerplate. + fn create_host(uuid: &str, host: &str, ip: &str, hwaddr: &str) -> DnsmasqHost { + DnsmasqHost { + uuid: uuid.to_string(), + host: host.to_string(), + ip: ip.into(), + hwaddr: hwaddr.into(), + local: MaybeString::from("1"), + ignore: Some(0), + ..Default::default() + } + } + + /// Helper to set up the test environment with an initial OPNsense configuration. + fn setup_test_env(initial_hosts: Vec) -> DhcpConfigDnsMasq<'static> { + let opnsense_config = Box::leak(Box::new(OPNsense { + dnsmasq: Some(DnsMasq { + hosts: initial_hosts, + ..Default::default() + }), + ..Default::default() + })); + + DhcpConfigDnsMasq::new(opnsense_config, Arc::new(DummyOPNSenseShell {})) + } + + #[test] + fn test_add_first_static_mapping() { + let mut dhcp_config = setup_test_env(vec![]); + let ip = Ipv4Addr::new(192, 168, 1, 10); + let mac = "00:11:22:33:44:55"; + let hostname = "new-host"; + + dhcp_config + .add_static_mapping(&vec![mac.to_string()], &ip, hostname) + .unwrap(); + + let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts; + assert_eq!(hosts.len(), 1); + let host = &hosts[0]; + assert_eq!(host.host, hostname); + assert_eq!(host.ip, ip.to_string().into()); + assert_eq!(host.hwaddr.content_string(), mac); + assert!(Uuid::parse_str(&host.uuid).is_ok()); + } + + #[test] + fn test_hostname_split_into_host_domain() { + let mut dhcp_config = setup_test_env(vec![]); + let ip = Ipv4Addr::new(192, 168, 1, 10); + let mac = "00:11:22:33:44:55"; + let hostname = "new-host"; + let domain = "some.domain"; + + dhcp_config + .add_static_mapping(&vec![mac.to_string()], &ip, &format!("{hostname}.{domain}")) + .unwrap(); + + let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts; + assert_eq!(hosts.len(), 1); + let host = &hosts[0]; + assert_eq!(host.host, hostname); + assert_eq!(host.domain.content_string(), domain); + assert_eq!(host.ip, ip.to_string().into()); + assert_eq!(host.hwaddr.content_string(), mac); + assert!(Uuid::parse_str(&host.uuid).is_ok()); + } + + #[test] + fn test_add_mac_to_existing_host_by_ip_and_hostname() { + let initial_host = create_host( + "uuid-1", + "existing-host", + "192.168.1.20", + "AA:BB:CC:DD:EE:FF", + ); + let mut dhcp_config = setup_test_env(vec![initial_host]); + let ip = Ipv4Addr::new(192, 168, 1, 20); + let new_mac = "00:11:22:33:44:55"; + let hostname = "existing-host"; + + dhcp_config + .add_static_mapping(&vec![new_mac.to_string()], &ip, hostname) + .unwrap(); + + let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts; + assert_eq!(hosts.len(), 1); + let host = &hosts[0]; + assert_eq!( + host.hwaddr.content_string(), + "AA:BB:CC:DD:EE:FF,00:11:22:33:44:55" + ); + } + + #[test] + fn test_add_mac_to_existing_host_by_ip_only() { + let initial_host = create_host( + "uuid-1", + "existing-host", + "192.168.1.20", + "AA:BB:CC:DD:EE:FF", + ); + let mut dhcp_config = setup_test_env(vec![initial_host]); + let ip = Ipv4Addr::new(192, 168, 1, 20); + let new_mac = "00:11:22:33:44:55"; + + // Using a different hostname should still find the host by IP and log a warning. + let new_hostname = "different-host-name"; + dhcp_config + .add_static_mapping(&vec![new_mac.to_string()], &ip, new_hostname) + .unwrap(); + + let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts; + assert_eq!(hosts.len(), 1); + let host = &hosts[0]; + assert_eq!( + host.hwaddr.content_string(), + "AA:BB:CC:DD:EE:FF,00:11:22:33:44:55" + ); + assert_eq!(host.host, new_hostname); // hostname should be updated + } + + #[test] + fn test_add_mac_to_existing_host_by_hostname_only() { + let initial_host = create_host( + "uuid-1", + "existing-host", + "192.168.1.20", + "AA:BB:CC:DD:EE:FF", + ); + let mut dhcp_config = setup_test_env(vec![initial_host]); + let new_mac = "00:11:22:33:44:55"; + let hostname = "existing-host"; + + // Using a different IP should still find the host by hostname and log a warning. + dhcp_config + .add_static_mapping( + &vec![new_mac.to_string()], + &Ipv4Addr::new(192, 168, 1, 99), + hostname, + ) + .unwrap(); + + let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts; + assert_eq!(hosts.len(), 1); + let host = &hosts[0]; + assert_eq!( + host.hwaddr.content_string(), + "AA:BB:CC:DD:EE:FF,00:11:22:33:44:55" + ); + assert_eq!(host.ip.content_string(), "192.168.1.99"); // Original IP should be preserved. + } + + #[test] + fn test_add_duplicate_mac_to_host() { + let initial_mac = "AA:BB:CC:DD:EE:FF"; + let initial_host = create_host("uuid-1", "host-1", "192.168.1.20", initial_mac); + let mut dhcp_config = setup_test_env(vec![initial_host]); + + dhcp_config + .add_static_mapping( + &vec![initial_mac.to_string()], + &Ipv4Addr::new(192, 168, 1, 20), + "host-1", + ) + .unwrap(); + + let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts; + assert_eq!(hosts.len(), 1); + assert_eq!(hosts[0].hwaddr.content_string(), initial_mac); // No change, no duplication. + } + + #[test] + fn test_add_invalid_mac_address() { + let mut dhcp_config = setup_test_env(vec![]); + let result = dhcp_config.add_static_mapping( + &vec!["invalid-mac".to_string()], + &Ipv4Addr::new(10, 0, 0, 1), + "host", + ); + assert!(matches!(result, Err(DhcpError::InvalidMacAddress(_)))); + } + + #[test] + fn test_error_on_conflicting_ip_and_hostname() { + let host_a = create_host("uuid-a", "host-a", "192.168.1.10", "AA:AA:AA:AA:AA:AA"); + let host_b = create_host("uuid-b", "host-b", "192.168.1.20", "BB:BB:BB:BB:BB:BB"); + let mut dhcp_config = setup_test_env(vec![host_a, host_b]); + + let result = dhcp_config.add_static_mapping( + &vec!["CC:CC:CC:CC:CC:CC".to_string()], + &Ipv4Addr::new(192, 168, 1, 10), + "host-b", + ); + // This IP belongs to host-a, but the hostname belongs to host-b. + assert_eq!(result, Err(DhcpError::Configuration("Configuration conflict: IP 192.168.1.10 and hostname 'host-b' exist, but in different static host entries.".to_string()))); + } + + #[test] + fn test_error_on_multiple_ip_matches() { + let host_a = create_host("uuid-a", "host-a", "192.168.1.30", "AA:AA:AA:AA:AA:AA"); + let host_b = create_host("uuid-b", "host-b", "192.168.1.30", "BB:BB:BB:BB:BB:BB"); + let mut dhcp_config = setup_test_env(vec![host_a, host_b]); + + // This IP is ambiguous. + let result = dhcp_config.add_static_mapping( + &vec!["CC:CC:CC:CC:CC:CC".to_string()], + &Ipv4Addr::new(192, 168, 1, 30), + "new-host", + ); + assert_eq!(result, Err(DhcpError::Configuration("Configuration conflict: Found multiple host entries matching IP 192.168.1.30 and/or hostname 'new-host'. Cannot resolve automatically.".to_string()))); + } + + #[test] + fn test_remove_mac_from_multi_mac_host() { + let host = create_host("uuid-1", "host-1", "192.168.1.50", "mac-1,mac-2,mac-3"); + let mut dhcp_config = setup_test_env(vec![host]); + + dhcp_config.remove_static_mapping("mac-2"); + + let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts; + assert_eq!(hosts.len(), 1); + assert_eq!(hosts[0].hwaddr.content_string(), "mac-1,mac-3"); + } + + #[test] + fn test_remove_last_mac_from_host() { + let host = create_host("uuid-1", "host-1", "192.168.1.50", "mac-1"); + let mut dhcp_config = setup_test_env(vec![host]); + + dhcp_config.remove_static_mapping("mac-1"); + + let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts; + assert!(hosts.is_empty()); + } + + #[test] + fn test_remove_non_existent_mac() { + let host = create_host("uuid-1", "host-1", "192.168.1.50", "mac-1,mac-2"); + let mut dhcp_config = setup_test_env(vec![host.clone()]); + + dhcp_config.remove_static_mapping("mac-nonexistent"); + + let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts; + assert_eq!(hosts.len(), 1); + assert_eq!(hosts[0], host); // The host should be unchanged. + } + + #[test] + fn test_remove_mac_case_insensitively() { + let host = create_host("uuid-1", "host-1", "192.168.1.50", "AA:BB:CC:DD:EE:FF"); + let mut dhcp_config = setup_test_env(vec![host]); + + dhcp_config.remove_static_mapping("aa:bb:cc:dd:ee:ff"); + + let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts; + assert!(hosts.is_empty()); + } + + #[test] + fn test_remove_mac_from_correct_host_only() { + let host1 = create_host( + "uuid-1", + "host-1", + "192.168.1.50", + "AA:AA:AA:AA:AA:AA,BB:BB:BB:BB:BB:BB", + ); + let host2 = create_host( + "uuid-2", + "host-2", + "192.168.1.51", + "CC:CC:CC:CC:CC:CC,DD:DD:DD:DD:DD:DD", + ); + let mut dhcp_config = setup_test_env(vec![host1.clone(), host2.clone()]); + + dhcp_config.remove_static_mapping("AA:AA:AA:AA:AA:AA"); + + let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts; + assert_eq!(hosts.len(), 2); + let updated_host1 = hosts.iter().find(|h| h.uuid == "uuid-1").unwrap(); + let unchanged_host2 = hosts.iter().find(|h| h.uuid == "uuid-2").unwrap(); + + assert_eq!(updated_host1.hwaddr.content_string(), "BB:BB:BB:BB:BB:BB"); + assert_eq!( + unchanged_host2.hwaddr.content_string(), + "CC:CC:CC:CC:CC:CC,DD:DD:DD:DD:DD:DD" + ); + } +} diff --git a/opnsense-config/src/tests/data/config-25.7-dnsmasq-static-host.xml b/opnsense-config/src/tests/data/config-25.7-dnsmasq-static-host.xml new file mode 100644 index 0000000..f36e4f7 --- /dev/null +++ b/opnsense-config/src/tests/data/config-25.7-dnsmasq-static-host.xml @@ -0,0 +1,1674 @@ + + + opnsense + + + 115200 + serial + normal + OPNsense + testpxe.harmony.mcd + + admins + System Administrators + system + 1999 + 0,2000 + page-all + + + + root + System Administrator + system + $2y$10$YRVoF4SgskIsrXOvOQjGieB9XqHPRra9R7d80B3BZdbY/j21TwBfS + + 0 + 0 + + + + + + + + + + + + + Etc/UTC + 0.opnsense.pool.ntp.org 1.opnsense.pool.ntp.org 2.opnsense.pool.ntp.org 3.opnsense.pool.ntp.org + + https + 68a72b6f7f776 + + + + + + 1 + yes + 1 + 1 + 1 + 1 + 1 + 1 + hadp + hadp + hadp + + monthly + + 1 + 1 + + admins + 1 + + + + + + enabled + 1 + + 1 + + + -1 + -1 + + + + os-caddy,os-haproxy,os-tftp + + + 0 + + en_US + + 1 + + + + + vtnet0 + + 1 + + + dhcp + + 1 + 1 + + dhcp6 + 0 + + + + + + vtnet1 + 1 + 192.168.1.1 + 24 + + + + + + + + + 1 + lo0 + Loopback + 1 + 127.0.0.1 + none + 1 + 8 + ::1 + 128 + + + + + + + public + + + + + automatic + + + + + pass + lan + inet + Default allow LAN to any rule + + lan + + + + + + + pass + lan + inet6 + Default allow LAN IPv6 to any rule + + lan + + + + + + + + + + + 0.opnsense.pool.ntp.org + + + root@192.168.1.5 + /api/dnsmasq/settings/set made changes + + + + + + + + + + + + + + + v9 + + + + 0 + + 1800 + 15 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 0 + 0 + 0 + wan + 192.168.0.0/16,10.0.0.0/8,172.16.0.0/12 + + + W0D23 + 4 + + + + + + + 0 + 0 + 0 + + + + 0 + 0 + + + + 0 + 0 + 0 + + + + + + + + + 0 + 0 + + + + + + + + + 16 + 32 + 4 + 1000 + 1 + 0 + 0 + 0 + + + + + + + + 1 + 0 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + + + + + + + + + + 0 + + + + + + + 0 + 0 + + + ipsec + 0 + 1 + + + + + + + + + + + + + 0 + 0 + + 4000 + 1 + raw + + + 0 + + 2 + + + + + + + + 0 + 127.0.0.1 + 8000 + + + + + 0 + 0 + + 4000 + 1 + + + 0 + + 2 + + + + + + + + + + 0 + 120 + 120 + 127.0.0.1 + 25 + + + 0 + auto + 1 + + + + + 0 + root + + 2812 + + + 5 + 1 + + + 0 + root@localhost.local + 0 + + + + + + + 1 + $HOST + + system + + + + 300 + 30 +
+ + + + cfed35dc-f74b-417d-9ed9-682c5de96495,f961277a-07f1-49a4-90ee-bb15738d9ebb,30b2cce2-f650-4e44-a3e2-ee53886cda3f,3c86136f-35a4-4126-865b-82732c6542d9 + + + + + 1 + RootFs + + filesystem + + + / + 300 + 30 +
+ + + + fbb8dfe2-b9ad-4730-a0f3-41d7ecda6289 + + + + + 0 + carp_status_change + + custom + + + /usr/local/opnsense/scripts/OPNsense/Monit/carp_status + 300 + 30 +
+ + + + 11ceca8a-dff8-45e0-9dc5-ed80dc4b3947 + + + + + 0 + gateway_alert + + custom + + + /usr/local/opnsense/scripts/OPNsense/Monit/gateway_alert + 300 + 30 +
+ + + + fad1f465-4a92-4b93-be66-59d7059b8779 + + + + + Ping + NetworkPing + failed ping + alert + + + + NetworkLink + NetworkInterface + failed link + alert + + + + NetworkSaturation + NetworkInterface + saturation is greater than 75% + alert + + + + MemoryUsage + SystemResource + memory usage is greater than 75% + alert + + + + CPUUsage + SystemResource + cpu usage is greater than 75% + alert + + + + LoadAvg1 + SystemResource + loadavg (1min) is greater than 4 + alert + + + + LoadAvg5 + SystemResource + loadavg (5min) is greater than 3 + alert + + + + LoadAvg15 + SystemResource + loadavg (15min) is greater than 2 + alert + + + + SpaceUsage + SpaceUsage + space usage is greater than 75% + alert + + + + ChangedStatus + ProgramStatus + changed status + alert + + + + NonZeroStatus + ProgramStatus + status != 0 + alert + + + + + + + + + 1 + 1 + 31 + + + + + + + + + + + + 1 + 53 + 0 + + 0 + 0 + + 0 + 0 + + 0 + 0 + 0 + 0 + 0 + transparent + + 0 + + + 0 + 0 + 0 + 0 + 0 + 1 + 0 + + + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 1 + 0 + + 0.0.0.0/8,10.0.0.0/8,100.64.0.0/10,169.254.0.0/16,172.16.0.0/12,192.0.2.0/24,192.168.0.0/16,198.18.0.0/15,198.51.100.0/24,203.0.113.0/24,233.252.0.0/24,::1/128,2001:db8::/32,fc00::/8,fd00::/8,fe80::/10 + + + + + + + + + + + + + + 0 + + + + + allow + + + 0 + 0 + + + + + +
+ 0 + + + 0 + + + + + + + + + 0 + 0 + 0 + 1 + 0 + + + + + + + + + + + 1 + 192.168.1.1 + + + + + 0 + + + + + + + + + + + + + + + + + + + + + + + + + + 1 + 0 + 60s + + 0 + 0 + 1 + + 0 + + + 1024 + + + 1024 + + + 0 + + 1 + ipv4 + ignore + 2048 + 16384 + 2 + 0 + 0 + + 0 + 300 + 3600 + 0 + prefer-client-ciphers + TLSv1.2 + + ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256 + TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256 + + + + + + + + + + + 30s + 30s + + 30s + 3 + x-1 + last,libc + + + + 127.0.0.1 + local0 + info + + + + 0 + 8822 + 0 + + 0 + + + + + 0 + *:8404 + /metrics + + + 0 + 4 + 60 + + 0 + 10 + + + + + 9fdfbc10cd927a0e.4bc71f5a + 1 + frontend_192.168.1.1:80 + + 192.168.1.1:80 + + tcp + 5ac12c5f-70c9-4beb-85ca-a76060170ce0 + 0 + + + + 0 + + + + + + 0 + 0 + 0 + 0 + 0 + + + + 0 + + + + + + + + + 0 + 0 + 0 + 0 + 0 + + + + + 0 + + + + + + + + + 0 + 0 + + 0 + 0 + + + + + + + + 4e26f04f7ce919a9.9309067c + 1 + frontend_192.168.1.1:443 + + 192.168.1.1:443 + + tcp + 62e22d3f-58e4-4a58-bb31-88b55337d41c + 0 + + + + 0 + + + + + + 0 + 0 + 0 + 0 + 0 + + + + 0 + + + + + + + + + 0 + 0 + 0 + 0 + 0 + + + + + 0 + + + + + + + + + 0 + 0 + + 0 + 0 + + + + + + + + 9a98ae460a9aafb7.422509b9 + 1 + frontend_192.168.1.1:22623 + + 192.168.1.1:22623 + + tcp + 7aa31ee2-86f3-4ee2-b661-98ca6eb76bc9 + 0 + + + + 0 + + + + + + 0 + 0 + 0 + 0 + 0 + + + + 0 + + + + + + + + + 0 + 0 + 0 + 0 + 0 + + + + + 0 + + + + + + + + + 0 + 0 + + 0 + 0 + + + + + + + + 7832147fac80fc37.486ed3ac + 1 + frontend_192.168.1.1:6443 + + 192.168.1.1:6443 + + tcp + 61ef67ba-68e5-46fb-89ed-ede779bfcfc0 + 0 + + + + 0 + + + + + + 0 + 0 + 0 + 0 + 0 + + + + 0 + + + + + + + + + 0 + 0 + 0 + 0 + 0 + + + + + 0 + + + + + + + + + 0 + 0 + + 0 + 0 + + + + + + + + + + 6a54c1779007c844.4f855343 + 1 + backend_192.168.1.1:80 + + tcp + roundrobin + 2 + + 07f4ebd1-5f2e-48c5-9418-a719109b2928,cfd90752-fc3e-42c5-8a7c-e49486e8de38 + + + + + + 1 + 24321590-fe6e-4ada-980a-1c9e1bb6a990 + 0 + + + + + + 0 + 0 + + + + + 0 + + + 30m + 50k + + + 10s + 10s + 10s + 10s + 1m + 1m + 0 + + + + + + + + + 0 + + 0 + + + + + 17919f61aad1fdcb.6e03e25e + 1 + backend_192.168.1.1:443 + + tcp + roundrobin + 2 + + 2dfb75a8-1713-4452-a5f4-83c9759729ca,ff876fa2-0409-4b51-bbaf-406c53e74a71 + + + + + + 1 + bb246c2b-6180-428c-a168-a2875b3d1b0a + 0 + + + + + + 0 + 0 + + + + + 0 + + + 30m + 50k + + + 10s + 10s + 10s + 10s + 1m + 1m + 0 + + + + + + + + + 0 + + 0 + + + + + 9dadf56d866ff29b.690416dd + 1 + backend_192.168.1.1:22623 + + tcp + roundrobin + 2 + + c8653027-8497-4bba-a5ae-6034011cf7c7,89ffae6e-79a4-413a-89ea-6e9e52783243 + + + + + + 1 + 46542083-998b-4bb9-a47c-a586aac8bc0d + 0 + + + + + + 0 + 0 + + + + + 0 + + + 30m + 50k + + + 10s + 10s + 10s + 10s + 1m + 1m + 0 + + + + + + + + + 0 + + 0 + + + + + fd857b0343b2e697.21f0f89e + 1 + backend_192.168.1.1:6443 + + tcp + roundrobin + 2 + + b85e244f-fbb8-4d5d-93be-ff9fb9d828fc,c94bcbd2-2fe2-49a5-b611-034d1fc54119 + + + + + + 1 + 52dc61b7-d23e-4e47-ba22-9edfd914bbcb + 0 + + + + + + 0 + 0 + + + + + 0 + + + 30m + 50k + + + 10s + 10s + 10s + 10s + 1m + 1m + 0 + + + + + + + + + 0 + + 0 + + + + + + + f33ae26833f881f7.d1ec1e06 + 1 + 10.100.8.20_80 + +
10.100.8.20
+ 80 + + active + + static + + + + + + 0 + + 0 + + + + + + + + + +
+ + fd47b7f46d7d69d8.65fb15f6 + 1 + 10.100.8.20_443 + +
10.100.8.20
+ 443 + + active + + static + + + + + + 0 + + 0 + + + + + + + + + +
+ + 3e3974dbcbc95c6d.cc0c066d + 1 + 10.100.8.20_22623 + +
10.100.8.20
+ 22623 + + active + + static + + + + + + 0 + + 0 + + + + + + + + + +
+ + 2817f4f2b67b9a5f.1960d7f6 + 1 + 10.100.8.20_6443 + +
10.100.8.20
+ 6443 + + active + + static + + + + + + 0 + + 0 + + + + + + + + + +
+
+ + + TCP_serverport + + tcp + 2s + + + 0 + + + + + + + + + + + + + + + + + + + + + + + + + HTTP_GET_/readyz + + http + 2s + + + 0 + + GET + /readyz + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 0 + + 0 + + 0 + + 0 + + + +
+ + + + + 68a72b6f7f776 + Web GUI TLS certificate + LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUhFakNDQlBxZ0F3SUJBZ0lVUkZqWUQ0Z1U0bzRNZGdiN2pIc29KNU9GVGFnd0RRWUpLb1pJaHZjTkFRRUwKQlFBd2dZWXhHakFZQmdOVkJBTU1FVTlRVG5ObGJuTmxMbWx1ZEdWeWJtRnNNUXN3Q1FZRFZRUUdFd0pPVERFVgpNQk1HQTFVRUNBd01XblZwWkMxSWIyeHNZVzVrTVJVd0V3WURWUVFIREF4TmFXUmtaV3hvWVhKdWFYTXhMVEFyCkJnTlZCQW9NSkU5UVRuTmxibk5sSUhObGJHWXRjMmxuYm1Wa0lIZGxZaUJqWlhKMGFXWnBZMkYwWlRBZUZ3MHkKTlRBNE1qRXhOREl4TXpaYUZ3MHlOakE1TWpJeE5ESXhNelphTUlHR01Sb3dHQVlEVlFRRERCRlBVRTV6Wlc1egpaUzVwYm5SbGNtNWhiREVMTUFrR0ExVUVCaE1DVGt3eEZUQVRCZ05WQkFnTURGcDFhV1F0U0c5c2JHRnVaREVWCk1CTUdBMVVFQnd3TVRXbGtaR1ZzYUdGeWJtbHpNUzB3S3dZRFZRUUtEQ1JQVUU1elpXNXpaU0J6Wld4bUxYTnAKWjI1bFpDQjNaV0lnWTJWeWRHbG1hV05oZEdVd2dnSWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUNEd0F3Z2dJSwpBb0lDQVFDbENkeFJ3ZWJQQkxvYlVORnYvL2t3TEdKWExweDl6OFFHV2lyWTNpamVDeUxDQ0FwczBLaE1adTNRClhkczMranppbDRnSE96L0hvUEo5Z0xxMy9FYnR4cE9ENWkvQzZwbXc3SGM1M2tTQ3JCK2tlWUFnVWZ1aDU3MzAKZyt3cGc5RDQzaHFBNzF1L3F0ZC95eitnTVJnTWdZMndEK3ZWQWRrdGxVSWlmN2piTmR1RDRGMmdkL0gwbzljWApEUm5zMzNNQVptTkZwajN4QWFwQi9RWnhKV1JMZ1J5K1A5MWcyZEZFNzhNaWY4ZTRNSCtrU29ndzIwVG1JbmpzCitKdEVTc0xQZmx2eUZUa0lkTVdFbURWOG1HUk5hNXVoYXlEbVNEUU9xV0NUTlZHV3ZVWjZTRnJRZ1Q1MDBEdXgKWnRtYlhGdEVqRzlIaGd5SW5QT0dKbWYzTWVzS3dYclVNMW1BenVCRVBFR0lwOTc3UTY2SitpTDYzWTUvTTB3aAphMGVVNGppNTVRQnJOQjlaWjJsa080bGU2TXdmZm50c29JakMrVDh5RW5tbW5nQTlTdWNPRW9CcFFhd3cvRGhOCmtSNGk4TUptR1JNdmpLazlHVzZ3Z2VNVThJVDhKZDRjTmJOVzdFSGpzV08xN1luTVhIMEUxOVZqa2d1R3dIODAKZ3ZROGtzTmV4WVA3WWo0b0VycnRKbWVhWU8wbFVkV0tGektNdS8va0UvNG5HK0h4emlRUnA5QmdyTURNYks4ZgpkM29mY2tqZFZTTW9Vc1FJaWlmdTFMK1I4V1Y3K3hsTzdTWS80dGk3Y28zcjNXRTYyVlE4Vk9QMVphcStWRFpvClNIMVRCa0lTSU5paVJFRzhZSDQvRHJwNWZ2dHBPcERBRGN1TGdDNDJHcExmS1pwVEtRSURBUUFCbzRJQmREQ0MKQVhBd0NRWURWUjBUQkFJd0FEQVJCZ2xnaGtnQmh2aENBUUVFQkFNQ0JrQXdOQVlKWUlaSUFZYjRRZ0VOQkNjVwpKVTlRVG5ObGJuTmxJRWRsYm1WeVlYUmxaQ0JUWlhKMlpYSWdRMlZ5ZEdsbWFXTmhkR1V3SFFZRFZSME9CQllFCkZIdUVQK05yYlorZWdMdWZVSUFKaUo2M1c4SDFNSUd3QmdOVkhTTUVnYWd3Z2FXaGdZeWtnWWt3Z1lZeEdqQVkKQmdOVkJBTU1FVTlRVG5ObGJuTmxMbWx1ZEdWeWJtRnNNUXN3Q1FZRFZRUUdFd0pPVERFVk1CTUdBMVVFQ0F3TQpXblZwWkMxSWIyeHNZVzVrTVJVd0V3WURWUVFIREF4TmFXUmtaV3hvWVhKdWFYTXhMVEFyQmdOVkJBb01KRTlRClRuTmxibk5sSUhObGJHWXRjMmxuYm1Wa0lIZGxZaUJqWlhKMGFXWnBZMkYwWllJVVJGallENGdVNG80TWRnYjcKakhzb0o1T0ZUYWd3SFFZRFZSMGxCQll3RkFZSUt3WUJCUVVIQXdFR0NDc0dBUVVGQ0FJQ01Bc0dBMVVkRHdRRQpBd0lGb0RBY0JnTlZIUkVFRlRBVGdoRlBVRTV6Wlc1elpTNXBiblJsY201aGJEQU5CZ2txaGtpRzl3MEJBUXNGCkFBT0NBZ0VBV2JzM2MwSXYwcEd3Y0wvUmRlbnBiZVJHQ3FsODY0V1ZITEtMZzJIR3BkKytJdmRFcHJEZkZ3SCsKdHdOd2VrZTlXUEtYa20vUkZDWE5DQmVLNjkxeURVWCtCNUJOMjMvSks5N1lzRVdtMURIV3FvSDE1WmdqelZ0QQp2d2JmbnRQdlhCWU1wV2ZQY0Zua0hjN3pxUjI3RzBEZHFUeGg2TjhFenV1S3JRWXFtaWhJUXFkNU9HRVhteW9ZCmdPVjdoZ0lWSUR6a1Z0QkRiS3dFV3VFN2pKYzViMXR4Mk1FUFRsVklEZGo0Zm5vdURWemdkczA2RER4aFM4eXAKbXJOSXhxb045ekEzYXVtTnRNZ2haSHVZRHdjbm5GSnBNZHlJSEdHZ1dlNnZZNHFtdEFSVDd3a0x6MTZnUG9LMAo5bFhVU0RmV3YwUDJGUXFHZTJjaXQ3VVE2ZGtsUWsrVGVtUEFwNnhEV09HR3oxRkdmUUoxN040b3AvOGtlOUo2Cm96RVp3QTh1aDVYTUl2N3loM2dobjV1d1R6RDUyZ1BBZFdaekEyaHVWV3p5cVM0WVc0N3ZkaGV6TTFTUndabVEKUmYzNDk0UVFydWd0bzdycWdMUlRTSXN4WEtkU21MaHZjT0hsSlhISW1XNTRzeFlXNm9NUStpRExFT29ZVVdpcgp1aUJvT1RsNEJaOG5Xcm9pV0JtWlFLaVRPYlFRczVWTkIwYnVybmRISTJVdmtIRTE3QTM0bFYySjY5Q0dNNzJ2CjQ5aE9TN3B2Tzg4cEVKZm90d01YYlRhdkR2WTBHazZxbERFMVBud1U2Wm8ySEprcFdUTUxOSzh1alZ1RkhlMGkKR2JvZi9va08vZW4rUi9PUXNyd1JYbzFwVTRiWnlyWGVQeUdqSSsrdFYzemhjd0IwWjNJPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + + + LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUpRUUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQ1Nzd2dna25BZ0VBQW9JQ0FRQ2xDZHhSd2ViUEJMb2IKVU5Gdi8va3dMR0pYTHB4OXo4UUdXaXJZM2lqZUN5TENDQXBzMEtoTVp1M1FYZHMzK2p6aWw0Z0hPei9Ib1BKOQpnTHEzL0VidHhwT0Q1aS9DNnBtdzdIYzUza1NDckIra2VZQWdVZnVoNTczMGcrd3BnOUQ0M2hxQTcxdS9xdGQvCnl6K2dNUmdNZ1kyd0QrdlZBZGt0bFVJaWY3amJOZHVENEYyZ2QvSDBvOWNYRFJuczMzTUFabU5GcGozeEFhcEIKL1FaeEpXUkxnUnkrUDkxZzJkRkU3OE1pZjhlNE1IK2tTb2d3MjBUbUluanMrSnRFU3NMUGZsdnlGVGtJZE1XRQptRFY4bUdSTmE1dWhheURtU0RRT3FXQ1ROVkdXdlVaNlNGclFnVDUwMER1eFp0bWJYRnRFakc5SGhneUluUE9HCkptZjNNZXNLd1hyVU0xbUF6dUJFUEVHSXA5NzdRNjZKK2lMNjNZNS9NMHdoYTBlVTRqaTU1UUJyTkI5WloybGsKTzRsZTZNd2ZmbnRzb0lqQytUOHlFbm1tbmdBOVN1Y09Fb0JwUWF3dy9EaE5rUjRpOE1KbUdSTXZqS2s5R1c2dwpnZU1VOElUOEpkNGNOYk5XN0VIanNXTzE3WW5NWEgwRTE5VmprZ3VHd0g4MGd2UThrc05leFlQN1lqNG9FcnJ0CkptZWFZTzBsVWRXS0Z6S011Ly9rRS80bkcrSHh6aVFScDlCZ3JNRE1iSzhmZDNvZmNramRWU01vVXNRSWlpZnUKMUwrUjhXVjcreGxPN1NZLzR0aTdjbzNyM1dFNjJWUThWT1AxWmFxK1ZEWm9TSDFUQmtJU0lOaWlSRUc4WUg0LwpEcnA1ZnZ0cE9wREFEY3VMZ0M0MkdwTGZLWnBUS1FJREFRQUJBb0lDQUFTSHc4Tit4aDR5ckFVcDc4WGFTZlhYCmtnK0FtUTBmRWV0MnVDeGgxTTlia09Xd29OQ2gzYXpUT24zNHhaYkF5TUVUbGNsVkNBZ3IwOXc4RjJRTGljcm4KSTQrQVZ4bExwVkprKzFUY1ZCY2VNSFFzWGFjRmVSblZxYkkzbU5qKzVGS2dqaXV4NWx2WmpiYlZWbmJJUWplOQpxcTBGa3R5ekEwb3NDYmUydDlWVW9pVDVtTGhaOG90Ym9BRGkvQzR6YUEyL3djUGNyMkNaUWhvem51U21PUjJWCmVydUNOMHA4VURGTFA1a0gxdXlvY0NpTFh6ZXdIVEVRQ3krK0YwMEZuRmxqeDVSYW5za3JvMnhqWFR5QlZtZUYKcDYwRHF0Q0hkTjVlS2VlQWxDL0dIRlFvL2swdzd3ejMxbHVsVGgza3FDQzJsaXRwYzVpZ2JsTGxaUDgxSUpXTQp0bkhlczNsTXk1RGNDWUx3L3huZFdmVDZFMTB4WlhFNWI0QTdxYjF4Yjhsd1FoNHFJckhDZ2p1NDVPYXNCMERJClBYZ3E2eWkwL2FKWXV6SU5kcjRTeFRibExGUkp6MXlQaGZTZDVGbjdWQVBYU1JNTDlDbzJrL0M1SDlwdG1HMjYKZHBLQVNib1ZMcStrbXg3anVKYXc0a1JNNHZmYndHZGNMZEhqMXByZ08xNkd1ckpQOVRRQ0x5YzhaR0xOekcvaApIMzBpU2FlclJOUmtDRlhmeTEzWWJJZTZHTE12KzVmODlYSENGNmZrZ1JkZjVrbTA3cEc3SCtMZytmZFdtd2lZCm0waExNSFVZeHJ3WkFma2tvZjhlSllscEVQVmQ3ZytCVjd2eTZhYW0yQituUTdHYk84WUprSnlJME04amlSaDEKeGdjRmFZaGZlT21RZGtVbi9BcUJBb0lCQVFEU1JZbDl0SnJyQk5UOXlZN0twWTJiOGVURFJqdDByU1NQRUJvNgppeWoyVWR5S1ZSbHFFdzRma2IrejV3WWt2bnBpMW1uS3NjNFlLZmoyaDVSdXVDbzVzTUNLbmpDUXBpbll4bWRFCk45Z3l6SWRYMmlzRUh6dXNSZkZiajBVSWU1dHc0TE9pL3cyVzFYWGJUc3liOFdhTmhkbVQ4TGxDNjQ5WkNNUWQKeDZkeTdOWS9uYnVWVVQ0KzM3WmV0VlR1eDg1ekl5OTdnMWp4dFZhaXZrd2hQVWtLcWpXWDYzaUZidjFuY1FVdgpiQURrWkFoOXRWYWV2UGZ2NURDeDZITldiVFlObjVRZWd3OTRyVndoSjhYb1V5ZDRqWFB0VmdXU2VkN0tWd2U5CmNkNW9CZWFBOVhDdnJxdkNIRjI4QXg2OUI2YWQrQlk1S0dVcGU2LythQnlKdlQwUkFvSUJBUURJN2c3c0dMc3AKVWJ4dGhJQm9yRzF5MDRKWWlnaE5VMlF4YjdzSkxocnRTc2NtRkxSZU5DYy8zOTBCV3ZrbVFIazFnZkpxV3hDLwp2R0VMT0Iwd3U5VFBBRWFsS25IZ2RhNkFTMURuM3NTWTltcFBRRjYvbEY2cm00cDlNUU1TTFo1V3ZsL0ZNRklHCjUvaXVSVjJaOVdkeTV4QVFWNG5uZmdMOWJDNzhPa2k3VnFPTDJDZk0vcEJEMHdzRUZmOGZiejFSZXo4dEFRZ2QKVXY4cEpFTWdDTCtQeEdkdG5DYUcxYm5obXhEUUxiWmQ4TTFOQkpKOWZLZFgzVWtLcTlDdmFJVXBIZlduSFBWVAprVWNwMUVTYnEzOFVhTzFSS1NBNUtQd1ZiNkVPVGJBSGtlaEN4ZVhpN2F3YkZwYXlTelpIaWl4Y05QQjk1YUtSCkpJQ0J5ekFwQTVTWkFvSUJBRlZKYXlrWGxqWjVNVUwyKy9ucUNIUVdPeW1SVlJCUUlpSDg4QWFLNTBSeGs3aHcKSit6RWFkZ1lMOTl5ZHlWME5RUGQzKzhkQzNEMXBVdXBWbVZLUWFaQXNQZ0lqYjQrQjM4cmlqczdRMi9uVVlZcQpzWVBzZnpHeTlPQ2tUZVhRN1ExdHRxOElNS1RiVkFCdUI4UEF1RTN5Mm51TkNqZkFmOVluSGhUT0pIY1M1UnZNCmlJZForcHRaOWdpWUdDajUxaDBSU25NWXBYejBobjFnSGxUbEhMazhySnhBSUJSUEhtMVVoRHZsM0w3R2JFTkEKeUM5K2lqbzlIaHNySTQwTW92NEhtZlorUmtvMlZzWUQ4ZHYzem15eFF6SWkwQVBIZHJ3dmJLNUVmMmRGN1dhbApKdDI3UldOb1NnUzJaME5ZMVJZQnlGSEt0cTJLdzZtMjVNeGhlMkVDZ2dFQVhSNFdSRXhoMEpCVXB0eVZOZTFTCis3Z1IzRDU4QW5uM0lRSUt5QUpaOEVhTGJKYUQwSFNUREFNUFJTV0grYlkvZGhDMjY1c3djK3MxZmlHUFJacUcKMFRmcmhYZmFOby9UUXhta2NSRElRNnRQTVZNL2xjR0k3amF6UTdtSEZ0R1ZZOVh1UkZCVWMyYmwxTDNJMXlUbgp3RlJkR1hXNEwxUXl4b2R3YnV3RDhPNEI5VGxEbUxrUTJwM2ZxUkVZbnRUS3NneFFCdWRIZjIrTFdPRzVTZ3RECjI3akZ4Z0pyeUdrY0wvWFJJT2xPYnRLK0VrZGdMRStzcmdlYlpocWlKK2hrYmQyNGpxM1k4OVdNQ1ZLYVNScDkKVmxRYVIxYXIzRkdtSWJrT0JyYnlNVS9wTjZqSEZSZllmdVZGQ1hQWnYrWEZFU1pubmJEaVdpbDBkTEpacTJoQgpZUUtDQVFBOVlTcE1wS3dhVGwrTmhTZlovMXU0NjZiMkpZcmJPYlRJM2VCZUowMnFwNXdQTjZYUHJ5aVZaZ1FXClh5cG04a3M5MEJIblBPNUczNFJnKzhLRFlONU1Ed1hBclJubDdSeTEySG5oV3lSaHNKYmdZOEh1c2d4SEROMU8KMEcwSFBpVWtIbTYydlRNYll6bkhPeE5sS1hFdFhBcTlZM3dQYkFjb01rRXZ0MzEwdEdLSUNtdTdEWkpXRlVvTAp1Y3RVS3Boc0V5VWdHbHIwRjJKekVoQWdMRXplczB0S1JpRWdlaFdnbXdlMEhlTEhCdW5oRFBTMmFJY2lCME1pCjY2SGc3cVZyMDlneXpFeGxrY3RLRzhsSm9WbU8vdlhucWQrWDB5M21YTUVZbkFIWHpIeG1Pd2JCNnF3Y3VWTlEKZytqRXliUWF3d3A2OC9id0JncFREQUhORGxrRQotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tCg== + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 0 + + + 0 + 0 + + 1400 + + + + + + 1 + 0 + 8080 + 8443 + + + + + 0 + + + + + + 0 + 10 + h1,h2 + + + + + 0 + 0 + 10 + + + + + + + 0 + + + + 0 + + + + + + + + + + 1 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + lan + 0 + + + + + 0 + 0 + + + 1 + + 1 + + 0 + 1 + + 0 + 0 + + 1 + + teststatichost + + 1 + 192.168.1.20 + + + 01:c4:f3:f4:8a:15,01:c4:f3:f4:8a:16 + + 0 + + description + controlled by someone comments + + + + ipxe + + + pxeEfi + + + pxeBios + + + match + + + + + 8d190cf3-8d2d-47db-ab9b-fa21016b533e + iPXE + 0 + + + + diff --git a/opnsense-config/src/tests/data/config-full-1.xml b/opnsense-config/src/tests/data/config-full-1.xml index fbd7fe2..378d577 100644 --- a/opnsense-config/src/tests/data/config-full-1.xml +++ b/opnsense-config/src/tests/data/config-full-1.xml @@ -215,7 +215,6 @@ System Administrators system 1999 - 0 2000 page-all diff --git a/opnsense-config/src/tests/data/config-structure-with-dhcp-staticmap-entry.xml b/opnsense-config/src/tests/data/config-structure-with-dhcp-staticmap-entry.xml index 54c2475..f41b055 100644 --- a/opnsense-config/src/tests/data/config-structure-with-dhcp-staticmap-entry.xml +++ b/opnsense-config/src/tests/data/config-structure-with-dhcp-staticmap-entry.xml @@ -27,7 +27,6 @@ System Administrators system 1999 - 0 2000 page-all diff --git a/opnsense-config/src/tests/data/config-structure.xml b/opnsense-config/src/tests/data/config-structure.xml index ea51273..32c9317 100644 --- a/opnsense-config/src/tests/data/config-structure.xml +++ b/opnsense-config/src/tests/data/config-structure.xml @@ -27,7 +27,6 @@ System Administrators system 1999 - 0 2000 page-all