From 1cec398d4d4185adec39273e2b329a20da00f6f4 Mon Sep 17 00:00:00 2001 From: Willem Date: Mon, 29 Sep 2025 11:29:34 -0400 Subject: [PATCH 01/12] fix: modifed naming scheme to OpenshiftFamily, K3sFamily, and defaultswitched discovery of openshiftfamily to look for projet.openshift.io --- harmony/src/domain/topology/k8s_anywhere.rs | 28 +++++++++++---------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/harmony/src/domain/topology/k8s_anywhere.rs b/harmony/src/domain/topology/k8s_anywhere.rs index e6c37ea..758198f 100644 --- a/harmony/src/domain/topology/k8s_anywhere.rs +++ b/harmony/src/domain/topology/k8s_anywhere.rs @@ -48,10 +48,10 @@ struct K8sState { } #[derive(Debug, Clone)] -pub enum K8sFlavour { - Okd, - K3d, - K8s, +pub enum KubernetesDistribution { + OpenshiftFamily, + K3sFamily, + Default, } #[derive(Debug, Clone)] @@ -64,7 +64,7 @@ enum K8sSource { pub struct K8sAnywhereTopology { k8s_state: Arc>>, tenant_manager: Arc>, - flavour: Arc>, + flavour: Arc>, config: Arc, } @@ -184,7 +184,7 @@ impl K8sAnywhereTopology { } } - pub async fn get_k8s_flavour(&self) -> K8sFlavour { + pub async fn get_k8s_distribution(&self) -> KubernetesDistribution { self.flavour .get_or_try_init(async || { let client = self.k8s_client().await.unwrap(); @@ -197,22 +197,22 @@ impl K8sAnywhereTopology { PreparationError::new(format!("Could not get server version: {}", e)) })?; - let rules: &[&dyn Fn() -> Option] = &[ + let rules: &[&dyn Fn() -> Option] = &[ // OpenShift / OKD &|| { discovery .groups() - .any(|g| g.name().ends_with("openshift.io")) - .then_some(K8sFlavour::Okd) + .any(|g| g.name() == "project.openshift.io") + .then_some(KubernetesDistribution::OpenshiftFamily) }, // K3d / K3s &|| { version .git_version .contains("k3s") - .then_some(K8sFlavour::K3d) + .then_some(KubernetesDistribution::K3sFamily) }, - // Vanilla Kubernetes + // Fallback Kubernetes K8s &|| { if !discovery .groups() @@ -220,7 +220,7 @@ impl K8sAnywhereTopology { && 
!version.git_version.contains("k3s") && !version.git_version.contains("k3d") { - Some(K8sFlavour::K8s) + Some(KubernetesDistribution::Default) } else { None } @@ -228,7 +228,9 @@ impl K8sAnywhereTopology { ]; rules.iter().find_map(|rule| rule()).ok_or_else(|| { - PreparationError::new("Unknown Kubernetes cluster flavour".to_string()) + PreparationError::new( + "Unable to detect Kubernetes cluster distribution".to_string(), + ) }) }) .await From 2d3c32469c49107bf0e4a625b8e6f39afe27740c Mon Sep 17 00:00:00 2001 From: Jean-Gabriel Gill-Couture Date: Tue, 30 Sep 2025 22:59:50 -0400 Subject: [PATCH 02/12] chore: Simplify k8s flavour detection algorithm and do not unwrap when it cannot be detected, just return Err --- harmony/src/domain/topology/k8s_anywhere.rs | 51 ++++++--------------- 1 file changed, 14 insertions(+), 37 deletions(-) diff --git a/harmony/src/domain/topology/k8s_anywhere.rs b/harmony/src/domain/topology/k8s_anywhere.rs index 758198f..5e448b8 100644 --- a/harmony/src/domain/topology/k8s_anywhere.rs +++ b/harmony/src/domain/topology/k8s_anywhere.rs @@ -184,7 +184,7 @@ impl K8sAnywhereTopology { } } - pub async fn get_k8s_distribution(&self) -> KubernetesDistribution { + pub async fn get_k8s_distribution(&self) -> Result<&KubernetesDistribution, PreparationError> { self.flavour .get_or_try_init(async || { let client = self.k8s_client().await.unwrap(); @@ -197,45 +197,22 @@ impl K8sAnywhereTopology { PreparationError::new(format!("Could not get server version: {}", e)) })?; - let rules: &[&dyn Fn() -> Option] = &[ - // OpenShift / OKD - &|| { - discovery - .groups() - .any(|g| g.name() == "project.openshift.io") - .then_some(KubernetesDistribution::OpenshiftFamily) - }, - // K3d / K3s - &|| { - version - .git_version - .contains("k3s") - .then_some(KubernetesDistribution::K3sFamily) - }, - // Fallback Kubernetes K8s - &|| { - if !discovery - .groups() - .any(|g| g.name().ends_with("openshift.io")) - && !version.git_version.contains("k3s") - && 
!version.git_version.contains("k3d") - { - Some(KubernetesDistribution::Default) - } else { - None - } - }, - ]; + // OpenShift / OKD + if discovery + .groups() + .any(|g| g.name() == "project.openshift.io") + { + return Ok(KubernetesDistribution::OpenshiftFamily); + } - rules.iter().find_map(|rule| rule()).ok_or_else(|| { - PreparationError::new( - "Unable to detect Kubernetes cluster distribution".to_string(), - ) - }) + // K3d / K3s + if version.git_version.contains("k3s") { + return Ok(KubernetesDistribution::K3sFamily); + } + + return Ok(KubernetesDistribution::Default); }) .await - .unwrap() - .clone() } async fn get_cluster_observability_operator_prometheus_application_score( From c0bd8007c7c44c2903c782f36c23428e22ed76dd Mon Sep 17 00:00:00 2001 From: Ian Letourneau Date: Mon, 15 Sep 2025 17:07:50 -0400 Subject: [PATCH 03/12] feat(switch/brocade): Implement client to interact with Brocade Switch * Expose a high-level `brocade::init()` function to connect to a Brocade switch and automatically pick the best implementation based on its OS and version * Implement a client for Brocade switches running on Network Operating System (NOS) * Implement a client for older Brocade switches running on FastIron (partial implementation) The architecture for the library is based on 3 layers: 1. The `BrocadeClient` trait to describe the available capabilities to interact with a Brocade switch. It is partly opinionated in order to offer higher level features to group multiple commands into a single function (e.g. create a port channel). Its implementations are basically just the commands to run on the switch and the functions to parse the output. 2. The `BrocadeShell` struct to make it easier to authenticate, send commands, and interact with the switch. 3. The `ssh` module to actually connect to the switch over SSH and execute the commands. With time, we will add support for more Brocade switches and their various OS/versions. 
If needed, shared behavior could be extracted into a separate module to make it easier to add new implementations. --- Cargo.lock | 60 ++-- Cargo.toml | 14 +- brocade/Cargo.toml | 18 ++ brocade/examples/main.rs | 70 +++++ brocade/src/fast_iron.rs | 211 ++++++++++++++ brocade/src/lib.rs | 336 ++++++++++++++++++++++ brocade/src/network_operating_system.rs | 306 ++++++++++++++++++++ brocade/src/shell.rs | 367 ++++++++++++++++++++++++ brocade/src/ssh.rs | 113 ++++++++ harmony_types/src/id.rs | 2 +- harmony_types/src/lib.rs | 1 + harmony_types/src/net.rs | 8 +- harmony_types/src/switch.rs | 176 ++++++++++++ 13 files changed, 1658 insertions(+), 24 deletions(-) create mode 100644 brocade/Cargo.toml create mode 100644 brocade/examples/main.rs create mode 100644 brocade/src/fast_iron.rs create mode 100644 brocade/src/lib.rs create mode 100644 brocade/src/network_operating_system.rs create mode 100644 brocade/src/shell.rs create mode 100644 brocade/src/ssh.rs create mode 100644 harmony_types/src/switch.rs diff --git a/Cargo.lock b/Cargo.lock index a5a62e9..f88b4a6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -665,6 +665,22 @@ dependencies = [ "serde_with", ] +[[package]] +name = "brocade" +version = "0.1.0" +dependencies = [ + "async-trait", + "env_logger", + "harmony_secret", + "harmony_types", + "log", + "regex", + "russh", + "russh-keys", + "serde", + "tokio", +] + [[package]] name = "brotli" version = "8.0.2" @@ -1822,18 +1838,6 @@ dependencies = [ "url", ] -[[package]] -name = "example-penpot" -version = "0.1.0" -dependencies = [ - "harmony", - "harmony_cli", - "harmony_macros", - "harmony_types", - "tokio", - "url", -] - [[package]] name = "example-pxe" version = "0.1.0" @@ -2428,6 +2432,17 @@ dependencies = [ "tokio", ] +[[package]] +name = "harmony_derive" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d138bbb32bb346299c5f95fbb53532313f39927cb47c411c99c634ef8665ef7" +dependencies = [ + "proc-macro2", + "quote", + 
"syn 1.0.109", +] + [[package]] name = "harmony_inventory_agent" version = "0.1.0" @@ -3874,6 +3889,19 @@ dependencies = [ "web-time", ] +[[package]] +name = "okd_host_network" +version = "0.1.0" +dependencies = [ + "harmony", + "harmony_cli", + "harmony_derive", + "harmony_inventory_agent", + "harmony_macros", + "harmony_types", + "tokio", +] + [[package]] name = "once_cell" version = "1.21.3" @@ -4561,9 +4589,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.11.2" +version = "1.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23d7fd106d8c02486a8d64e778353d1cffe08ce79ac2e82f540c86d0facf6912" +checksum = "8b5288124840bee7b386bc413c487869b360b2b4ec421ea56425128692f2a82c" dependencies = [ "aho-corasick 1.1.3", "memchr", @@ -4573,9 +4601,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.10" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b9458fa0bfeeac22b5ca447c63aaf45f28439a709ccd244698632f9aa6394d6" +checksum = "833eb9ce86d40ef33cb1306d8accf7bc8ec2bfea4355cbdebb3df68b40925cad" dependencies = [ "aho-corasick 1.1.3", "memchr", diff --git a/Cargo.toml b/Cargo.toml index d92c0e7..a10bf81 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,7 +14,8 @@ members = [ "harmony_composer", "harmony_inventory_agent", "harmony_secret_derive", - "harmony_secret", "adr/agent_discovery/mdns", + "harmony_secret", + "adr/agent_discovery/mdns", "brocade", ] [workspace.package] @@ -66,5 +67,12 @@ thiserror = "2.0.14" serde = { version = "1.0.209", features = ["derive", "rc"] } serde_json = "1.0.127" askama = "0.14" -sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite" ] } -reqwest = { version = "0.12", features = ["blocking", "stream", "rustls-tls", "http2", "json"], default-features = false } +sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite"] } +reqwest = { version = "0.12", features = [ + "blocking", + "stream", + "rustls-tls", + "http2", + 
"json", +], default-features = false } +assertor = "0.0.4" diff --git a/brocade/Cargo.toml b/brocade/Cargo.toml new file mode 100644 index 0000000..89c4fb8 --- /dev/null +++ b/brocade/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "brocade" +edition = "2024" +version.workspace = true +readme.workspace = true +license.workspace = true + +[dependencies] +async-trait.workspace = true +harmony_types = { path = "../harmony_types" } +russh.workspace = true +russh-keys.workspace = true +tokio.workspace = true +log.workspace = true +env_logger.workspace = true +regex = "1.11.3" +harmony_secret = { path = "../harmony_secret" } +serde.workspace = true diff --git a/brocade/examples/main.rs b/brocade/examples/main.rs new file mode 100644 index 0000000..34dec21 --- /dev/null +++ b/brocade/examples/main.rs @@ -0,0 +1,70 @@ +use std::net::{IpAddr, Ipv4Addr}; + +use brocade::BrocadeOptions; +use harmony_secret::{Secret, SecretManager}; +use harmony_types::switch::PortLocation; +use serde::{Deserialize, Serialize}; + +#[derive(Secret, Clone, Debug, Serialize, Deserialize)] +struct BrocadeSwitchAuth { + username: String, + password: String, +} + +#[tokio::main] +async fn main() { + env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init(); + + // let ip = IpAddr::V4(Ipv4Addr::new(10, 0, 0, 250)); // old brocade @ ianlet + let ip = IpAddr::V4(Ipv4Addr::new(192, 168, 55, 101)); // brocade @ sto1 + // let ip = IpAddr::V4(Ipv4Addr::new(192, 168, 4, 11)); // brocade @ st + let switch_addresses = vec![ip]; + + let config = SecretManager::get_or_prompt::() + .await + .unwrap(); + + let brocade = brocade::init( + &switch_addresses, + 22, + &config.username, + &config.password, + Some(BrocadeOptions { + dry_run: true, + ..Default::default() + }), + ) + .await + .expect("Brocade client failed to connect"); + + let entries = brocade.get_stack_topology().await.unwrap(); + println!("Stack topology: {entries:#?}"); + + let entries = 
brocade.get_interfaces().await.unwrap(); + println!("Interfaces: {entries:#?}"); + + let version = brocade.version().await.unwrap(); + println!("Version: {version:?}"); + + println!("--------------"); + let mac_adddresses = brocade.get_mac_address_table().await.unwrap(); + println!("VLAN\tMAC\t\t\tPORT"); + for mac in mac_adddresses { + println!("{}\t{}\t{}", mac.vlan, mac.mac_address, mac.port); + } + + println!("--------------"); + let channel_name = "1"; + brocade.clear_port_channel(channel_name).await.unwrap(); + + println!("--------------"); + let channel_id = brocade.find_available_channel_id().await.unwrap(); + + println!("--------------"); + let channel_name = "HARMONY_LAG"; + let ports = [PortLocation(2, 0, 35)]; + brocade + .create_port_channel(channel_id, channel_name, &ports) + .await + .unwrap(); +} diff --git a/brocade/src/fast_iron.rs b/brocade/src/fast_iron.rs new file mode 100644 index 0000000..a1a2478 --- /dev/null +++ b/brocade/src/fast_iron.rs @@ -0,0 +1,211 @@ +use super::BrocadeClient; +use crate::{ + BrocadeInfo, Error, ExecutionMode, InterSwitchLink, InterfaceInfo, MacAddressEntry, + PortChannelId, PortOperatingMode, parse_brocade_mac_address, shell::BrocadeShell, +}; + +use async_trait::async_trait; +use harmony_types::switch::{PortDeclaration, PortLocation}; +use log::{debug, info}; +use regex::Regex; +use std::{collections::HashSet, str::FromStr}; + +pub struct FastIronClient { + shell: BrocadeShell, + version: BrocadeInfo, +} + +impl FastIronClient { + pub fn init(mut shell: BrocadeShell, version_info: BrocadeInfo) -> Self { + shell.before_all(vec!["skip-page-display".into()]); + shell.after_all(vec!["page".into()]); + + Self { + shell, + version: version_info, + } + } + + fn parse_mac_entry(&self, line: &str) -> Option> { + debug!("[Brocade] Parsing mac address entry: {line}"); + let parts: Vec<&str> = line.split_whitespace().collect(); + if parts.len() < 3 { + return None; + } + + let (vlan, mac_address, port) = match parts.len() { + 3 
=> ( + u16::from_str(parts[0]).ok()?, + parse_brocade_mac_address(parts[1]).ok()?, + parts[2].to_string(), + ), + _ => ( + 1, + parse_brocade_mac_address(parts[0]).ok()?, + parts[1].to_string(), + ), + }; + + let port = + PortDeclaration::parse(&port).map_err(|e| Error::UnexpectedError(format!("{e}"))); + + match port { + Ok(p) => Some(Ok(MacAddressEntry { + vlan, + mac_address, + port: p, + })), + Err(e) => Some(Err(e)), + } + } + + fn parse_stack_port_entry(&self, line: &str) -> Option> { + debug!("[Brocade] Parsing stack port entry: {line}"); + let parts: Vec<&str> = line.split_whitespace().collect(); + if parts.len() < 10 { + return None; + } + + let local_port = PortLocation::from_str(parts[0]).ok()?; + + Some(Ok(InterSwitchLink { + local_port, + remote_port: None, + })) + } + + fn build_port_channel_commands( + &self, + channel_id: PortChannelId, + channel_name: &str, + ports: &[PortLocation], + ) -> Vec { + let mut commands = vec![ + "configure terminal".to_string(), + format!("lag {channel_name} static id {channel_id}"), + ]; + + for port in ports { + commands.push(format!("ports ethernet {port}")); + } + + commands.push(format!("primary-port {}", ports[0])); + commands.push("deploy".into()); + commands.push("exit".into()); + commands.push("write memory".into()); + commands.push("exit".into()); + + commands + } +} + +#[async_trait] +impl BrocadeClient for FastIronClient { + async fn version(&self) -> Result { + Ok(self.version.clone()) + } + + async fn get_mac_address_table(&self) -> Result, Error> { + info!("[Brocade] Showing MAC address table..."); + + let output = self + .shell + .run_command("show mac-address", ExecutionMode::Regular) + .await?; + + output + .lines() + .skip(2) + .filter_map(|line| self.parse_mac_entry(line)) + .collect() + } + + async fn get_stack_topology(&self) -> Result, Error> { + let output = self + .shell + .run_command("show interface stack-ports", crate::ExecutionMode::Regular) + .await?; + + output + .lines() + .skip(1) + 
.filter_map(|line| self.parse_stack_port_entry(line)) + .collect() + } + + async fn get_interfaces(&self) -> Result, Error> { + todo!() + } + + async fn configure_interfaces( + &self, + _interfaces: Vec<(String, PortOperatingMode)>, + ) -> Result<(), Error> { + todo!() + } + + async fn find_available_channel_id(&self) -> Result { + info!("[Brocade] Finding next available channel id..."); + + let output = self + .shell + .run_command("show lag", ExecutionMode::Regular) + .await?; + let re = Regex::new(r"=== LAG .* ID\s+(\d+)").expect("Invalid regex"); + + let used_ids: HashSet = output + .lines() + .filter_map(|line| { + re.captures(line) + .and_then(|c| c.get(1)) + .and_then(|id_match| id_match.as_str().parse().ok()) + }) + .collect(); + + let mut next_id: u8 = 1; + loop { + if !used_ids.contains(&next_id) { + break; + } + next_id += 1; + } + + info!("[Brocade] Found channel id: {next_id}"); + Ok(next_id) + } + + async fn create_port_channel( + &self, + channel_id: PortChannelId, + channel_name: &str, + ports: &[PortLocation], + ) -> Result<(), Error> { + info!( + "[Brocade] Configuring port-channel '{channel_name} {channel_id}' with ports: {ports:?}" + ); + + let commands = self.build_port_channel_commands(channel_id, channel_name, ports); + self.shell + .run_commands(commands, ExecutionMode::Privileged) + .await?; + + info!("[Brocade] Port-channel '{channel_name}' configured."); + Ok(()) + } + + async fn clear_port_channel(&self, channel_name: &str) -> Result<(), Error> { + info!("[Brocade] Clearing port-channel: {channel_name}"); + + let commands = vec![ + "configure terminal".to_string(), + format!("no lag {channel_name}"), + "write memory".to_string(), + ]; + self.shell + .run_commands(commands, ExecutionMode::Privileged) + .await?; + + info!("[Brocade] Port-channel '{channel_name}' cleared."); + Ok(()) + } +} diff --git a/brocade/src/lib.rs b/brocade/src/lib.rs new file mode 100644 index 0000000..3822abd --- /dev/null +++ b/brocade/src/lib.rs @@ -0,0 +1,336 
@@ +use std::net::IpAddr; +use std::{ + fmt::{self, Display}, + time::Duration, +}; + +use crate::network_operating_system::NetworkOperatingSystemClient; +use crate::{ + fast_iron::FastIronClient, + shell::{BrocadeSession, BrocadeShell}, +}; + +use async_trait::async_trait; +use harmony_types::net::MacAddress; +use harmony_types::switch::{PortDeclaration, PortLocation}; +use regex::Regex; + +mod fast_iron; +mod network_operating_system; +mod shell; +mod ssh; + +#[derive(Default, Clone, Debug)] +pub struct BrocadeOptions { + pub dry_run: bool, + pub ssh: ssh::SshOptions, + pub timeouts: TimeoutConfig, +} + +#[derive(Clone, Debug)] +pub struct TimeoutConfig { + pub shell_ready: Duration, + pub command_execution: Duration, + pub cleanup: Duration, + pub message_wait: Duration, +} + +impl Default for TimeoutConfig { + fn default() -> Self { + Self { + shell_ready: Duration::from_secs(10), + command_execution: Duration::from_secs(60), // Commands like `deploy` (for a LAG) can take a while + cleanup: Duration::from_secs(10), + message_wait: Duration::from_millis(500), + } + } +} + +enum ExecutionMode { + Regular, + Privileged, +} + +#[derive(Clone, Debug)] +pub struct BrocadeInfo { + os: BrocadeOs, + version: String, +} + +#[derive(Clone, Debug)] +pub enum BrocadeOs { + NetworkOperatingSystem, + FastIron, + Unknown, +} + +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)] +pub struct MacAddressEntry { + pub vlan: u16, + pub mac_address: MacAddress, + pub port: PortDeclaration, +} + +pub type PortChannelId = u8; + +/// Represents a single physical or logical link connecting two switches within a stack or fabric. +/// +/// This structure provides a standardized view of the topology regardless of the +/// underlying Brocade OS configuration (stacking vs. fabric). +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct InterSwitchLink { + /// The local port on the switch where the topology command was run. 
+ pub local_port: PortLocation, + /// The port on the directly connected neighboring switch. + pub remote_port: Option, +} + +/// Represents the key running configuration status of a single switch interface. +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct InterfaceInfo { + /// The full configuration name (e.g., "TenGigabitEthernet 1/0/1", "FortyGigabitEthernet 2/0/2"). + pub name: String, + /// The physical location of the interface. + pub port_location: PortLocation, + /// The parsed type and name prefix of the interface. + pub interface_type: InterfaceType, + /// The primary configuration mode defining the interface's behavior (L2, L3, Fabric). + pub operating_mode: Option, + /// Indicates the current state of the interface. + pub status: InterfaceStatus, +} + +/// Categorizes the functional type of a switch interface. +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum InterfaceType { + /// Physical or virtual Ethernet interface (e.g., TenGigabitEthernet, FortyGigabitEthernet). + Ethernet(String), +} + +impl fmt::Display for InterfaceType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + InterfaceType::Ethernet(name) => write!(f, "{name}"), + } + } +} + +/// Defines the primary configuration mode of a switch interface, representing mutually exclusive roles. +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum PortOperatingMode { + /// The interface is explicitly configured for Brocade fabric roles (ISL or Trunk enabled). + Fabric, + /// The interface is configured for standard Layer 2 switching as Trunk port (`switchport mode trunk`). + Trunk, + /// The interface is configured for standard Layer 2 switching as Access port (`switchport` without trunk mode). + Access, +} + +/// Defines the possible status of an interface. +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum InterfaceStatus { + /// The interface is connected. + Connected, + /// The interface is not connected and is not expected to be. 
+ NotConnected, + /// The interface is not connected but is expected to be (configured with `no shutdown`). + SfpAbsent, +} + +pub async fn init( + ip_addresses: &[IpAddr], + port: u16, + username: &str, + password: &str, + options: Option, +) -> Result, Error> { + let shell = BrocadeShell::init(ip_addresses, port, username, password, options).await?; + + let version_info = shell + .with_session(ExecutionMode::Regular, |session| { + Box::pin(get_brocade_info(session)) + }) + .await?; + + Ok(match version_info.os { + BrocadeOs::FastIron => Box::new(FastIronClient::init(shell, version_info)), + BrocadeOs::NetworkOperatingSystem => { + Box::new(NetworkOperatingSystemClient::init(shell, version_info)) + } + BrocadeOs::Unknown => todo!(), + }) +} + +#[async_trait] +pub trait BrocadeClient { + /// Retrieves the operating system and version details from the connected Brocade switch. + /// + /// This is typically the first call made after establishing a connection to determine + /// the switch OS family (e.g., FastIron, NOS) for feature compatibility. + /// + /// # Returns + /// + /// A `BrocadeInfo` structure containing parsed OS type and version string. + async fn version(&self) -> Result; + + /// Retrieves the dynamically learned MAC address table from the switch. + /// + /// This is crucial for discovering where specific network endpoints (MAC addresses) + /// are currently located on the physical ports. + /// + /// # Returns + /// + /// A vector of `MacAddressEntry`, where each entry typically contains VLAN, MAC address, + /// and the associated port name/index. + async fn get_mac_address_table(&self) -> Result, Error>; + + /// Derives the physical connections used to link multiple switches together + /// to form a single logical entity (stack, fabric, etc.). + /// + /// This abstracts the underlying configuration (e.g., stack ports, fabric ports) + /// to return a standardized view of the topology. 
+ /// + /// # Returns + /// + /// A vector of `InterSwitchLink` structs detailing which ports are used for stacking/fabric. + /// If the switch is not stacked, returns an empty vector. + async fn get_stack_topology(&self) -> Result, Error>; + + /// Retrieves the status for all interfaces + /// + /// # Returns + /// + /// A vector of `InterfaceInfo` structures. + async fn get_interfaces(&self) -> Result, Error>; + + /// Configures a set of interfaces to be operated with a specified mode (access ports, ISL, etc.). + async fn configure_interfaces( + &self, + interfaces: Vec<(String, PortOperatingMode)>, + ) -> Result<(), Error>; + + /// Scans the existing configuration to find the next available (unused) + /// Port-Channel ID (`lag` or `trunk`) for assignment. + /// + /// # Returns + /// + /// The smallest, unassigned `PortChannelId` within the supported range. + async fn find_available_channel_id(&self) -> Result; + + /// Creates and configures a new Port-Channel (Link Aggregation Group or LAG) + /// using the specified channel ID and ports. + /// + /// The resulting configuration must be persistent (saved to startup-config). + /// Assumes a static LAG configuration mode unless specified otherwise by the implementation. + /// + /// # Parameters + /// + /// * `channel_id`: The ID (e.g., 1-128) for the logical port channel. + /// * `channel_name`: A descriptive name for the LAG (used in configuration context). + /// * `ports`: A slice of `PortLocation` structs defining the physical member ports. + async fn create_port_channel( + &self, + channel_id: PortChannelId, + channel_name: &str, + ports: &[PortLocation], + ) -> Result<(), Error>; + + /// Removes all configuration associated with the specified Port-Channel name. + /// + /// This operation should be idempotent; attempting to clear a non-existent + /// channel should succeed (or return a benign error). + /// + /// # Parameters + /// + /// * `channel_name`: The name of the Port-Channel (LAG) to delete. 
+ /// + async fn clear_port_channel(&self, channel_name: &str) -> Result<(), Error>; +} + +async fn get_brocade_info(session: &mut BrocadeSession) -> Result { + let output = session.run_command("show version").await?; + + if output.contains("Network Operating System") { + let re = Regex::new(r"Network Operating System Version:\s*(?P[a-zA-Z0-9.\-]+)") + .expect("Invalid regex"); + let version = re + .captures(&output) + .and_then(|cap| cap.name("version")) + .map(|m| m.as_str().to_string()) + .unwrap_or_default(); + + return Ok(BrocadeInfo { + os: BrocadeOs::NetworkOperatingSystem, + version, + }); + } else if output.contains("ICX") { + let re = Regex::new(r"(?m)^\s*SW: Version\s*(?P[a-zA-Z0-9.\-]+)") + .expect("Invalid regex"); + let version = re + .captures(&output) + .and_then(|cap| cap.name("version")) + .map(|m| m.as_str().to_string()) + .unwrap_or_default(); + + return Ok(BrocadeInfo { + os: BrocadeOs::FastIron, + version, + }); + } + + Err(Error::UnexpectedError("Unknown Brocade OS version".into())) +} + +fn parse_brocade_mac_address(value: &str) -> Result { + let cleaned_mac = value.replace('.', ""); + + if cleaned_mac.len() != 12 { + return Err(format!("Invalid MAC address: {value}")); + } + + let mut bytes = [0u8; 6]; + for (i, pair) in cleaned_mac.as_bytes().chunks(2).enumerate() { + let byte_str = std::str::from_utf8(pair).map_err(|_| "Invalid UTF-8")?; + bytes[i] = + u8::from_str_radix(byte_str, 16).map_err(|_| format!("Invalid hex in MAC: {value}"))?; + } + + Ok(MacAddress(bytes)) +} + +#[derive(Debug)] +pub enum Error { + NetworkError(String), + AuthenticationError(String), + ConfigurationError(String), + TimeoutError(String), + UnexpectedError(String), + CommandError(String), +} + +impl Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Error::NetworkError(msg) => write!(f, "Network error: {msg}"), + Error::AuthenticationError(msg) => write!(f, "Authentication error: {msg}"), + 
Error::ConfigurationError(msg) => write!(f, "Configuration error: {msg}"), + Error::TimeoutError(msg) => write!(f, "Timeout error: {msg}"), + Error::UnexpectedError(msg) => write!(f, "Unexpected error: {msg}"), + Error::CommandError(msg) => write!(f, "{msg}"), + } + } +} + +impl From for String { + fn from(val: Error) -> Self { + format!("{val}") + } +} + +impl std::error::Error for Error {} + +impl From for Error { + fn from(value: russh::Error) -> Self { + Error::NetworkError(format!("Russh client error: {value}")) + } +} diff --git a/brocade/src/network_operating_system.rs b/brocade/src/network_operating_system.rs new file mode 100644 index 0000000..4b9b271 --- /dev/null +++ b/brocade/src/network_operating_system.rs @@ -0,0 +1,306 @@ +use std::str::FromStr; + +use async_trait::async_trait; +use harmony_types::switch::{PortDeclaration, PortLocation}; +use log::{debug, info}; + +use crate::{ + BrocadeClient, BrocadeInfo, Error, ExecutionMode, InterSwitchLink, InterfaceInfo, + InterfaceStatus, InterfaceType, MacAddressEntry, PortChannelId, PortOperatingMode, + parse_brocade_mac_address, shell::BrocadeShell, +}; + +pub struct NetworkOperatingSystemClient { + shell: BrocadeShell, + version: BrocadeInfo, +} + +impl NetworkOperatingSystemClient { + pub fn init(mut shell: BrocadeShell, version_info: BrocadeInfo) -> Self { + shell.before_all(vec!["terminal length 0".into()]); + + Self { + shell, + version: version_info, + } + } + + fn parse_mac_entry(&self, line: &str) -> Option> { + debug!("[Brocade] Parsing mac address entry: {line}"); + let parts: Vec<&str> = line.split_whitespace().collect(); + if parts.len() < 5 { + return None; + } + + let (vlan, mac_address, port) = match parts.len() { + 5 => ( + u16::from_str(parts[0]).ok()?, + parse_brocade_mac_address(parts[1]).ok()?, + parts[4].to_string(), + ), + _ => ( + u16::from_str(parts[0]).ok()?, + parse_brocade_mac_address(parts[1]).ok()?, + parts[5].to_string(), + ), + }; + + let port = + 
PortDeclaration::parse(&port).map_err(|e| Error::UnexpectedError(format!("{e}"))); + + match port { + Ok(p) => Some(Ok(MacAddressEntry { + vlan, + mac_address, + port: p, + })), + Err(e) => Some(Err(e)), + } + } + + fn parse_inter_switch_link_entry(&self, line: &str) -> Option> { + debug!("[Brocade] Parsing inter switch link entry: {line}"); + let parts: Vec<&str> = line.split_whitespace().collect(); + if parts.len() < 10 { + return None; + } + + let local_port = PortLocation::from_str(parts[2]).ok()?; + let remote_port = PortLocation::from_str(parts[5]).ok()?; + + Some(Ok(InterSwitchLink { + local_port, + remote_port: Some(remote_port), + })) + } + + fn parse_interface_status_entry(&self, line: &str) -> Option> { + debug!("[Brocade] Parsing interface status entry: {line}"); + let parts: Vec<&str> = line.split_whitespace().collect(); + if parts.len() < 6 { + return None; + } + + let interface_type = match parts[0] { + "Fo" => InterfaceType::Ethernet("FortyGigabitEthernet".to_string()), + "Te" => InterfaceType::Ethernet("TenGigabitEthernet".to_string()), + _ => return None, + }; + let port_location = PortLocation::from_str(parts[1]).ok()?; + let status = match parts[2] { + "connected" => InterfaceStatus::Connected, + "notconnected" => InterfaceStatus::NotConnected, + "sfpAbsent" => InterfaceStatus::SfpAbsent, + _ => return None, + }; + let operating_mode = match parts[3] { + "ISL" => Some(PortOperatingMode::Fabric), + "Trunk" => Some(PortOperatingMode::Trunk), + "Access" => Some(PortOperatingMode::Access), + "--" => None, + _ => return None, + }; + + Some(Ok(InterfaceInfo { + name: format!("{} {}", interface_type, port_location), + port_location, + interface_type, + operating_mode, + status, + })) + } +} + +#[async_trait] +impl BrocadeClient for NetworkOperatingSystemClient { + async fn version(&self) -> Result { + Ok(self.version.clone()) + } + + async fn get_mac_address_table(&self) -> Result, Error> { + let output = self + .shell + .run_command("show 
mac-address-table", ExecutionMode::Regular) + .await?; + + output + .lines() + .skip(1) + .filter_map(|line| self.parse_mac_entry(line)) + .collect() + } + + async fn get_stack_topology(&self) -> Result, Error> { + let output = self + .shell + .run_command("show fabric isl", ExecutionMode::Regular) + .await?; + + output + .lines() + .skip(6) + .filter_map(|line| self.parse_inter_switch_link_entry(line)) + .collect() + } + + async fn get_interfaces(&self) -> Result, Error> { + let output = self + .shell + .run_command( + "show interface status rbridge-id all", + ExecutionMode::Regular, + ) + .await?; + + output + .lines() + .skip(2) + .filter_map(|line| self.parse_interface_status_entry(line)) + .collect() + } + + async fn configure_interfaces( + &self, + interfaces: Vec<(String, PortOperatingMode)>, + ) -> Result<(), Error> { + info!("[Brocade] Configuring {} interface(s)...", interfaces.len()); + + let mut commands = vec!["configure terminal".to_string()]; + + for interface in interfaces { + commands.push(format!("interface {}", interface.0)); + + match interface.1 { + PortOperatingMode::Fabric => { + commands.push("fabric isl enable".into()); + commands.push("fabric trunk enable".into()); + } + PortOperatingMode::Trunk => { + commands.push("switchport".into()); + commands.push("switchport mode trunk".into()); + commands.push("no spanning-tree shutdown".into()); + commands.push("no fabric isl enable".into()); + commands.push("no fabric trunk enable".into()); + } + PortOperatingMode::Access => { + commands.push("switchport".into()); + commands.push("switchport mode access".into()); + commands.push("switchport access vlan 1".into()); + commands.push("no spanning-tree shutdown".into()); + commands.push("no fabric isl enable".into()); + commands.push("no fabric trunk enable".into()); + } + } + + commands.push("no shutdown".into()); + commands.push("exit".into()); + } + + commands.push("write memory".into()); + + self.shell + .run_commands(commands, 
ExecutionMode::Regular) + .await?; + + info!("[Brocade] Interfaces configured."); + + Ok(()) + } + + async fn find_available_channel_id(&self) -> Result { + info!("[Brocade] Finding next available channel id..."); + + let output = self + .shell + .run_command("show port-channel", ExecutionMode::Regular) + .await?; + + let used_ids: Vec = output + .lines() + .skip(6) + .filter_map(|line| { + let parts: Vec<&str> = line.split_whitespace().collect(); + if parts.len() < 8 { + return None; + } + + u8::from_str(parts[0]).ok() + }) + .collect(); + + let mut next_id: u8 = 1; + loop { + if !used_ids.contains(&next_id) { + break; + } + next_id += 1; + } + + info!("[Brocade] Found channel id: {next_id}"); + Ok(next_id) + } + + async fn create_port_channel( + &self, + channel_id: PortChannelId, + channel_name: &str, + ports: &[PortLocation], + ) -> Result<(), Error> { + info!( + "[Brocade] Configuring port-channel '{channel_name} {channel_id}' with ports: {ports:?}" + ); + + let interfaces = self.get_interfaces().await?; + + let mut commands = vec![ + "configure terminal".into(), + format!("interface port-channel {}", channel_id), + "no shutdown".into(), + "exit".into(), + ]; + + for port in ports { + let interface = interfaces.iter().find(|i| i.port_location == *port); + let Some(interface) = interface else { + continue; + }; + + commands.push(format!("interface {}", interface.name)); + commands.push("no switchport".into()); + commands.push("no ip address".into()); + commands.push("no fabric isl enable".into()); + commands.push("no fabric trunk enable".into()); + commands.push(format!("channel-group {} mode active", channel_id)); + commands.push("no shutdown".into()); + commands.push("exit".into()); + } + + commands.push("write memory".into()); + + self.shell + .run_commands(commands, ExecutionMode::Regular) + .await?; + + info!("[Brocade] Port-channel '{channel_name}' configured."); + + Ok(()) + } + + async fn clear_port_channel(&self, channel_name: &str) -> Result<(), 
Error> { + info!("[Brocade] Clearing port-channel: {channel_name}"); + + let commands = vec![ + "configure terminal".into(), + format!("no interface port-channel {}", channel_name), + "exit".into(), + "write memory".into(), + ]; + + self.shell + .run_commands(commands, ExecutionMode::Regular) + .await?; + + info!("[Brocade] Port-channel '{channel_name}' cleared."); + Ok(()) + } +} diff --git a/brocade/src/shell.rs b/brocade/src/shell.rs new file mode 100644 index 0000000..cfa672d --- /dev/null +++ b/brocade/src/shell.rs @@ -0,0 +1,367 @@ +use std::net::IpAddr; +use std::time::Duration; +use std::time::Instant; + +use crate::BrocadeOptions; +use crate::Error; +use crate::ExecutionMode; +use crate::TimeoutConfig; +use crate::ssh; + +use log::debug; +use log::info; +use russh::ChannelMsg; +use tokio::time::timeout; + +pub struct BrocadeShell { + ip: IpAddr, + port: u16, + username: String, + password: String, + options: BrocadeOptions, + before_all_commands: Vec, + after_all_commands: Vec, +} + +impl BrocadeShell { + pub async fn init( + ip_addresses: &[IpAddr], + port: u16, + username: &str, + password: &str, + options: Option, + ) -> Result { + let ip = ip_addresses + .first() + .ok_or_else(|| Error::ConfigurationError("No IP addresses provided".to_string()))?; + + let base_options = options.unwrap_or_default(); + let options = ssh::try_init_client(username, password, ip, base_options).await?; + + Ok(Self { + ip: *ip, + port, + username: username.to_string(), + password: password.to_string(), + before_all_commands: vec![], + after_all_commands: vec![], + options, + }) + } + + pub async fn open_session(&self, mode: ExecutionMode) -> Result { + BrocadeSession::open( + self.ip, + self.port, + &self.username, + &self.password, + self.options.clone(), + mode, + ) + .await + } + + pub async fn with_session(&self, mode: ExecutionMode, callback: F) -> Result + where + F: FnOnce( + &mut BrocadeSession, + ) -> std::pin::Pin< + Box> + Send + '_>, + >, + { + let mut session = 
self.open_session(mode).await?; + + let _ = session.run_commands(self.before_all_commands.clone()).await; + let result = callback(&mut session).await; + let _ = session.run_commands(self.after_all_commands.clone()).await; + + session.close().await?; + result + } + + pub async fn run_command(&self, command: &str, mode: ExecutionMode) -> Result { + let mut session = self.open_session(mode).await?; + + let _ = session.run_commands(self.before_all_commands.clone()).await; + let result = session.run_command(command).await; + let _ = session.run_commands(self.after_all_commands.clone()).await; + + session.close().await?; + result + } + + pub async fn run_commands( + &self, + commands: Vec, + mode: ExecutionMode, + ) -> Result<(), Error> { + let mut session = self.open_session(mode).await?; + + let _ = session.run_commands(self.before_all_commands.clone()).await; + let result = session.run_commands(commands).await; + let _ = session.run_commands(self.after_all_commands.clone()).await; + + session.close().await?; + result + } + + pub fn before_all(&mut self, commands: Vec) { + self.before_all_commands = commands; + } + + pub fn after_all(&mut self, commands: Vec) { + self.after_all_commands = commands; + } +} + +pub struct BrocadeSession { + pub channel: russh::Channel, + pub mode: ExecutionMode, + pub options: BrocadeOptions, +} + +impl BrocadeSession { + pub async fn open( + ip: IpAddr, + port: u16, + username: &str, + password: &str, + options: BrocadeOptions, + mode: ExecutionMode, + ) -> Result { + let client = ssh::create_client(ip, port, username, password, &options).await?; + let mut channel = client.channel_open_session().await?; + + channel + .request_pty(false, "vt100", 80, 24, 0, 0, &[]) + .await?; + channel.request_shell(false).await?; + + wait_for_shell_ready(&mut channel, &options.timeouts).await?; + + if let ExecutionMode::Privileged = mode { + try_elevate_session(&mut channel, username, password, &options.timeouts).await?; + } + + Ok(Self { + channel, + 
mode, + options, + }) + } + + pub async fn close(&mut self) -> Result<(), Error> { + debug!("[Brocade] Closing session..."); + + self.channel.data(&b"exit\n"[..]).await?; + if let ExecutionMode::Privileged = self.mode { + self.channel.data(&b"exit\n"[..]).await?; + } + + let start = Instant::now(); + while start.elapsed() < self.options.timeouts.cleanup { + match timeout(self.options.timeouts.message_wait, self.channel.wait()).await { + Ok(Some(ChannelMsg::Close)) => break, + Ok(Some(_)) => continue, + Ok(None) | Err(_) => break, + } + } + + debug!("[Brocade] Session closed."); + Ok(()) + } + + pub async fn run_command(&mut self, command: &str) -> Result { + if self.should_skip_command(command) { + return Ok(String::new()); + } + + debug!("[Brocade] Running command: '{command}'..."); + + self.channel + .data(format!("{}\n", command).as_bytes()) + .await?; + tokio::time::sleep(Duration::from_millis(100)).await; + + let output = self.collect_command_output().await?; + let output = String::from_utf8(output) + .map_err(|_| Error::UnexpectedError("Invalid UTF-8 in command output".to_string()))?; + + self.check_for_command_errors(&output, command)?; + Ok(output) + } + + pub async fn run_commands(&mut self, commands: Vec) -> Result<(), Error> { + for command in commands { + self.run_command(&command).await?; + } + Ok(()) + } + + fn should_skip_command(&self, command: &str) -> bool { + if (command.starts_with("write") || command.starts_with("deploy")) && self.options.dry_run { + info!("[Brocade] Dry-run mode enabled, skipping command: {command}"); + return true; + } + false + } + + async fn collect_command_output(&mut self) -> Result, Error> { + let mut output = Vec::new(); + let start = Instant::now(); + let read_timeout = Duration::from_millis(500); + let log_interval = Duration::from_secs(3); + let mut last_log = Instant::now(); + + loop { + if start.elapsed() > self.options.timeouts.command_execution { + return Err(Error::TimeoutError( + "Timeout waiting for command 
completion.".into(), + )); + } + + if start.elapsed() > Duration::from_secs(5) && last_log.elapsed() > log_interval { + info!("[Brocade] Waiting for command output..."); + last_log = Instant::now(); + } + + match timeout(read_timeout, self.channel.wait()).await { + Ok(Some(ChannelMsg::Data { data } | ChannelMsg::ExtendedData { data, .. })) => { + output.extend_from_slice(&data); + let current_output = String::from_utf8_lossy(&output); + if current_output.contains('>') || current_output.contains('#') { + return Ok(output); + } + } + Ok(Some(ChannelMsg::Eof | ChannelMsg::Close)) => return Ok(output), + Ok(Some(ChannelMsg::ExitStatus { exit_status })) => { + debug!("[Brocade] Command exit status: {exit_status}"); + } + Ok(Some(_)) => continue, + Ok(None) | Err(_) => { + if output.is_empty() { + if let Ok(None) = timeout(read_timeout, self.channel.wait()).await { + break; + } + continue; + } + + tokio::time::sleep(Duration::from_millis(100)).await; + let current_output = String::from_utf8_lossy(&output); + if current_output.contains('>') || current_output.contains('#') { + return Ok(output); + } + } + } + } + + Ok(output) + } + + fn check_for_command_errors(&self, output: &str, command: &str) -> Result<(), Error> { + const ERROR_PATTERNS: &[&str] = &[ + "invalid input", + "syntax error", + "command not found", + "unknown command", + "permission denied", + "access denied", + "authentication failed", + "configuration error", + "failed to", + "error:", + ]; + + let output_lower = output.to_lowercase(); + if ERROR_PATTERNS.iter().any(|&p| output_lower.contains(p)) { + return Err(Error::CommandError(format!( + "Command '{command}' failed: {}", + output.trim() + ))); + } + + if !command.starts_with("show") && output.trim().is_empty() { + return Err(Error::CommandError(format!( + "Command '{command}' produced no output" + ))); + } + + Ok(()) + } +} + +async fn wait_for_shell_ready( + channel: &mut russh::Channel, + timeouts: &TimeoutConfig, +) -> Result<(), Error> { + let mut 
buffer = Vec::new(); + let start = Instant::now(); + + while start.elapsed() < timeouts.shell_ready { + match timeout(timeouts.message_wait, channel.wait()).await { + Ok(Some(ChannelMsg::Data { data })) => { + buffer.extend_from_slice(&data); + let output = String::from_utf8_lossy(&buffer); + let output = output.trim(); + if output.ends_with('>') || output.ends_with('#') { + debug!("[Brocade] Shell ready"); + return Ok(()); + } + } + Ok(Some(_)) => continue, + Ok(None) => break, + Err(_) => continue, + } + } + Ok(()) +} + +async fn try_elevate_session( + channel: &mut russh::Channel, + username: &str, + password: &str, + timeouts: &TimeoutConfig, +) -> Result<(), Error> { + channel.data(&b"enable\n"[..]).await?; + let start = Instant::now(); + let mut buffer = Vec::new(); + + while start.elapsed() < timeouts.shell_ready { + match timeout(timeouts.message_wait, channel.wait()).await { + Ok(Some(ChannelMsg::Data { data })) => { + buffer.extend_from_slice(&data); + let output = String::from_utf8_lossy(&buffer); + + if output.ends_with('#') { + debug!("[Brocade] Privileged mode established"); + return Ok(()); + } + + if output.contains("User Name:") { + channel.data(format!("{}\n", username).as_bytes()).await?; + buffer.clear(); + } else if output.contains("Password:") { + channel.data(format!("{}\n", password).as_bytes()).await?; + buffer.clear(); + } else if output.contains('>') { + return Err(Error::AuthenticationError( + "Enable authentication failed".into(), + )); + } + } + Ok(Some(_)) => continue, + Ok(None) => break, + Err(_) => continue, + } + } + + let output = String::from_utf8_lossy(&buffer); + if output.ends_with('#') { + debug!("[Brocade] Privileged mode established"); + Ok(()) + } else { + Err(Error::AuthenticationError(format!( + "Enable failed. 
Output:\n{output}" + ))) + } +} diff --git a/brocade/src/ssh.rs b/brocade/src/ssh.rs new file mode 100644 index 0000000..08ff96f --- /dev/null +++ b/brocade/src/ssh.rs @@ -0,0 +1,113 @@ +use std::borrow::Cow; +use std::sync::Arc; + +use async_trait::async_trait; +use russh::client::Handler; +use russh::kex::DH_G1_SHA1; +use russh::kex::ECDH_SHA2_NISTP256; +use russh_keys::key::SSH_RSA; + +use super::BrocadeOptions; +use super::Error; + +#[derive(Default, Clone, Debug)] +pub struct SshOptions { + pub preferred_algorithms: russh::Preferred, +} + +impl SshOptions { + fn ecdhsa_sha2_nistp256() -> Self { + Self { + preferred_algorithms: russh::Preferred { + kex: Cow::Borrowed(&[ECDH_SHA2_NISTP256]), + key: Cow::Borrowed(&[SSH_RSA]), + ..Default::default() + }, + } + } + + fn legacy() -> Self { + Self { + preferred_algorithms: russh::Preferred { + kex: Cow::Borrowed(&[DH_G1_SHA1]), + key: Cow::Borrowed(&[SSH_RSA]), + ..Default::default() + }, + } + } +} + +pub struct Client; + +#[async_trait] +impl Handler for Client { + type Error = Error; + + async fn check_server_key( + &mut self, + _server_public_key: &russh_keys::key::PublicKey, + ) -> Result { + Ok(true) + } +} + +pub async fn try_init_client( + username: &str, + password: &str, + ip: &std::net::IpAddr, + base_options: BrocadeOptions, +) -> Result { + let ssh_options = vec![ + SshOptions::default(), + SshOptions::ecdhsa_sha2_nistp256(), + SshOptions::legacy(), + ]; + + for ssh in ssh_options { + let opts = BrocadeOptions { + ssh, + ..base_options.clone() + }; + let client = create_client(*ip, 22, username, password, &opts).await; + + match client { + Ok(_) => { + return Ok(opts); + } + Err(e) => match e { + Error::NetworkError(e) => { + if e.contains("No common key exchange algorithm") { + continue; + } else { + return Err(Error::NetworkError(e)); + } + } + _ => return Err(e), + }, + } + } + + Err(Error::NetworkError( + "Could not establish ssh connection: wrong key exchange algorithm)".to_string(), + )) +} + +pub 
async fn create_client( + ip: std::net::IpAddr, + port: u16, + username: &str, + password: &str, + options: &BrocadeOptions, +) -> Result, Error> { + let config = russh::client::Config { + preferred: options.ssh.preferred_algorithms.clone(), + ..Default::default() + }; + let mut client = russh::client::connect(Arc::new(config), (ip, port), Client {}).await?; + if !client.authenticate_password(username, password).await? { + return Err(Error::AuthenticationError( + "ssh authentication failed".to_string(), + )); + } + Ok(client) +} diff --git a/harmony_types/src/id.rs b/harmony_types/src/id.rs index 2cb2674..0a82906 100644 --- a/harmony_types/src/id.rs +++ b/harmony_types/src/id.rs @@ -19,7 +19,7 @@ use serde::{Deserialize, Serialize}; /// /// **It is not meant to be very secure or unique**, it is suitable to generate up to 10 000 items per /// second with a reasonable collision rate of 0,000014 % as calculated by this calculator : https://kevingal.com/apps/collision.html -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord, Serialize, Deserialize)] pub struct Id { value: String, } diff --git a/harmony_types/src/lib.rs b/harmony_types/src/lib.rs index 7bb1abd..098379a 100644 --- a/harmony_types/src/lib.rs +++ b/harmony_types/src/lib.rs @@ -1,2 +1,3 @@ pub mod id; pub mod net; +pub mod switch; diff --git a/harmony_types/src/net.rs b/harmony_types/src/net.rs index 594a3e2..51de86e 100644 --- a/harmony_types/src/net.rs +++ b/harmony_types/src/net.rs @@ -1,6 +1,6 @@ use serde::{Deserialize, Serialize}; -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize, PartialOrd, Ord)] pub struct MacAddress(pub [u8; 6]); impl MacAddress { @@ -41,7 +41,7 @@ impl TryFrom for MacAddress { bytes[i] = u8::from_str_radix(part, 16).map_err(|_| { std::io::Error::new( std::io::ErrorKind::InvalidInput, - format!("Invalid hex value in part 
{}: '{}'", i, part), + format!("Invalid hex value in part {i}: '{part}'"), ) })?; } @@ -106,8 +106,8 @@ impl Serialize for Url { impl std::fmt::Display for Url { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - Url::LocalFolder(path) => write!(f, "{}", path), - Url::Url(url) => write!(f, "{}", url), + Url::LocalFolder(path) => write!(f, "{path}"), + Url::Url(url) => write!(f, "{url}"), } } } diff --git a/harmony_types/src/switch.rs b/harmony_types/src/switch.rs new file mode 100644 index 0000000..2d32754 --- /dev/null +++ b/harmony_types/src/switch.rs @@ -0,0 +1,176 @@ +use std::{fmt, str::FromStr}; + +/// Simple error type for port parsing failures. +#[derive(Debug)] +pub enum PortParseError { + /// The port string did not conform to the expected S/M/P or range format. + InvalidFormat, + /// A stack, module, or port segment could not be parsed as a number. + InvalidSegment(String), +} + +impl fmt::Display for PortParseError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + PortParseError::InvalidFormat => write!(f, "Port string is in an unexpected format."), + PortParseError::InvalidSegment(s) => write!(f, "Invalid segment in port string: {}", s), + } + } +} + +/// Represents the atomic, physical location of a switch port: `//`. +/// +/// Example: `1/1/1` +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)] +pub struct PortLocation(pub u8, pub u8, pub u8); + +impl fmt::Display for PortLocation { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}/{}/{}", self.0, self.1, self.2) + } +} + +impl FromStr for PortLocation { + type Err = PortParseError; + + /// Parses a string slice into a `PortLocation`. 
+ /// + /// # Examples + /// + /// ```rust + /// use std::str::FromStr; + /// use harmony_types::switch::PortLocation; + /// + /// assert_eq!(PortLocation::from_str("1/1/1").unwrap(), PortLocation(1, 1, 1)); + /// assert_eq!(PortLocation::from_str("12/5/48").unwrap(), PortLocation(12, 5, 48)); + /// assert!(PortLocation::from_str("1/A/1").is_err()); + /// ``` + fn from_str(s: &str) -> Result { + let parts: Vec<&str> = s.split('/').collect(); + + if parts.len() != 3 { + return Err(PortParseError::InvalidFormat); + } + + let parse_segment = |part: &str| -> Result { + u8::from_str(part).map_err(|_| PortParseError::InvalidSegment(part.to_string())) + }; + + let stack = parse_segment(parts[0])?; + let module = parse_segment(parts[1])?; + let port = parse_segment(parts[2])?; + + Ok(PortLocation(stack, module, port)) + } +} + +/// Represents a Port configuration input, which can be a single port, a sequential range, +/// or an explicit set defined by endpoints. +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)] +pub enum PortDeclaration { + /// A single switch port defined by its location. Example: `PortDeclaration::Single(1/1/1)` + Single(PortLocation), + /// A strictly sequential range defined by two endpoints using the hyphen separator (`-`). + /// All ports between the endpoints (inclusive) are implicitly included. + /// Example: `PortDeclaration::Range(1/1/1, 1/1/4)` + Range(PortLocation, PortLocation), + /// A set of ports defined by two endpoints using the asterisk separator (`*`). + /// The actual member ports must be determined contextually (e.g., from MAC tables or + /// explicit configuration lists). + /// Example: `PortDeclaration::Set(1/1/1, 1/1/3)` where only ports 1 and 3 might be active. + Set(PortLocation, PortLocation), +} + +impl PortDeclaration { + /// Parses a port configuration string into a structured `PortDeclaration` enum. 
+ /// + /// This function performs only basic format and numerical parsing, assuming the input + /// strings (e.g., from `show` commands) are semantically valid and logically ordered. + /// + /// # Supported Formats + /// + /// * **Single Port:** `"1/1/1"` + /// * **Range (Hyphen, `-`):** `"1/1/1-1/1/4"` + /// * **Set (Asterisk, `*`):** `"1/1/1*1/1/4"` + /// + /// # Errors + /// + /// Returns `PortParseError` if the string format is incorrect or numerical segments + /// cannot be parsed. + /// + /// # Examples + /// + /// ```rust + /// use harmony_types::switch::{PortDeclaration, PortLocation}; + /// + /// // Single Port + /// assert_eq!(PortDeclaration::parse("3/2/15").unwrap(), PortDeclaration::Single(PortLocation(3, 2, 15))); + /// + /// // Range (Hyphen) - implies sequential ports + /// let result_range = PortDeclaration::parse("1/1/1-1/1/4").unwrap(); + /// assert_eq!(result_range, PortDeclaration::Range(PortLocation(1, 1, 1), PortLocation(1, 1, 4))); + /// + /// // Set (Asterisk) - implies non-sequential set defined by endpoints + /// let result_set = PortDeclaration::parse("1/1/48*2/1/48").unwrap(); + /// assert_eq!(result_set, PortDeclaration::Set(PortLocation(1, 1, 48), PortLocation(2, 1, 48))); + /// + /// // Invalid Format (will still fail basic parsing) + /// assert!(PortDeclaration::parse("1/1/1/1").is_err()); + /// ``` + pub fn parse(port_str: &str) -> Result { + if let Some((start_str, end_str)) = port_str.split_once('-') { + let start_port = PortLocation::from_str(start_str.trim())?; + let end_port = PortLocation::from_str(end_str.trim())?; + return Ok(PortDeclaration::Range(start_port, end_port)); + } + + if let Some((start_str, end_str)) = port_str.split_once('*') { + let start_port = PortLocation::from_str(start_str.trim())?; + let end_port = PortLocation::from_str(end_str.trim())?; + return Ok(PortDeclaration::Set(start_port, end_port)); + } + + let location = PortLocation::from_str(port_str)?; + Ok(PortDeclaration::Single(location)) + } +} + 
+impl fmt::Display for PortDeclaration { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + PortDeclaration::Single(port) => write!(f, "{port}"), + PortDeclaration::Range(start, end) => write!(f, "{start}-{end}"), + PortDeclaration::Set(start, end) => write!(f, "{start}*{end}"), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_parse_port_location_invalid() { + assert!(PortLocation::from_str("1/1").is_err()); + assert!(PortLocation::from_str("1/A/1").is_err()); + assert!(PortLocation::from_str("1/1/256").is_err()); + } + + #[test] + fn test_parse_declaration_single() { + let single_result = PortDeclaration::parse("1/1/4").unwrap(); + assert!(matches!(single_result, PortDeclaration::Single(_))); + } + + #[test] + fn test_parse_declaration_range() { + let range_result = PortDeclaration::parse("1/1/1-1/1/4").unwrap(); + assert!(matches!(range_result, PortDeclaration::Range(_, _))); + } + + #[test] + fn test_parse_declaration_set() { + let set_result = PortDeclaration::parse("1/1/48*2/1/48").unwrap(); + assert!(matches!(set_result, PortDeclaration::Set(_, _))); + } +} From ea39d93aa79037ac3bc847638a1ea837207fe0fc Mon Sep 17 00:00:00 2001 From: Ian Letourneau Date: Mon, 15 Sep 2025 17:07:50 -0400 Subject: [PATCH 04/12] feat(host_network): configure bonds on the host and switch port channels --- Cargo.lock | 36 +- brocade/src/network_operating_system.rs | 2 +- examples/okd_installation/src/topology.rs | 2 +- harmony/Cargo.toml | 3 + harmony/src/domain/topology/ha_cluster.rs | 299 ++++++++++++- harmony/src/domain/topology/network.rs | 84 +++- harmony/src/infra/brocade.rs | 385 +++++++++++++++++ harmony/src/infra/mod.rs | 1 + .../modules/okd/bootstrap_03_control_plane.rs | 41 +- harmony/src/modules/okd/crd/mod.rs | 41 ++ harmony/src/modules/okd/crd/nmstate.rs | 251 +++++++++++ harmony/src/modules/okd/host_network.rs | 394 ++++++++++++++++++ harmony/src/modules/okd/mod.rs | 2 + harmony_composer/src/main.rs | 4 + 14 
files changed, 1477 insertions(+), 68 deletions(-) create mode 100644 harmony/src/infra/brocade.rs create mode 100644 harmony/src/modules/okd/crd/mod.rs create mode 100644 harmony/src/modules/okd/crd/nmstate.rs create mode 100644 harmony/src/modules/okd/host_network.rs diff --git a/Cargo.lock b/Cargo.lock index f88b4a6..0b5972f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -429,6 +429,15 @@ dependencies = [ "wait-timeout", ] +[[package]] +name = "assertor" +version = "0.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ff24d87260733dc86d38a11c60d9400ce4a74a05d0dafa2a6f5ab249cd857cb" +dependencies = [ + "num-traits", +] + [[package]] name = "async-broadcast" version = "0.7.2" @@ -2333,9 +2342,11 @@ name = "harmony" version = "0.1.0" dependencies = [ "askama", + "assertor", "async-trait", "base64 0.22.1", "bollard", + "brocade", "chrono", "cidr", "convert_case", @@ -2366,6 +2377,7 @@ dependencies = [ "once_cell", "opnsense-config", "opnsense-config-xml", + "option-ext", "pretty_assertions", "reqwest 0.11.27", "russh", @@ -2432,17 +2444,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "harmony_derive" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d138bbb32bb346299c5f95fbb53532313f39927cb47c411c99c634ef8665ef7" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "harmony_inventory_agent" version = "0.1.0" @@ -3889,19 +3890,6 @@ dependencies = [ "web-time", ] -[[package]] -name = "okd_host_network" -version = "0.1.0" -dependencies = [ - "harmony", - "harmony_cli", - "harmony_derive", - "harmony_inventory_agent", - "harmony_macros", - "harmony_types", - "tokio", -] - [[package]] name = "once_cell" version = "1.21.3" diff --git a/brocade/src/network_operating_system.rs b/brocade/src/network_operating_system.rs index 4b9b271..b14bc08 100644 --- a/brocade/src/network_operating_system.rs +++ b/brocade/src/network_operating_system.rs @@ -270,7 +270,7 
@@ impl BrocadeClient for NetworkOperatingSystemClient { commands.push("no ip address".into()); commands.push("no fabric isl enable".into()); commands.push("no fabric trunk enable".into()); - commands.push(format!("channel-group {} mode active", channel_id)); + commands.push(format!("channel-group {channel_id} mode active")); commands.push("no shutdown".into()); commands.push("exit".into()); } diff --git a/examples/okd_installation/src/topology.rs b/examples/okd_installation/src/topology.rs index 02553a5..31062f5 100644 --- a/examples/okd_installation/src/topology.rs +++ b/examples/okd_installation/src/topology.rs @@ -1,6 +1,6 @@ use cidr::Ipv4Cidr; use harmony::{ - hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup}, + hardware::{Location, SwitchGroup}, infra::opnsense::OPNSenseManagementInterface, inventory::Inventory, topology::{HAClusterTopology, LogicalHost, UnmanagedRouter}, diff --git a/harmony/Cargo.toml b/harmony/Cargo.toml index ad57db1..634cbe9 100644 --- a/harmony/Cargo.toml +++ b/harmony/Cargo.toml @@ -77,6 +77,9 @@ harmony_secret = { path = "../harmony_secret" } askama.workspace = true sqlx.workspace = true inquire.workspace = true +brocade = { path = "../brocade" } +option-ext = "0.2.0" [dev-dependencies] pretty_assertions.workspace = true +assertor.workspace = true diff --git a/harmony/src/domain/topology/ha_cluster.rs b/harmony/src/domain/topology/ha_cluster.rs index c9f565e..7be2725 100644 --- a/harmony/src/domain/topology/ha_cluster.rs +++ b/harmony/src/domain/topology/ha_cluster.rs @@ -1,33 +1,36 @@ use async_trait::async_trait; +use brocade::BrocadeOptions; use harmony_macros::ip; -use harmony_types::net::MacAddress; -use harmony_types::net::Url; +use harmony_secret::SecretManager; +use harmony_types::{ + net::{MacAddress, Url}, + switch::PortLocation, +}; +use k8s_openapi::api::core::v1::Namespace; +use kube::api::ObjectMeta; use log::debug; use log::info; use crate::data::FileContent; use 
crate::executors::ExecutorError; +use crate::hardware::PhysicalHost; +use crate::infra::brocade::BrocadeSwitchAuth; +use crate::infra::brocade::BrocadeSwitchClient; +use crate::modules::okd::crd::{ + InstallPlanApproval, OperatorGroup, OperatorGroupSpec, Subscription, SubscriptionSpec, + nmstate::{self, NMState, NodeNetworkConfigurationPolicy, NodeNetworkConfigurationPolicySpec}, +}; use crate::topology::PxeOptions; -use super::DHCPStaticEntry; -use super::DhcpServer; -use super::DnsRecord; -use super::DnsRecordType; -use super::DnsServer; -use super::Firewall; -use super::HttpServer; -use super::IpAddress; -use super::K8sclient; -use super::LoadBalancer; -use super::LoadBalancerService; -use super::LogicalHost; -use super::PreparationError; -use super::PreparationOutcome; -use super::Router; -use super::TftpServer; +use super::{ + DHCPStaticEntry, DhcpServer, DnsRecord, DnsRecordType, DnsServer, Firewall, HostNetworkConfig, + HttpServer, IpAddress, K8sclient, LoadBalancer, LoadBalancerService, LogicalHost, + PreparationError, PreparationOutcome, Router, Switch, SwitchClient, SwitchError, TftpServer, + Topology, k8s::K8sClient, +}; -use super::Topology; -use super::k8s::K8sClient; +use std::collections::BTreeMap; +use std::net::IpAddr; use std::sync::Arc; #[derive(Debug, Clone)] @@ -89,6 +92,231 @@ impl HAClusterTopology { .to_string() } + async fn ensure_nmstate_operator_installed(&self) -> Result<(), String> { + // FIXME: Find a way to check nmstate is already available (get pod -n openshift-nmstate) + debug!("Installing NMState operator..."); + let k8s_client = self.k8s_client().await?; + + let nmstate_namespace = Namespace { + metadata: ObjectMeta { + name: Some("openshift-nmstate".to_string()), + finalizers: Some(vec!["kubernetes".to_string()]), + ..Default::default() + }, + ..Default::default() + }; + debug!("Creating NMState namespace: {nmstate_namespace:#?}"); + k8s_client + .apply(&nmstate_namespace, None) + .await + .map_err(|e| e.to_string())?; + + let 
nmstate_operator_group = OperatorGroup { + metadata: ObjectMeta { + name: Some("openshift-nmstate".to_string()), + namespace: Some("openshift-nmstate".to_string()), + ..Default::default() + }, + spec: OperatorGroupSpec { + target_namespaces: vec!["openshift-nmstate".to_string()], + }, + }; + debug!("Creating NMState operator group: {nmstate_operator_group:#?}"); + k8s_client + .apply(&nmstate_operator_group, None) + .await + .map_err(|e| e.to_string())?; + + let nmstate_subscription = Subscription { + metadata: ObjectMeta { + name: Some("kubernetes-nmstate-operator".to_string()), + namespace: Some("openshift-nmstate".to_string()), + ..Default::default() + }, + spec: SubscriptionSpec { + channel: Some("stable".to_string()), + install_plan_approval: Some(InstallPlanApproval::Automatic), + name: "kubernetes-nmstate-operator".to_string(), + source: "redhat-operators".to_string(), + source_namespace: "openshift-marketplace".to_string(), + }, + }; + debug!("Subscribing to NMState Operator: {nmstate_subscription:#?}"); + k8s_client + .apply(&nmstate_subscription, None) + .await + .map_err(|e| e.to_string())?; + + let nmstate = NMState { + metadata: ObjectMeta { + name: Some("nmstate".to_string()), + ..Default::default() + }, + ..Default::default() + }; + debug!("Creating NMState: {nmstate:#?}"); + k8s_client + .apply(&nmstate, None) + .await + .map_err(|e| e.to_string())?; + + Ok(()) + } + + fn get_next_bond_id(&self) -> u8 { + 42 // FIXME: Find a better way to declare the bond id + } + + async fn configure_bond( + &self, + host: &PhysicalHost, + config: &HostNetworkConfig, + ) -> Result<(), SwitchError> { + self.ensure_nmstate_operator_installed() + .await + .map_err(|e| { + SwitchError::new(format!( + "Can't configure bond, NMState operator not available: {e}" + )) + })?; + + let bond_config = self.create_bond_configuration(host, config); + debug!("Configuring bond for host {host:?}: {bond_config:#?}"); + self.k8s_client() + .await + .unwrap() + .apply(&bond_config, 
None) + .await + .unwrap(); + + todo!() + } + + fn create_bond_configuration( + &self, + host: &PhysicalHost, + config: &HostNetworkConfig, + ) -> NodeNetworkConfigurationPolicy { + let host_name = host.id.clone(); + + let bond_id = self.get_next_bond_id(); + let bond_name = format!("bond{bond_id}"); + let mut bond_mtu: Option = None; + let mut bond_mac_address: Option = None; + let mut bond_ports = Vec::new(); + let mut interfaces: Vec = Vec::new(); + + for switch_port in &config.switch_ports { + let interface_name = switch_port.interface.name.clone(); + + interfaces.push(nmstate::InterfaceSpec { + name: interface_name.clone(), + description: Some(format!("Member of bond {bond_name}")), + r#type: "ethernet".to_string(), + state: "up".to_string(), + mtu: Some(switch_port.interface.mtu), + mac_address: Some(switch_port.interface.mac_address.to_string()), + ipv4: Some(nmstate::IpStackSpec { + enabled: Some(false), + ..Default::default() + }), + ipv6: Some(nmstate::IpStackSpec { + enabled: Some(false), + ..Default::default() + }), + link_aggregation: None, + ..Default::default() + }); + + bond_ports.push(interface_name); + + // Use the first port's details for the bond mtu and mac address + if bond_mtu.is_none() { + bond_mtu = Some(switch_port.interface.mtu); + } + if bond_mac_address.is_none() { + bond_mac_address = Some(switch_port.interface.mac_address.to_string()); + } + } + + interfaces.push(nmstate::InterfaceSpec { + name: bond_name.clone(), + description: Some(format!("Network bond for host {host_name}")), + r#type: "bond".to_string(), + state: "up".to_string(), + mtu: bond_mtu, + mac_address: bond_mac_address, + ipv4: Some(nmstate::IpStackSpec { + dhcp: Some(true), + enabled: Some(true), + ..Default::default() + }), + ipv6: Some(nmstate::IpStackSpec { + dhcp: Some(true), + autoconf: Some(true), + enabled: Some(true), + ..Default::default() + }), + link_aggregation: Some(nmstate::BondSpec { + mode: "802.3ad".to_string(), + ports: bond_ports, + 
..Default::default() + }), + ..Default::default() + }); + + NodeNetworkConfigurationPolicy { + metadata: ObjectMeta { + name: Some(format!("{host_name}-bond-config")), + ..Default::default() + }, + spec: NodeNetworkConfigurationPolicySpec { + node_selector: Some(BTreeMap::from([( + "kubernetes.io/hostname".to_string(), + host_name.to_string(), + )])), + desired_state: nmstate::DesiredStateSpec { interfaces }, + }, + } + } + + async fn get_switch_client(&self) -> Result, SwitchError> { + let auth = SecretManager::get_or_prompt::() + .await + .map_err(|e| SwitchError::new(format!("Failed to get credentials: {e}")))?; + + // FIXME: We assume Brocade switches + let switches: Vec = self.switch.iter().map(|s| s.ip).collect(); + let brocade_options = Some(BrocadeOptions { + dry_run: *crate::config::DRY_RUN, + ..Default::default() + }); + let client = + BrocadeSwitchClient::init(&switches, &auth.username, &auth.password, brocade_options) + .await + .map_err(|e| SwitchError::new(format!("Failed to connect to switch: {e}")))?; + + Ok(Box::new(client)) + } + + async fn configure_port_channel( + &self, + host: &PhysicalHost, + config: &HostNetworkConfig, + ) -> Result<(), SwitchError> { + debug!("Configuring port channel: {config:#?}"); + let client = self.get_switch_client().await?; + + let switch_ports = config.switch_ports.iter().map(|s| s.port.clone()).collect(); + + client + .configure_port_channel(&format!("Harmony_{}", host.id), switch_ports) + .await + .map_err(|e| SwitchError::new(format!("Failed to configure switch: {e}")))?; + + Ok(()) + } + pub fn autoload() -> Self { let dummy_infra = Arc::new(DummyInfra {}); let dummy_host = LogicalHost { @@ -263,6 +491,33 @@ impl HttpServer for HAClusterTopology { } } +#[async_trait] +impl Switch for HAClusterTopology { + async fn setup_switch(&self) -> Result<(), SwitchError> { + let client = self.get_switch_client().await?; + client.setup().await?; + Ok(()) + } + + async fn get_port_for_mac_address( + &self, + mac_address: 
&MacAddress, + ) -> Result, SwitchError> { + let client = self.get_switch_client().await?; + let port = client.find_port(mac_address).await?; + Ok(port) + } + + async fn configure_host_network( + &self, + host: &PhysicalHost, + config: HostNetworkConfig, + ) -> Result<(), SwitchError> { + self.configure_bond(host, &config).await?; + self.configure_port_channel(host, &config).await + } +} + #[derive(Debug)] pub struct DummyInfra; @@ -332,8 +587,8 @@ impl DhcpServer for DummyInfra { } async fn set_dhcp_range( &self, - start: &IpAddress, - end: &IpAddress, + _start: &IpAddress, + _end: &IpAddress, ) -> Result<(), ExecutorError> { unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) } diff --git a/harmony/src/domain/topology/network.rs b/harmony/src/domain/topology/network.rs index c7ab5cc..99db03a 100644 --- a/harmony/src/domain/topology/network.rs +++ b/harmony/src/domain/topology/network.rs @@ -1,10 +1,14 @@ -use std::{net::Ipv4Addr, str::FromStr, sync::Arc}; +use std::{error::Error, net::Ipv4Addr, str::FromStr, sync::Arc}; use async_trait::async_trait; -use harmony_types::net::{IpAddress, MacAddress}; +use derive_new::new; +use harmony_types::{ + net::{IpAddress, MacAddress}, + switch::PortLocation, +}; use serde::Serialize; -use crate::executors::ExecutorError; +use crate::{executors::ExecutorError, hardware::PhysicalHost}; use super::{LogicalHost, k8s::K8sClient}; @@ -172,6 +176,80 @@ impl FromStr for DnsRecordType { } } +#[async_trait] +pub trait Switch: Send + Sync { + async fn setup_switch(&self) -> Result<(), SwitchError>; + + async fn get_port_for_mac_address( + &self, + mac_address: &MacAddress, + ) -> Result, SwitchError>; + + async fn configure_host_network( + &self, + host: &PhysicalHost, + config: HostNetworkConfig, + ) -> Result<(), SwitchError>; +} + +#[derive(Clone, Debug, PartialEq)] +pub struct HostNetworkConfig { + pub switch_ports: Vec, +} + +#[derive(Clone, Debug, PartialEq)] +pub struct SwitchPort { + pub interface: NetworkInterface, + pub port: 
PortLocation,
+}
+
+#[derive(Clone, Debug, PartialEq)]
+pub struct NetworkInterface {
+    pub name: String,
+    pub mac_address: MacAddress,
+    pub speed_mbps: Option,
+    pub mtu: u32,
+}
+
+#[derive(Debug, Clone, new)]
+pub struct SwitchError {
+    msg: String,
+}
+
+impl std::fmt::Display for SwitchError {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.write_str(&self.msg)
+    }
+}
+
+impl Error for SwitchError {}
+
+#[async_trait]
+pub trait SwitchClient: Send + Sync {
+    /// Executes essential, idempotent, one-time initial configuration steps.
+    ///
+    /// This is an opinionated procedure that sets up a switch to provide high availability
+    /// capabilities as decided by the NationTech team.
+    ///
+    /// This includes tasks like enabling switchport for all interfaces
+    /// except the ones intended for Fabric Networking, etc.
+    ///
+    /// The implementation must ensure the operation is **idempotent** (safe to run multiple times)
+    /// and that it doesn't break existing configurations.
+ async fn setup(&self) -> Result<(), SwitchError>; + + async fn find_port( + &self, + mac_address: &MacAddress, + ) -> Result, SwitchError>; + + async fn configure_port_channel( + &self, + channel_name: &str, + switch_ports: Vec, + ) -> Result; +} + #[cfg(test)] mod test { use std::sync::Arc; diff --git a/harmony/src/infra/brocade.rs b/harmony/src/infra/brocade.rs new file mode 100644 index 0000000..f721328 --- /dev/null +++ b/harmony/src/infra/brocade.rs @@ -0,0 +1,385 @@ +use async_trait::async_trait; +use brocade::{BrocadeClient, BrocadeOptions, InterSwitchLink, InterfaceStatus, PortOperatingMode}; +use harmony_secret::Secret; +use harmony_types::{ + net::{IpAddress, MacAddress}, + switch::{PortDeclaration, PortLocation}, +}; +use option_ext::OptionExt; +use serde::{Deserialize, Serialize}; + +use crate::topology::{SwitchClient, SwitchError}; + +pub struct BrocadeSwitchClient { + brocade: Box, +} + +impl BrocadeSwitchClient { + pub async fn init( + ip_addresses: &[IpAddress], + username: &str, + password: &str, + options: Option, + ) -> Result { + let brocade = brocade::init(ip_addresses, 22, username, password, options).await?; + Ok(Self { brocade }) + } +} + +#[async_trait] +impl SwitchClient for BrocadeSwitchClient { + async fn setup(&self) -> Result<(), SwitchError> { + let stack_topology = self + .brocade + .get_stack_topology() + .await + .map_err(|e| SwitchError::new(e.to_string()))?; + + let interfaces = self + .brocade + .get_interfaces() + .await + .map_err(|e| SwitchError::new(e.to_string()))?; + + let interfaces: Vec<(String, PortOperatingMode)> = interfaces + .into_iter() + .filter(|interface| { + interface.operating_mode.is_none() && interface.status == InterfaceStatus::Connected + }) + .filter(|interface| { + !stack_topology.iter().any(|link: &InterSwitchLink| { + link.local_port == interface.port_location + || link.remote_port.contains(&interface.port_location) + }) + }) + .map(|interface| (interface.name.clone(), PortOperatingMode::Access)) + 
.collect(); + + if interfaces.is_empty() { + return Ok(()); + } + + self.brocade + .configure_interfaces(interfaces) + .await + .map_err(|e| SwitchError::new(e.to_string()))?; + + Ok(()) + } + + async fn find_port( + &self, + mac_address: &MacAddress, + ) -> Result, SwitchError> { + let table = self + .brocade + .get_mac_address_table() + .await + .map_err(|e| SwitchError::new(format!("{e}")))?; + + let port = table + .iter() + .find(|entry| entry.mac_address == *mac_address) + .map(|entry| match &entry.port { + PortDeclaration::Single(port_location) => Ok(port_location.clone()), + _ => Err(SwitchError::new( + "Multiple ports found for MAC address".into(), + )), + }); + + match port { + Some(Ok(p)) => Ok(Some(p)), + Some(Err(e)) => Err(e), + None => Ok(None), + } + } + + async fn configure_port_channel( + &self, + channel_name: &str, + switch_ports: Vec, + ) -> Result { + let channel_id = self + .brocade + .find_available_channel_id() + .await + .map_err(|e| SwitchError::new(format!("{e}")))?; + + self.brocade + .create_port_channel(channel_id, channel_name, &switch_ports) + .await + .map_err(|e| SwitchError::new(format!("{e}")))?; + + Ok(channel_id) + } +} + +#[derive(Secret, Serialize, Deserialize, Debug)] +pub struct BrocadeSwitchAuth { + pub username: String, + pub password: String, +} + +#[cfg(test)] +mod tests { + use std::sync::{Arc, Mutex}; + + use assertor::*; + use async_trait::async_trait; + use brocade::{ + BrocadeClient, BrocadeInfo, Error, InterSwitchLink, InterfaceInfo, InterfaceStatus, + InterfaceType, MacAddressEntry, PortChannelId, PortOperatingMode, + }; + use harmony_types::switch::PortLocation; + + use crate::{infra::brocade::BrocadeSwitchClient, topology::SwitchClient}; + + #[tokio::test] + async fn setup_should_configure_ethernet_interfaces_as_access_ports() { + let first_interface = given_interface() + .with_port_location(PortLocation(1, 0, 1)) + .build(); + let second_interface = given_interface() + .with_port_location(PortLocation(1, 0, 
4)) + .build(); + let brocade = Box::new(FakeBrocadeClient::new( + vec![], + vec![first_interface.clone(), second_interface.clone()], + )); + let client = BrocadeSwitchClient { + brocade: brocade.clone(), + }; + + client.setup().await.unwrap(); + + let configured_interfaces = brocade.configured_interfaces.lock().unwrap(); + assert_that!(*configured_interfaces).contains_exactly(vec![ + (first_interface.name.clone(), PortOperatingMode::Access), + (second_interface.name.clone(), PortOperatingMode::Access), + ]); + } + + #[tokio::test] + async fn setup_with_an_already_configured_interface_should_skip_configuration() { + let brocade = Box::new(FakeBrocadeClient::new( + vec![], + vec![ + given_interface() + .with_operating_mode(Some(PortOperatingMode::Access)) + .build(), + ], + )); + let client = BrocadeSwitchClient { + brocade: brocade.clone(), + }; + + client.setup().await.unwrap(); + + let configured_interfaces = brocade.configured_interfaces.lock().unwrap(); + assert_that!(*configured_interfaces).is_empty(); + } + + #[tokio::test] + async fn setup_with_a_disconnected_interface_should_skip_configuration() { + let brocade = Box::new(FakeBrocadeClient::new( + vec![], + vec![ + given_interface() + .with_status(InterfaceStatus::SfpAbsent) + .build(), + given_interface() + .with_status(InterfaceStatus::NotConnected) + .build(), + ], + )); + let client = BrocadeSwitchClient { + brocade: brocade.clone(), + }; + + client.setup().await.unwrap(); + + let configured_interfaces = brocade.configured_interfaces.lock().unwrap(); + assert_that!(*configured_interfaces).is_empty(); + } + + #[tokio::test] + async fn setup_with_inter_switch_links_should_not_configure_interfaces_used_to_form_stack() { + let brocade = Box::new(FakeBrocadeClient::new( + vec![ + given_inter_switch_link() + .between(PortLocation(1, 0, 1), PortLocation(2, 0, 1)) + .build(), + given_inter_switch_link() + .between(PortLocation(2, 0, 2), PortLocation(3, 0, 1)) + .build(), + ], + vec![ + given_interface() + 
.with_port_location(PortLocation(1, 0, 1)) + .build(), + given_interface() + .with_port_location(PortLocation(2, 0, 1)) + .build(), + given_interface() + .with_port_location(PortLocation(3, 0, 1)) + .build(), + ], + )); + let client = BrocadeSwitchClient { + brocade: brocade.clone(), + }; + + client.setup().await.unwrap(); + + let configured_interfaces = brocade.configured_interfaces.lock().unwrap(); + assert_that!(*configured_interfaces).is_empty(); + } + + #[derive(Clone)] + struct FakeBrocadeClient { + stack_topology: Vec, + interfaces: Vec, + configured_interfaces: Arc>>, + } + + #[async_trait] + impl BrocadeClient for FakeBrocadeClient { + async fn version(&self) -> Result { + todo!() + } + + async fn get_mac_address_table(&self) -> Result, Error> { + todo!() + } + + async fn get_stack_topology(&self) -> Result, Error> { + Ok(self.stack_topology.clone()) + } + + async fn get_interfaces(&self) -> Result, Error> { + Ok(self.interfaces.clone()) + } + + async fn configure_interfaces( + &self, + interfaces: Vec<(String, PortOperatingMode)>, + ) -> Result<(), Error> { + let mut configured_interfaces = self.configured_interfaces.lock().unwrap(); + *configured_interfaces = interfaces; + + Ok(()) + } + + async fn find_available_channel_id(&self) -> Result { + todo!() + } + + async fn create_port_channel( + &self, + _channel_id: PortChannelId, + _channel_name: &str, + _ports: &[PortLocation], + ) -> Result<(), Error> { + todo!() + } + + async fn clear_port_channel(&self, _channel_name: &str) -> Result<(), Error> { + todo!() + } + } + + impl FakeBrocadeClient { + fn new(stack_topology: Vec, interfaces: Vec) -> Self { + Self { + stack_topology, + interfaces, + configured_interfaces: Arc::new(Mutex::new(vec![])), + } + } + } + + struct InterfaceInfoBuilder { + port_location: Option, + interface_type: Option, + operating_mode: Option, + status: Option, + } + + impl InterfaceInfoBuilder { + fn build(&self) -> InterfaceInfo { + let interface_type = self + .interface_type + 
.clone() + .unwrap_or(InterfaceType::Ethernet("TenGigabitEthernet".into())); + let port_location = self.port_location.clone().unwrap_or(PortLocation(1, 0, 1)); + let name = format!("{interface_type} {port_location}"); + let status = self.status.clone().unwrap_or(InterfaceStatus::Connected); + + InterfaceInfo { + name, + port_location, + interface_type, + operating_mode: self.operating_mode.clone(), + status, + } + } + + fn with_port_location(self, port_location: PortLocation) -> Self { + Self { + port_location: Some(port_location), + ..self + } + } + + fn with_operating_mode(self, operating_mode: Option) -> Self { + Self { + operating_mode, + ..self + } + } + + fn with_status(self, status: InterfaceStatus) -> Self { + Self { + status: Some(status), + ..self + } + } + } + + struct InterSwitchLinkBuilder { + link: Option<(PortLocation, PortLocation)>, + } + + impl InterSwitchLinkBuilder { + fn build(&self) -> InterSwitchLink { + let link = self + .link + .clone() + .unwrap_or((PortLocation(1, 0, 1), PortLocation(2, 0, 1))); + + InterSwitchLink { + local_port: link.0, + remote_port: Some(link.1), + } + } + + fn between(self, local_port: PortLocation, remote_port: PortLocation) -> Self { + Self { + link: Some((local_port, remote_port)), + } + } + } + + fn given_interface() -> InterfaceInfoBuilder { + InterfaceInfoBuilder { + port_location: None, + interface_type: None, + operating_mode: None, + status: None, + } + } + + fn given_inter_switch_link() -> InterSwitchLinkBuilder { + InterSwitchLinkBuilder { link: None } + } +} diff --git a/harmony/src/infra/mod.rs b/harmony/src/infra/mod.rs index c05c7b6..203cf90 100644 --- a/harmony/src/infra/mod.rs +++ b/harmony/src/infra/mod.rs @@ -1,3 +1,4 @@ +pub mod brocade; pub mod executors; pub mod hp_ilo; pub mod intel_amt; diff --git a/harmony/src/modules/okd/bootstrap_03_control_plane.rs b/harmony/src/modules/okd/bootstrap_03_control_plane.rs index ba9e12d..af8e71f 100644 --- 
a/harmony/src/modules/okd/bootstrap_03_control_plane.rs +++ b/harmony/src/modules/okd/bootstrap_03_control_plane.rs @@ -5,8 +5,10 @@ use crate::{ interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, inventory::{HostRole, Inventory}, modules::{ - dhcp::DhcpHostBindingScore, http::IPxeMacBootFileScore, - inventory::DiscoverHostForRoleScore, okd::templates::BootstrapIpxeTpl, + dhcp::DhcpHostBindingScore, + http::IPxeMacBootFileScore, + inventory::DiscoverHostForRoleScore, + okd::{host_network::HostNetworkConfigurationScore, templates::BootstrapIpxeTpl}, }, score::Score, topology::{HAClusterTopology, HostBinding}, @@ -28,7 +30,7 @@ pub struct OKDSetup03ControlPlaneScore {} impl Score for OKDSetup03ControlPlaneScore { fn create_interpret(&self) -> Box> { - Box::new(OKDSetup03ControlPlaneInterpret::new(self.clone())) + Box::new(OKDSetup03ControlPlaneInterpret::new()) } fn name(&self) -> String { @@ -38,17 +40,15 @@ impl Score for OKDSetup03ControlPlaneScore { #[derive(Debug, Clone)] pub struct OKDSetup03ControlPlaneInterpret { - score: OKDSetup03ControlPlaneScore, version: Version, status: InterpretStatus, } impl OKDSetup03ControlPlaneInterpret { - pub fn new(score: OKDSetup03ControlPlaneScore) -> Self { + pub fn new() -> Self { let version = Version::from("1.0.0").unwrap(); Self { version, - score, status: InterpretStatus::QUEUED, } } @@ -159,7 +159,7 @@ impl OKDSetup03ControlPlaneInterpret { } .to_string(); - debug!("[ControlPlane] iPXE content template:\n{}", content); + debug!("[ControlPlane] iPXE content template:\n{content}"); // Create and apply an iPXE boot file for each node. for node in nodes { @@ -189,16 +189,13 @@ impl OKDSetup03ControlPlaneInterpret { /// Prompts the user to reboot the target control plane nodes. 
async fn reboot_targets(&self, nodes: &Vec) -> Result<(), InterpretError> { let node_ids: Vec = nodes.iter().map(|n| n.id.to_string()).collect(); - info!( - "[ControlPlane] Requesting reboot for control plane nodes: {:?}", - node_ids - ); + info!("[ControlPlane] Requesting reboot for control plane nodes: {node_ids:?}",); let confirmation = inquire::Confirm::new( &format!("Please reboot the {} control plane nodes ({}) to apply their PXE configuration. Press enter when ready.", nodes.len(), node_ids.join(", ")), ) .prompt() - .map_err(|e| InterpretError::new(format!("User prompt failed: {}", e)))?; + .map_err(|e| InterpretError::new(format!("User prompt failed: {e}")))?; if !confirmation { return Err(InterpretError::new( @@ -210,14 +207,23 @@ impl OKDSetup03ControlPlaneInterpret { } /// Placeholder for automating network bonding configuration. - async fn persist_network_bond(&self) -> Result<(), InterpretError> { - // Generate MC or NNCP from inventory NIC data; apply via ignition or post-join. - info!("[ControlPlane] Ensuring persistent bonding via MachineConfig/NNCP"); + async fn persist_network_bond( + &self, + inventory: &Inventory, + topology: &HAClusterTopology, + hosts: &Vec, + ) -> Result<(), InterpretError> { + info!("[ControlPlane] Ensuring persistent bonding"); + let score = HostNetworkConfigurationScore { + hosts: hosts.clone(), + }; + score.interpret(inventory, topology).await?; + inquire::Confirm::new( "Network configuration for control plane nodes is not automated yet. Configure it manually if needed.", ) .prompt() - .map_err(|e| InterpretError::new(format!("User prompt failed: {}", e)))?; + .map_err(|e| InterpretError::new(format!("User prompt failed: {e}")))?; Ok(()) } @@ -260,7 +266,8 @@ impl Interpret for OKDSetup03ControlPlaneInterpret { self.reboot_targets(&nodes).await?; // 5. Placeholder for post-boot network configuration (e.g., bonding). 
- self.persist_network_bond().await?; + self.persist_network_bond(inventory, topology, &nodes) + .await?; // TODO: Implement a step to wait for the control plane nodes to join the cluster // and for the cluster operators to become available. This would be similar to diff --git a/harmony/src/modules/okd/crd/mod.rs b/harmony/src/modules/okd/crd/mod.rs new file mode 100644 index 0000000..c1a68ce --- /dev/null +++ b/harmony/src/modules/okd/crd/mod.rs @@ -0,0 +1,41 @@ +use kube::CustomResource; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +pub mod nmstate; + +#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)] +#[kube( + group = "operators.coreos.com", + version = "v1", + kind = "OperatorGroup", + namespaced +)] +#[serde(rename_all = "camelCase")] +pub struct OperatorGroupSpec { + pub target_namespaces: Vec, +} + +#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)] +#[kube( + group = "operators.coreos.com", + version = "v1alpha1", + kind = "Subscription", + namespaced +)] +#[serde(rename_all = "camelCase")] +pub struct SubscriptionSpec { + pub name: String, + pub source: String, + pub source_namespace: String, + pub channel: Option, + pub install_plan_approval: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)] +pub enum InstallPlanApproval { + #[serde(rename = "Automatic")] + Automatic, + #[serde(rename = "Manual")] + Manual, +} diff --git a/harmony/src/modules/okd/crd/nmstate.rs b/harmony/src/modules/okd/crd/nmstate.rs new file mode 100644 index 0000000..5f71e4e --- /dev/null +++ b/harmony/src/modules/okd/crd/nmstate.rs @@ -0,0 +1,251 @@ +use std::collections::BTreeMap; + +use kube::CustomResource; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +use serde_json::Value; + +#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)] +#[kube(group = "nmstate.io", version = "v1", kind = "NMState", namespaced)] +#[serde(rename_all = "camelCase")] 
+pub struct NMStateSpec { + pub probe_configuration: Option, +} + +impl Default for NMState { + fn default() -> Self { + Self { + metadata: Default::default(), + spec: NMStateSpec { + probe_configuration: None, + }, + } + } +} + +#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)] +#[serde(rename_all = "camelCase")] +pub struct ProbeConfig { + pub dns: ProbeDns, +} + +#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)] +#[serde(rename_all = "camelCase")] +pub struct ProbeDns { + pub host: String, +} + +#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)] +#[kube( + group = "nmstate.io", + version = "v1", + kind = "NodeNetworkConfigurationPolicy", + namespaced +)] +#[serde(rename_all = "camelCase")] +pub struct NodeNetworkConfigurationPolicySpec { + pub node_selector: Option>, + pub desired_state: DesiredStateSpec, +} + +#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct DesiredStateSpec { + pub interfaces: Vec, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct InterfaceSpec { + pub name: String, + pub description: Option, + pub r#type: String, + pub state: String, + pub mac_address: Option, + pub mtu: Option, + pub controller: Option, + pub ipv4: Option, + pub ipv6: Option, + pub ethernet: Option, + pub link_aggregation: Option, + pub vlan: Option, + pub vxlan: Option, + pub mac_vtap: Option, + pub mac_vlan: Option, + pub infiniband: Option, + pub linux_bridge: Option, + pub ovs_bridge: Option, + pub ethtool: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct IpStackSpec { + pub enabled: Option, + pub dhcp: Option, + pub autoconf: Option, + pub address: Option>, + pub auto_dns: Option, + pub auto_gateway: Option, + pub auto_routes: Option, + pub dhcp_client_id: Option, + pub dhcp_duid: Option, +} + 
+#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct IpAddressSpec { + pub ip: String, + pub prefix_length: u8, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct EthernetSpec { + pub speed: Option, + pub duplex: Option, + pub auto_negotiation: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct BondSpec { + pub mode: String, + pub ports: Vec, + pub options: Option>, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct VlanSpec { + pub base_iface: String, + pub id: u16, + pub protocol: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct VxlanSpec { + pub base_iface: String, + pub id: u32, + pub remote: String, + pub local: Option, + pub learning: Option, + pub destination_port: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct MacVtapSpec { + pub base_iface: String, + pub mode: String, + pub promiscuous: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct MacVlanSpec { + pub base_iface: String, + pub mode: String, + pub promiscuous: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct InfinibandSpec { + pub base_iface: String, + pub pkey: String, + pub mode: String, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct LinuxBridgeSpec { + pub options: Option, + pub ports: Option>, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub 
struct LinuxBridgeOptions { + pub mac_ageing_time: Option, + pub multicast_snooping: Option, + pub stp: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct StpOptions { + pub enabled: Option, + pub forward_delay: Option, + pub hello_time: Option, + pub max_age: Option, + pub priority: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct LinuxBridgePort { + pub name: String, + pub vlan: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct LinuxBridgePortVlan { + pub mode: Option, + pub trunk_tags: Option>, + pub tag: Option, + pub enable_native: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct VlanTag { + pub id: u16, + pub id_range: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct VlanIdRange { + pub min: u16, + pub max: u16, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct OvsBridgeSpec { + pub options: Option, + pub ports: Option>, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct OvsBridgeOptions { + pub stp: Option, + pub rstp: Option, + pub mcast_snooping_enable: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct OvsPortSpec { + pub name: String, + pub link_aggregation: Option, + pub vlan: Option, + pub r#type: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct EthtoolSpec { + // TODO: Properly describe this spec 
(https://nmstate.io/devel/yaml_api.html#ethtool) +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct EthtoolFecSpec { + pub auto: Option, + pub mode: Option, +} diff --git a/harmony/src/modules/okd/host_network.rs b/harmony/src/modules/okd/host_network.rs new file mode 100644 index 0000000..3bc8c3c --- /dev/null +++ b/harmony/src/modules/okd/host_network.rs @@ -0,0 +1,394 @@ +use async_trait::async_trait; +use harmony_types::id::Id; +use log::{debug, info}; +use serde::Serialize; + +use crate::{ + data::Version, + hardware::PhysicalHost, + interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, + inventory::Inventory, + score::Score, + topology::{HostNetworkConfig, NetworkInterface, Switch, SwitchPort, Topology}, +}; + +#[derive(Debug, Clone, Serialize)] +pub struct HostNetworkConfigurationScore { + pub hosts: Vec, +} + +impl Score for HostNetworkConfigurationScore { + fn name(&self) -> String { + "HostNetworkConfigurationScore".into() + } + + fn create_interpret(&self) -> Box> { + Box::new(HostNetworkConfigurationInterpret { + score: self.clone(), + }) + } +} + +#[derive(Debug)] +pub struct HostNetworkConfigurationInterpret { + score: HostNetworkConfigurationScore, +} + +impl HostNetworkConfigurationInterpret { + async fn configure_network_for_host( + &self, + topology: &T, + host: &PhysicalHost, + ) -> Result<(), InterpretError> { + let switch_ports = self.collect_switch_ports_for_host(topology, host).await?; + if !switch_ports.is_empty() { + topology + .configure_host_network(host, HostNetworkConfig { switch_ports }) + .await + .map_err(|e| InterpretError::new(format!("Failed to configure host: {e}")))?; + } + + Ok(()) + } + + async fn collect_switch_ports_for_host( + &self, + topology: &T, + host: &PhysicalHost, + ) -> Result, InterpretError> { + let mut switch_ports = vec![]; + + for network_interface in &host.network { + let mac_address = 
network_interface.mac_address; + + match topology.get_port_for_mac_address(&mac_address).await { + Ok(Some(port)) => { + switch_ports.push(SwitchPort { + interface: NetworkInterface { + name: network_interface.name.clone(), + mac_address, + speed_mbps: network_interface.speed_mbps, + mtu: network_interface.mtu, + }, + port, + }); + } + Ok(None) => debug!("No port found for host '{}', skipping", host.id), + Err(e) => { + return Err(InterpretError::new(format!( + "Failed to get port for host '{}': {}", + host.id, e + ))); + } + } + } + + Ok(switch_ports) + } +} + +#[async_trait] +impl Interpret for HostNetworkConfigurationInterpret { + fn get_name(&self) -> InterpretName { + InterpretName::Custom("HostNetworkConfigurationInterpret") + } + + fn get_version(&self) -> Version { + todo!() + } + + fn get_status(&self) -> InterpretStatus { + todo!() + } + + fn get_children(&self) -> Vec { + vec![] + } + + async fn execute( + &self, + _inventory: &Inventory, + topology: &T, + ) -> Result { + if self.score.hosts.is_empty() { + return Ok(Outcome::noop("No hosts to configure".into())); + } + + info!( + "Started network configuration for {} host(s)...", + self.score.hosts.len() + ); + + topology + .setup_switch() + .await + .map_err(|e| InterpretError::new(format!("Switch setup failed: {e}")))?; + + let mut configured_host_count = 0; + for host in &self.score.hosts { + self.configure_network_for_host(topology, host).await?; + configured_host_count += 1; + } + + if configured_host_count > 0 { + Ok(Outcome::success(format!( + "Configured {configured_host_count}/{} host(s)", + self.score.hosts.len() + ))) + } else { + Ok(Outcome::noop("No hosts configured".into())) + } + } +} + +#[cfg(test)] +mod tests { + use assertor::*; + use harmony_types::{net::MacAddress, switch::PortLocation}; + use lazy_static::lazy_static; + + use crate::{ + hardware::HostCategory, + topology::{ + HostNetworkConfig, PreparationError, PreparationOutcome, SwitchError, SwitchPort, + }, + }; + use std::{ + 
str::FromStr, + sync::{Arc, Mutex}, + }; + + use super::*; + + lazy_static! { + pub static ref HOST_ID: Id = Id::from_str("host-1").unwrap(); + pub static ref ANOTHER_HOST_ID: Id = Id::from_str("host-2").unwrap(); + pub static ref EXISTING_INTERFACE: NetworkInterface = NetworkInterface { + mac_address: MacAddress::try_from("AA:BB:CC:DD:EE:F1".to_string()).unwrap(), + name: "interface-1".into(), + speed_mbps: None, + mtu: 1, + }; + pub static ref ANOTHER_EXISTING_INTERFACE: NetworkInterface = NetworkInterface { + mac_address: MacAddress::try_from("AA:BB:CC:DD:EE:F2".to_string()).unwrap(), + name: "interface-2".into(), + speed_mbps: None, + mtu: 1, + }; + pub static ref UNKNOWN_INTERFACE: NetworkInterface = NetworkInterface { + mac_address: MacAddress::try_from("11:22:33:44:55:61".to_string()).unwrap(), + name: "unknown-interface".into(), + speed_mbps: None, + mtu: 1, + }; + pub static ref PORT: PortLocation = PortLocation(1, 0, 42); + pub static ref ANOTHER_PORT: PortLocation = PortLocation(2, 0, 42); + } + + #[tokio::test] + async fn should_setup_switch() { + let host = given_host(&HOST_ID, vec![EXISTING_INTERFACE.clone()]); + let score = given_score(vec![host]); + let topology = TopologyWithSwitch::new(); + + let _ = score.interpret(&Inventory::empty(), &topology).await; + + let switch_setup = topology.switch_setup.lock().unwrap(); + assert_that!(*switch_setup).is_true(); + } + + #[tokio::test] + async fn host_with_one_mac_address_should_create_bond_with_one_interface() { + let host = given_host(&HOST_ID, vec![EXISTING_INTERFACE.clone()]); + let score = given_score(vec![host]); + let topology = TopologyWithSwitch::new(); + + let _ = score.interpret(&Inventory::empty(), &topology).await; + + let configured_host_networks = topology.configured_host_networks.lock().unwrap(); + assert_that!(*configured_host_networks).contains_exactly(vec![( + HOST_ID.clone(), + HostNetworkConfig { + switch_ports: vec![SwitchPort { + interface: EXISTING_INTERFACE.clone(), + port: 
PORT.clone(), + }], + }, + )]); + } + + #[tokio::test] + async fn host_with_multiple_mac_addresses_should_create_one_bond_with_all_interfaces() { + let score = given_score(vec![given_host( + &HOST_ID, + vec![ + EXISTING_INTERFACE.clone(), + ANOTHER_EXISTING_INTERFACE.clone(), + ], + )]); + let topology = TopologyWithSwitch::new(); + + let _ = score.interpret(&Inventory::empty(), &topology).await; + + let configured_host_networks = topology.configured_host_networks.lock().unwrap(); + assert_that!(*configured_host_networks).contains_exactly(vec![( + HOST_ID.clone(), + HostNetworkConfig { + switch_ports: vec![ + SwitchPort { + interface: EXISTING_INTERFACE.clone(), + port: PORT.clone(), + }, + SwitchPort { + interface: ANOTHER_EXISTING_INTERFACE.clone(), + port: ANOTHER_PORT.clone(), + }, + ], + }, + )]); + } + + #[tokio::test] + async fn multiple_hosts_should_create_one_bond_per_host() { + let score = given_score(vec![ + given_host(&HOST_ID, vec![EXISTING_INTERFACE.clone()]), + given_host(&ANOTHER_HOST_ID, vec![ANOTHER_EXISTING_INTERFACE.clone()]), + ]); + let topology = TopologyWithSwitch::new(); + + let _ = score.interpret(&Inventory::empty(), &topology).await; + + let configured_host_networks = topology.configured_host_networks.lock().unwrap(); + assert_that!(*configured_host_networks).contains_exactly(vec![ + ( + HOST_ID.clone(), + HostNetworkConfig { + switch_ports: vec![SwitchPort { + interface: EXISTING_INTERFACE.clone(), + port: PORT.clone(), + }], + }, + ), + ( + ANOTHER_HOST_ID.clone(), + HostNetworkConfig { + switch_ports: vec![SwitchPort { + interface: ANOTHER_EXISTING_INTERFACE.clone(), + port: ANOTHER_PORT.clone(), + }], + }, + ), + ]); + } + + #[tokio::test] + async fn port_not_found_for_mac_address_should_not_configure_interface() { + let score = given_score(vec![given_host(&HOST_ID, vec![UNKNOWN_INTERFACE.clone()])]); + let topology = TopologyWithSwitch::new_port_not_found(); + + let _ = score.interpret(&Inventory::empty(), &topology).await; + + let 
configured_host_networks = topology.configured_host_networks.lock().unwrap(); + assert_that!(*configured_host_networks).is_empty(); + } + + fn given_score(hosts: Vec) -> HostNetworkConfigurationScore { + HostNetworkConfigurationScore { hosts } + } + + fn given_host(id: &Id, network_interfaces: Vec) -> PhysicalHost { + let network = network_interfaces.iter().map(given_interface).collect(); + + PhysicalHost { + id: id.clone(), + category: HostCategory::Server, + network, + storage: vec![], + labels: vec![], + memory_modules: vec![], + cpus: vec![], + } + } + + fn given_interface( + interface: &NetworkInterface, + ) -> harmony_inventory_agent::hwinfo::NetworkInterface { + harmony_inventory_agent::hwinfo::NetworkInterface { + name: interface.name.clone(), + mac_address: interface.mac_address, + speed_mbps: interface.speed_mbps, + is_up: true, + mtu: interface.mtu, + ipv4_addresses: vec![], + ipv6_addresses: vec![], + driver: "driver".into(), + firmware_version: None, + } + } + + struct TopologyWithSwitch { + available_ports: Arc>>, + configured_host_networks: Arc>>, + switch_setup: Arc>, + } + + impl TopologyWithSwitch { + fn new() -> Self { + Self { + available_ports: Arc::new(Mutex::new(vec![PORT.clone(), ANOTHER_PORT.clone()])), + configured_host_networks: Arc::new(Mutex::new(vec![])), + switch_setup: Arc::new(Mutex::new(false)), + } + } + + fn new_port_not_found() -> Self { + Self { + available_ports: Arc::new(Mutex::new(vec![])), + configured_host_networks: Arc::new(Mutex::new(vec![])), + switch_setup: Arc::new(Mutex::new(false)), + } + } + } + + #[async_trait] + impl Topology for TopologyWithSwitch { + fn name(&self) -> &str { + "SwitchWithPortTopology" + } + + async fn ensure_ready(&self) -> Result { + Ok(PreparationOutcome::Success { details: "".into() }) + } + } + + #[async_trait] + impl Switch for TopologyWithSwitch { + async fn setup_switch(&self) -> Result<(), SwitchError> { + let mut switch_configured = self.switch_setup.lock().unwrap(); + 
*switch_configured = true; + Ok(()) + } + + async fn get_port_for_mac_address( + &self, + _mac_address: &MacAddress, + ) -> Result, SwitchError> { + let mut ports = self.available_ports.lock().unwrap(); + if ports.is_empty() { + return Ok(None); + } + Ok(Some(ports.remove(0))) + } + + async fn configure_host_network( + &self, + host: &PhysicalHost, + config: HostNetworkConfig, + ) -> Result<(), SwitchError> { + let mut configured_host_networks = self.configured_host_networks.lock().unwrap(); + configured_host_networks.push((host.id.clone(), config.clone())); + + Ok(()) + } + } +} diff --git a/harmony/src/modules/okd/mod.rs b/harmony/src/modules/okd/mod.rs index 1bd4514..a12f132 100644 --- a/harmony/src/modules/okd/mod.rs +++ b/harmony/src/modules/okd/mod.rs @@ -19,3 +19,5 @@ pub use bootstrap_03_control_plane::*; pub use bootstrap_04_workers::*; pub use bootstrap_05_sanity_check::*; pub use bootstrap_06_installation_report::*; +pub mod crd; +pub mod host_network; diff --git a/harmony_composer/src/main.rs b/harmony_composer/src/main.rs index 4119460..817bb8b 100644 --- a/harmony_composer/src/main.rs +++ b/harmony_composer/src/main.rs @@ -54,6 +54,9 @@ struct DeployArgs { #[arg(long = "profile", short = 'p', default_value = "dev")] harmony_profile: HarmonyProfile, + + #[arg(long = "dry-run", short = 'd', default_value = "false")] + dry_run: bool, } #[derive(Args, Clone, Debug)] @@ -178,6 +181,7 @@ async fn main() { command .env("HARMONY_USE_LOCAL_K3D", format!("{use_local_k3d}")) .env("HARMONY_PROFILE", format!("{}", args.harmony_profile)) + .env("HARMONY_DRY_RUN", format!("{}", args.dry_run)) .arg("-y") .arg("-a"); From a815f6ac9c06a1a65ba9f6e101e6c12248fedd14 Mon Sep 17 00:00:00 2001 From: Willem Date: Mon, 20 Oct 2025 11:44:11 -0400 Subject: [PATCH 05/12] feat: scrape targets to be able to get snmp alerts from machines to prometheus --- .../topology/oberservability/monitoring.rs | 9 +- .../kube_prometheus/crd/crd_scrape_config.rs | 187 ++++++++++++++++++ 
.../monitoring/kube_prometheus/crd/mod.rs | 1 + .../helm_prometheus_alert_score.rs | 1 + harmony/src/modules/monitoring/mod.rs | 1 + .../modules/monitoring/scrape_target/mod.rs | 1 + .../monitoring/scrape_target/server.rs | 76 +++++++ 7 files changed, 274 insertions(+), 2 deletions(-) create mode 100644 harmony/src/modules/monitoring/kube_prometheus/crd/crd_scrape_config.rs create mode 100644 harmony/src/modules/monitoring/scrape_target/mod.rs create mode 100644 harmony/src/modules/monitoring/scrape_target/server.rs diff --git a/harmony/src/domain/topology/oberservability/monitoring.rs b/harmony/src/domain/topology/oberservability/monitoring.rs index 1489e83..d9fe4d0 100644 --- a/harmony/src/domain/topology/oberservability/monitoring.rs +++ b/harmony/src/domain/topology/oberservability/monitoring.rs @@ -21,6 +21,7 @@ pub struct AlertingInterpret { pub sender: S, pub receivers: Vec>>, pub rules: Vec>>, + pub scrape_target: Vec>>, } #[async_trait] @@ -38,6 +39,10 @@ impl, T: Topology> Interpret for AlertingInte debug!("installing rule: {:#?}", rule); rule.install(&self.sender).await?; } + for target in self.scrape_target.iter() { + debug!("installing scrape_target: {:#?}", target); + target.install(&self.sender).await?; + } self.sender.ensure_installed(inventory, topology).await?; Ok(Outcome::success(format!( "successfully installed alert sender {}", @@ -77,6 +82,6 @@ pub trait AlertRule: std::fmt::Debug + Send + Sync { } #[async_trait] -pub trait ScrapeTarget { - async fn install(&self, sender: &S) -> Result<(), InterpretError>; +pub trait ScrapeTarget: std::fmt::Debug + Send + Sync { + async fn install(&self, sender: &S) -> Result; } diff --git a/harmony/src/modules/monitoring/kube_prometheus/crd/crd_scrape_config.rs b/harmony/src/modules/monitoring/kube_prometheus/crd/crd_scrape_config.rs new file mode 100644 index 0000000..24a2833 --- /dev/null +++ b/harmony/src/modules/monitoring/kube_prometheus/crd/crd_scrape_config.rs @@ -0,0 +1,187 @@ +use std::net::IpAddr; + 
+use async_trait::async_trait; +use kube::CustomResource; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + modules::monitoring::kube_prometheus::crd::{ + crd_alertmanager_config::CRDPrometheus, crd_prometheuses::LabelSelector, + }, + topology::oberservability::monitoring::ScrapeTarget, +}; + +#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)] +#[kube( + group = "monitoring.coreos.com", + version = "v1alpha1", + kind = "ScrapeConfig", + plural = "scrapeconfigs", + namespaced +)] +#[serde(rename_all = "camelCase")] +pub struct ScrapeConfigSpec { + /// List of static configurations. + pub static_configs: Option>, + + /// Kubernetes service discovery. + pub kubernetes_sd_configs: Option>, + + /// HTTP-based service discovery. + pub http_sd_configs: Option>, + + /// File-based service discovery. + pub file_sd_configs: Option>, + + /// DNS-based service discovery. + pub dns_sd_configs: Option>, + + /// Consul service discovery. + pub consul_sd_configs: Option>, + + /// Relabeling configuration applied to discovered targets. + pub relabel_configs: Option>, + + /// Metric relabeling configuration applied to scraped samples. + pub metric_relabel_configs: Option>, + + /// Path to scrape metrics from (defaults to `/metrics`). + pub metrics_path: Option, + + /// Interval at which Prometheus scrapes targets (e.g., "30s"). + pub scrape_interval: Option, + + /// Timeout for scraping (e.g., "10s"). + pub scrape_timeout: Option, + + /// Optional job name override. + pub job_name: Option, + + /// Optional scheme (http or https). + pub scheme: Option, + + /// Authorization paramaters for snmp walk + pub params: Option, +} + +/// Static configuration section of a ScrapeConfig. +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +#[serde(rename_all = "camelCase")] +pub struct StaticConfig { + pub targets: Vec, + + pub labels: Option, +} + +/// Relabeling configuration for target or metric relabeling. 
+#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +#[serde(rename_all = "camelCase")] +pub struct RelabelConfig { + pub source_labels: Option>, + pub separator: Option, + pub target_label: Option, + pub regex: Option, + pub modulus: Option, + pub replacement: Option, + pub action: Option, +} + +/// Kubernetes service discovery configuration. +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +#[serde(rename_all = "camelCase")] +pub struct KubernetesSDConfig { + ///"pod", "service", "endpoints"pub role: String, + pub namespaces: Option, + pub selectors: Option>, + pub api_server: Option, + pub bearer_token_file: Option, + pub tls_config: Option, +} + +/// Namespace selector for Kubernetes service discovery. +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +#[serde(rename_all = "camelCase")] +pub struct NamespaceSelector { + pub any: Option, + pub match_names: Option>, +} + +/// HTTP-based service discovery configuration. +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +#[serde(rename_all = "camelCase")] +pub struct HttpSDConfig { + pub url: String, + pub refresh_interval: Option, + pub basic_auth: Option, + pub authorization: Option, + pub tls_config: Option, +} + +/// File-based service discovery configuration. +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +#[serde(rename_all = "camelCase")] +pub struct FileSDConfig { + pub files: Vec, + pub refresh_interval: Option, +} + +/// DNS-based service discovery configuration. +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +#[serde(rename_all = "camelCase")] +pub struct DnsSDConfig { + pub names: Vec, + pub refresh_interval: Option, + pub type_: Option, // SRV, A, AAAA + pub port: Option, +} + +/// Consul service discovery configuration. 
+#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +#[serde(rename_all = "camelCase")] +pub struct ConsulSDConfig { + pub server: String, + pub services: Option>, + pub scheme: Option, + pub datacenter: Option, + pub tag_separator: Option, + pub refresh_interval: Option, + pub tls_config: Option, +} + +/// Basic authentication credentials. +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +#[serde(rename_all = "camelCase")] +pub struct BasicAuth { + pub username: String, + pub password: Option, + pub password_file: Option, +} + +/// Bearer token or other auth mechanisms. +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +#[serde(rename_all = "camelCase")] +pub struct Authorization { + pub credentials: Option, + pub credentials_file: Option, + pub type_: Option, +} + +/// TLS configuration for secure scraping. +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +#[serde(rename_all = "camelCase")] +pub struct TLSConfig { + pub ca_file: Option, + pub cert_file: Option, + pub key_file: Option, + pub server_name: Option, + pub insecure_skip_verify: Option, +} + +/// Authorization parameters for SNMP walk. 
+#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +#[serde(rename_all = "camelCase")] +pub struct Params { + pub auth: Option>, + pub module: Option>, +} diff --git a/harmony/src/modules/monitoring/kube_prometheus/crd/mod.rs b/harmony/src/modules/monitoring/kube_prometheus/crd/mod.rs index 4dbea74..c8cb854 100644 --- a/harmony/src/modules/monitoring/kube_prometheus/crd/mod.rs +++ b/harmony/src/modules/monitoring/kube_prometheus/crd/mod.rs @@ -4,6 +4,7 @@ pub mod crd_default_rules; pub mod crd_grafana; pub mod crd_prometheus_rules; pub mod crd_prometheuses; +pub mod crd_scrape_config; pub mod grafana_default_dashboard; pub mod grafana_operator; pub mod prometheus_operator; diff --git a/harmony/src/modules/monitoring/kube_prometheus/helm_prometheus_alert_score.rs b/harmony/src/modules/monitoring/kube_prometheus/helm_prometheus_alert_score.rs index c9a0c04..da26b03 100644 --- a/harmony/src/modules/monitoring/kube_prometheus/helm_prometheus_alert_score.rs +++ b/harmony/src/modules/monitoring/kube_prometheus/helm_prometheus_alert_score.rs @@ -31,6 +31,7 @@ impl Score for HelmPrometheusAlert sender: KubePrometheus { config }, receivers: self.receivers.clone(), rules: self.rules.clone(), + scrape_target: vec![], }) } fn name(&self) -> String { diff --git a/harmony/src/modules/monitoring/mod.rs b/harmony/src/modules/monitoring/mod.rs index edda516..7f07d5a 100644 --- a/harmony/src/modules/monitoring/mod.rs +++ b/harmony/src/modules/monitoring/mod.rs @@ -6,3 +6,4 @@ pub mod kube_prometheus; pub mod ntfy; pub mod okd; pub mod prometheus; +pub mod scrape_target; diff --git a/harmony/src/modules/monitoring/scrape_target/mod.rs b/harmony/src/modules/monitoring/scrape_target/mod.rs new file mode 100644 index 0000000..74f47ad --- /dev/null +++ b/harmony/src/modules/monitoring/scrape_target/mod.rs @@ -0,0 +1 @@ +pub mod server; diff --git a/harmony/src/modules/monitoring/scrape_target/server.rs b/harmony/src/modules/monitoring/scrape_target/server.rs new file mode 
100644 index 0000000..ba41f49 --- /dev/null +++ b/harmony/src/modules/monitoring/scrape_target/server.rs @@ -0,0 +1,76 @@ +use std::net::IpAddr; + +use async_trait::async_trait; +use kube::api::ObjectMeta; +use serde::Serialize; + +use crate::{ + interpret::{InterpretError, Outcome}, + modules::monitoring::kube_prometheus::crd::{ + crd_alertmanager_config::CRDPrometheus, + crd_scrape_config::{Params, RelabelConfig, ScrapeConfig, ScrapeConfigSpec, StaticConfig}, + }, + topology::oberservability::monitoring::ScrapeTarget, +}; + +#[derive(Debug, Clone, Serialize)] +pub struct Server { + pub name: String, + pub ip: IpAddr, + pub auth: String, + pub module: String, + pub domain: String, +} + +#[async_trait] +impl ScrapeTarget for Server { + async fn install(&self, sender: &CRDPrometheus) -> Result { + let scrape_config_spec = ScrapeConfigSpec { + static_configs: Some(vec![StaticConfig { + targets: vec![self.ip.to_string()], + labels: None, + }]), + scrape_interval: Some("2m".to_string()), + kubernetes_sd_configs: None, + http_sd_configs: None, + file_sd_configs: None, + dns_sd_configs: None, + params: Some(Params { + auth: Some(vec![self.auth.clone()]), + module: Some(vec![self.module.clone()]), + }), + consul_sd_configs: None, + relabel_configs: Some(vec![RelabelConfig { + action: None, + source_labels: Some(vec!["__address__".to_string()]), + separator: None, + target_label: Some("__param_target".to_string()), + regex: None, + replacement: Some(format!("snmp.{}:31080", self.domain.clone())), + modulus: None, + }]), + metric_relabel_configs: None, + metrics_path: Some("/snmp".to_string()), + scrape_timeout: Some("2m".to_string()), + job_name: Some(format!("snmp_exporter/cloud/{}", self.name.clone())), + scheme: None, + }; + + let scrape_config = ScrapeConfig { + metadata: ObjectMeta { + name: Some(self.name.clone()), + namespace: Some(sender.namespace.clone()), + ..Default::default() + }, + spec: scrape_config_spec, + }; + sender + .client + .apply(&scrape_config, 
Some(&sender.namespace.clone())) + .await?; + Ok(Outcome::success(format!( + "installed scrape target {}", + self.name.clone() + ))) + } +} From cb66b7592ec52ae14f95284669e4aaa159a8dabd Mon Sep 17 00:00:00 2001 From: Willem Date: Mon, 20 Oct 2025 14:44:37 -0400 Subject: [PATCH 06/12] fix: made targets plural and changed scrape targets to option in AlertingInterpret --- .../src/domain/topology/oberservability/monitoring.rs | 10 ++++++---- .../kube_prometheus/helm_prometheus_alert_score.rs | 2 +- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/harmony/src/domain/topology/oberservability/monitoring.rs b/harmony/src/domain/topology/oberservability/monitoring.rs index d9fe4d0..8a1368f 100644 --- a/harmony/src/domain/topology/oberservability/monitoring.rs +++ b/harmony/src/domain/topology/oberservability/monitoring.rs @@ -21,7 +21,7 @@ pub struct AlertingInterpret { pub sender: S, pub receivers: Vec>>, pub rules: Vec>>, - pub scrape_target: Vec>>, + pub scrape_targets: Option>>>, } #[async_trait] @@ -39,9 +39,11 @@ impl, T: Topology> Interpret for AlertingInte debug!("installing rule: {:#?}", rule); rule.install(&self.sender).await?; } - for target in self.scrape_target.iter() { - debug!("installing scrape_target: {:#?}", target); - target.install(&self.sender).await?; + if let Some(targets) = &self.scrape_targets { + for target in targets.iter() { + debug!("installing scrape_target: {:#?}", target); + target.install(&self.sender).await?; + } } self.sender.ensure_installed(inventory, topology).await?; Ok(Outcome::success(format!( diff --git a/harmony/src/modules/monitoring/kube_prometheus/helm_prometheus_alert_score.rs b/harmony/src/modules/monitoring/kube_prometheus/helm_prometheus_alert_score.rs index da26b03..468d308 100644 --- a/harmony/src/modules/monitoring/kube_prometheus/helm_prometheus_alert_score.rs +++ b/harmony/src/modules/monitoring/kube_prometheus/helm_prometheus_alert_score.rs @@ -31,7 +31,7 @@ impl Score for HelmPrometheusAlert sender: 
KubePrometheus { config }, receivers: self.receivers.clone(), rules: self.rules.clone(), - scrape_target: vec![], + scrape_targets: None, }) } fn name(&self) -> String { From ed7f81aa1f1dcc93e65a586c87c1f2c520740b0a Mon Sep 17 00:00:00 2001 From: Ian Letourneau Date: Mon, 20 Oct 2025 19:18:49 +0000 Subject: [PATCH 07/12] fix(opnsense-config): ensure load balancer service configuration is idempotent (#129) The previous implementation blindly added HAProxy components without checking for existing configurations on the same port, which caused duplicate entries and errors when a service was updated. This commit refactors the logic to a robust "remove-then-add" strategy. The configure_service method now finds and removes any existing frontend and its dependent components (backend, servers, health check) before adding the new, complete service definition. This change makes the process fully idempotent, preventing configuration drift and ensuring a predictable state. Co-authored-by: Ian Letourneau Reviewed-on: https://git.nationtech.io/NationTech/harmony/pulls/129 --- Cargo.lock | 3 + Cargo.toml | 3 +- harmony/src/domain/topology/load_balancer.rs | 8 +- harmony/src/infra/opnsense/load_balancer.rs | 53 ++- .../modules/okd/bootstrap_load_balancer.rs | 2 + opnsense-config-xml/src/data/haproxy.rs | 16 +- opnsense-config/Cargo.toml | 1 + opnsense-config/src/config/manager/ssh.rs | 3 +- opnsense-config/src/config/shell/mod.rs | 6 +- opnsense-config/src/modules/load_balancer.rs | 332 +++++++++++++++++- 10 files changed, 360 insertions(+), 67 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0b5972f..429f09b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1918,6 +1918,8 @@ dependencies = [ "env_logger", "harmony", "harmony_macros", + "harmony_secret", + "harmony_secret_derive", "harmony_tui", "harmony_types", "log", @@ -3918,6 +3920,7 @@ checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" name = "opnsense-config" version = "0.1.0" dependencies = [ + 
"assertor", "async-trait", "chrono", "env_logger", diff --git a/Cargo.toml b/Cargo.toml index a10bf81..a256234 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,7 +15,8 @@ members = [ "harmony_inventory_agent", "harmony_secret_derive", "harmony_secret", - "adr/agent_discovery/mdns", "brocade", + "adr/agent_discovery/mdns", + "brocade", ] [workspace.package] diff --git a/harmony/src/domain/topology/load_balancer.rs b/harmony/src/domain/topology/load_balancer.rs index 901602b..59c5add 100644 --- a/harmony/src/domain/topology/load_balancer.rs +++ b/harmony/src/domain/topology/load_balancer.rs @@ -28,13 +28,7 @@ pub trait LoadBalancer: Send + Sync { &self, service: &LoadBalancerService, ) -> Result<(), ExecutorError> { - debug!( - "Listing LoadBalancer services {:?}", - self.list_services().await - ); - if !self.list_services().await.contains(service) { - self.add_service(service).await?; - } + self.add_service(service).await?; Ok(()) } } diff --git a/harmony/src/infra/opnsense/load_balancer.rs b/harmony/src/infra/opnsense/load_balancer.rs index ce47f05..3df7511 100644 --- a/harmony/src/infra/opnsense/load_balancer.rs +++ b/harmony/src/infra/opnsense/load_balancer.rs @@ -26,19 +26,13 @@ impl LoadBalancer for OPNSenseFirewall { } async fn add_service(&self, service: &LoadBalancerService) -> Result<(), ExecutorError> { - warn!( - "TODO : the current implementation does not check / cleanup / merge with existing haproxy services properly. 
Make sure to manually verify that the configuration is correct after executing any operation here" - ); let mut config = self.opnsense_config.write().await; + let mut load_balancer = config.load_balancer(); + let (frontend, backend, servers, healthcheck) = harmony_load_balancer_service_to_haproxy_xml(service); - let mut load_balancer = config.load_balancer(); - load_balancer.add_backend(backend); - load_balancer.add_frontend(frontend); - load_balancer.add_servers(servers); - if let Some(healthcheck) = healthcheck { - load_balancer.add_healthcheck(healthcheck); - } + + load_balancer.configure_service(frontend, backend, servers, healthcheck); Ok(()) } @@ -106,7 +100,7 @@ pub(crate) fn haproxy_xml_config_to_harmony_loadbalancer( .backends .backends .iter() - .find(|b| b.uuid == frontend.default_backend); + .find(|b| Some(b.uuid.clone()) == frontend.default_backend); let mut health_check = None; match matching_backend { @@ -116,8 +110,7 @@ pub(crate) fn haproxy_xml_config_to_harmony_loadbalancer( } None => { warn!( - "HAProxy config could not find a matching backend for frontend {:?}", - frontend + "HAProxy config could not find a matching backend for frontend {frontend:?}" ); } } @@ -152,11 +145,11 @@ pub(crate) fn get_servers_for_backend( .servers .iter() .filter_map(|server| { + let address = server.address.clone()?; + let port = server.port?; + if backend_servers.contains(&server.uuid.as_str()) { - return Some(BackendServer { - address: server.address.clone(), - port: server.port, - }); + return Some(BackendServer { address, port }); } None }) @@ -347,7 +340,7 @@ pub(crate) fn harmony_load_balancer_service_to_haproxy_xml( name: format!("frontend_{}", service.listening_port), bind: service.listening_port.to_string(), mode: "tcp".to_string(), // TODO do not depend on health check here - default_backend: backend.uuid.clone(), + default_backend: Some(backend.uuid.clone()), ..Default::default() }; info!("HAPRoxy frontend and backend mode currently hardcoded to tcp"); @@ 
-361,8 +354,8 @@ fn server_to_haproxy_server(server: &BackendServer) -> HAProxyServer { uuid: Uuid::new_v4().to_string(), name: format!("{}_{}", &server.address, &server.port), enabled: 1, - address: server.address.clone(), - port: server.port, + address: Some(server.address.clone()), + port: Some(server.port), mode: "active".to_string(), server_type: "static".to_string(), ..Default::default() @@ -385,8 +378,8 @@ mod tests { let mut haproxy = HAProxy::default(); let server = HAProxyServer { uuid: "server1".to_string(), - address: "192.168.1.1".to_string(), - port: 80, + address: Some("192.168.1.1".to_string()), + port: Some(80), ..Default::default() }; haproxy.servers.servers.push(server); @@ -411,8 +404,8 @@ mod tests { let mut haproxy = HAProxy::default(); let server = HAProxyServer { uuid: "server1".to_string(), - address: "192.168.1.1".to_string(), - port: 80, + address: Some("192.168.1.1".to_string()), + port: Some(80), ..Default::default() }; haproxy.servers.servers.push(server); @@ -431,8 +424,8 @@ mod tests { let mut haproxy = HAProxy::default(); let server = HAProxyServer { uuid: "server1".to_string(), - address: "192.168.1.1".to_string(), - port: 80, + address: Some("192.168.1.1".to_string()), + port: Some(80), ..Default::default() }; haproxy.servers.servers.push(server); @@ -453,16 +446,16 @@ mod tests { let mut haproxy = HAProxy::default(); let server = HAProxyServer { uuid: "server1".to_string(), - address: "some-hostname.test.mcd".to_string(), - port: 80, + address: Some("some-hostname.test.mcd".to_string()), + port: Some(80), ..Default::default() }; haproxy.servers.servers.push(server); let server = HAProxyServer { uuid: "server2".to_string(), - address: "192.168.1.2".to_string(), - port: 8080, + address: Some("192.168.1.2".to_string()), + port: Some(8080), ..Default::default() }; haproxy.servers.servers.push(server); diff --git a/harmony/src/modules/okd/bootstrap_load_balancer.rs b/harmony/src/modules/okd/bootstrap_load_balancer.rs index 
ccc69c9..e99fe97 100644 --- a/harmony/src/modules/okd/bootstrap_load_balancer.rs +++ b/harmony/src/modules/okd/bootstrap_load_balancer.rs @@ -77,6 +77,8 @@ impl OKDBootstrapLoadBalancerScore { address: topology.bootstrap_host.ip.to_string(), port, }); + + backend.dedup(); backend } } diff --git a/opnsense-config-xml/src/data/haproxy.rs b/opnsense-config-xml/src/data/haproxy.rs index ef631f3..b0aedc2 100644 --- a/opnsense-config-xml/src/data/haproxy.rs +++ b/opnsense-config-xml/src/data/haproxy.rs @@ -77,7 +77,7 @@ impl YaSerializeTrait for HAProxyId { } } -#[derive(PartialEq, Debug)] +#[derive(PartialEq, Debug, Clone)] pub struct HAProxyId(String); impl Default for HAProxyId { @@ -297,7 +297,7 @@ pub struct HAProxyFrontends { pub frontend: Vec, } -#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)] +#[derive(Clone, Default, PartialEq, Debug, YaSerialize, YaDeserialize)] pub struct Frontend { #[yaserde(attribute = true)] pub uuid: String, @@ -310,7 +310,7 @@ pub struct Frontend { pub bind_options: MaybeString, pub mode: String, #[yaserde(rename = "defaultBackend")] - pub default_backend: String, + pub default_backend: Option, pub ssl_enabled: i32, pub ssl_certificates: MaybeString, pub ssl_default_certificate: MaybeString, @@ -416,7 +416,7 @@ pub struct HAProxyBackends { pub backends: Vec, } -#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)] +#[derive(Clone, Default, PartialEq, Debug, YaSerialize, YaDeserialize)] pub struct HAProxyBackend { #[yaserde(attribute = true, rename = "uuid")] pub uuid: String, @@ -535,7 +535,7 @@ pub struct HAProxyServers { pub servers: Vec, } -#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)] +#[derive(Clone, Default, PartialEq, Debug, YaSerialize, YaDeserialize)] pub struct HAProxyServer { #[yaserde(attribute = true, rename = "uuid")] pub uuid: String, @@ -543,8 +543,8 @@ pub struct HAProxyServer { pub enabled: u8, pub name: String, pub description: MaybeString, - pub address: String, - pub 
port: u16, + pub address: Option, + pub port: Option, pub checkport: MaybeString, pub mode: String, pub multiplexer_protocol: MaybeString, @@ -589,7 +589,7 @@ pub struct HAProxyHealthChecks { pub healthchecks: Vec, } -#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)] +#[derive(Clone, Default, PartialEq, Debug, YaSerialize, YaDeserialize)] pub struct HAProxyHealthCheck { #[yaserde(attribute = true)] pub uuid: String, diff --git a/opnsense-config/Cargo.toml b/opnsense-config/Cargo.toml index 0580cb2..bb682df 100644 --- a/opnsense-config/Cargo.toml +++ b/opnsense-config/Cargo.toml @@ -25,6 +25,7 @@ sha2 = "0.10.9" [dev-dependencies] pretty_assertions.workspace = true +assertor.workspace = true [lints.rust] unexpected_cfgs = { level = "warn", check-cfg = ['cfg(e2e_test)'] } diff --git a/opnsense-config/src/config/manager/ssh.rs b/opnsense-config/src/config/manager/ssh.rs index 4b2fe64..afe232f 100644 --- a/opnsense-config/src/config/manager/ssh.rs +++ b/opnsense-config/src/config/manager/ssh.rs @@ -30,8 +30,7 @@ impl SshConfigManager { self.opnsense_shell .exec(&format!( - "cp /conf/config.xml /conf/backup/{}", - backup_filename + "cp /conf/config.xml /conf/backup/{backup_filename}" )) .await } diff --git a/opnsense-config/src/config/shell/mod.rs b/opnsense-config/src/config/shell/mod.rs index aa03837..aa94ff1 100644 --- a/opnsense-config/src/config/shell/mod.rs +++ b/opnsense-config/src/config/shell/mod.rs @@ -1,9 +1,7 @@ mod ssh; -pub use ssh::*; - -use async_trait::async_trait; - use crate::Error; +use async_trait::async_trait; +pub use ssh::*; #[async_trait] pub trait OPNsenseShell: std::fmt::Debug + Send + Sync { diff --git a/opnsense-config/src/modules/load_balancer.rs b/opnsense-config/src/modules/load_balancer.rs index 6c71ed4..00cb364 100644 --- a/opnsense-config/src/modules/load_balancer.rs +++ b/opnsense-config/src/modules/load_balancer.rs @@ -1,11 +1,8 @@ -use std::sync::Arc; - -use log::warn; +use crate::{config::OPNsenseShell, Error}; use 
opnsense_config_xml::{ Frontend, HAProxy, HAProxyBackend, HAProxyHealthCheck, HAProxyServer, OPNsense, }; - -use crate::{config::OPNsenseShell, Error}; +use std::{collections::HashSet, sync::Arc}; pub struct LoadBalancerConfig<'a> { opnsense: &'a mut OPNsense, @@ -31,7 +28,7 @@ impl<'a> LoadBalancerConfig<'a> { match &mut self.opnsense.opnsense.haproxy.as_mut() { Some(haproxy) => f(haproxy), None => unimplemented!( - "Adding a backend is not supported when haproxy config does not exist yet" + "Cannot configure load balancer when haproxy config does not exist yet" ), } } @@ -40,21 +37,67 @@ impl<'a> LoadBalancerConfig<'a> { self.with_haproxy(|haproxy| haproxy.general.enabled = enabled as i32); } - pub fn add_backend(&mut self, backend: HAProxyBackend) { - warn!("TODO make sure this new backend does not refer non-existing entities like servers or health checks"); - self.with_haproxy(|haproxy| haproxy.backends.backends.push(backend)); + /// Configures a service by removing any existing service on the same port + /// and then adding the new definition. This ensures idempotency. + pub fn configure_service( + &mut self, + frontend: Frontend, + backend: HAProxyBackend, + servers: Vec, + healthcheck: Option, + ) { + self.remove_service_by_bind_address(&frontend.bind); + self.remove_servers(&servers); + + self.add_new_service(frontend, backend, servers, healthcheck); } - pub fn add_frontend(&mut self, frontend: Frontend) { - self.with_haproxy(|haproxy| haproxy.frontends.frontend.push(frontend)); + // Remove the corresponding real servers based on their name if they already exist. 
+ fn remove_servers(&mut self, servers: &[HAProxyServer]) { + let server_names: HashSet<_> = servers.iter().map(|s| s.name.clone()).collect(); + self.with_haproxy(|haproxy| { + haproxy + .servers + .servers + .retain(|s| !server_names.contains(&s.name)); + }); } - pub fn add_healthcheck(&mut self, healthcheck: HAProxyHealthCheck) { - self.with_haproxy(|haproxy| haproxy.healthchecks.healthchecks.push(healthcheck)); + /// Removes a service and its dependent components based on the frontend's bind address. + /// This performs a cascading delete of the frontend, backend, servers, and health check. + fn remove_service_by_bind_address(&mut self, bind_address: &str) { + self.with_haproxy(|haproxy| { + let Some(old_frontend) = remove_frontend_by_bind_address(haproxy, bind_address) else { + return; + }; + + let Some(old_backend) = remove_backend(haproxy, old_frontend) else { + return; + }; + + remove_healthcheck(haproxy, &old_backend); + remove_linked_servers(haproxy, &old_backend); + }); } - pub fn add_servers(&mut self, mut servers: Vec) { - self.with_haproxy(|haproxy| haproxy.servers.servers.append(&mut servers)); + /// Adds the components of a new service to the HAProxy configuration. + /// This function de-duplicates servers by name to prevent configuration errors. 
+ fn add_new_service( + &mut self, + frontend: Frontend, + backend: HAProxyBackend, + servers: Vec, + healthcheck: Option, + ) { + self.with_haproxy(|haproxy| { + if let Some(check) = healthcheck { + haproxy.healthchecks.healthchecks.push(check); + } + + haproxy.servers.servers.extend(servers); + haproxy.backends.backends.push(backend); + haproxy.frontends.frontend.push(frontend); + }); } pub async fn reload_restart(&self) -> Result<(), Error> { @@ -82,3 +125,262 @@ impl<'a> LoadBalancerConfig<'a> { Ok(()) } } + +fn remove_frontend_by_bind_address(haproxy: &mut HAProxy, bind_address: &str) -> Option { + let pos = haproxy + .frontends + .frontend + .iter() + .position(|f| f.bind == bind_address); + + match pos { + Some(pos) => Some(haproxy.frontends.frontend.remove(pos)), + None => None, + } +} + +fn remove_backend(haproxy: &mut HAProxy, old_frontend: Frontend) -> Option { + let default_backend = old_frontend.default_backend?; + let pos = haproxy + .backends + .backends + .iter() + .position(|b| b.uuid == default_backend); + + match pos { + Some(pos) => Some(haproxy.backends.backends.remove(pos)), + None => None, // orphaned frontend, shouldn't happen + } +} + +fn remove_healthcheck(haproxy: &mut HAProxy, backend: &HAProxyBackend) { + if let Some(uuid) = &backend.health_check.content { + haproxy + .healthchecks + .healthchecks + .retain(|h| h.uuid != *uuid); + } +} + +/// Remove the backend's servers. This assumes servers are not shared between services. 
+fn remove_linked_servers(haproxy: &mut HAProxy, backend: &HAProxyBackend) { + if let Some(server_uuids_str) = &backend.linked_servers.content { + let server_uuids_to_remove: HashSet<_> = server_uuids_str.split(',').collect(); + haproxy + .servers + .servers + .retain(|s| !server_uuids_to_remove.contains(s.uuid.as_str())); + } +} + +#[cfg(test)] +mod tests { + use crate::config::DummyOPNSenseShell; + use assertor::*; + use opnsense_config_xml::{ + Frontend, HAProxy, HAProxyBackend, HAProxyBackends, HAProxyFrontends, HAProxyHealthCheck, + HAProxyHealthChecks, HAProxyId, HAProxyServer, HAProxyServers, MaybeString, OPNsense, + }; + use std::sync::Arc; + + use super::LoadBalancerConfig; + + static SERVICE_BIND_ADDRESS: &str = "192.168.1.1:80"; + static OTHER_SERVICE_BIND_ADDRESS: &str = "192.168.1.1:443"; + + static SERVER_ADDRESS: &str = "1.1.1.1:80"; + static OTHER_SERVER_ADDRESS: &str = "1.1.1.1:443"; + + #[test] + fn configure_service_should_add_all_service_components_to_haproxy() { + let mut opnsense = given_opnsense(); + let mut load_balancer = given_load_balancer(&mut opnsense); + let (healthcheck, servers, backend, frontend) = + given_service(SERVICE_BIND_ADDRESS, SERVER_ADDRESS); + + load_balancer.configure_service( + frontend.clone(), + backend.clone(), + servers.clone(), + Some(healthcheck.clone()), + ); + + assert_haproxy_configured_with( + opnsense, + vec![frontend], + vec![backend], + servers, + vec![healthcheck], + ); + } + + #[test] + fn configure_service_should_replace_service_on_same_bind_address() { + let (healthcheck, servers, backend, frontend) = + given_service(SERVICE_BIND_ADDRESS, SERVER_ADDRESS); + let mut opnsense = given_opnsense_with(given_haproxy( + vec![frontend.clone()], + vec![backend.clone()], + servers.clone(), + vec![healthcheck.clone()], + )); + let mut load_balancer = given_load_balancer(&mut opnsense); + + let (updated_healthcheck, updated_servers, updated_backend, updated_frontend) = + given_service(SERVICE_BIND_ADDRESS, 
OTHER_SERVER_ADDRESS); + + load_balancer.configure_service( + updated_frontend.clone(), + updated_backend.clone(), + updated_servers.clone(), + Some(updated_healthcheck.clone()), + ); + + assert_haproxy_configured_with( + opnsense, + vec![updated_frontend], + vec![updated_backend], + updated_servers, + vec![updated_healthcheck], + ); + } + + #[test] + fn configure_service_should_keep_existing_service_on_different_bind_addresses() { + let (healthcheck, servers, backend, frontend) = + given_service(SERVICE_BIND_ADDRESS, SERVER_ADDRESS); + let (other_healthcheck, other_servers, other_backend, other_frontend) = + given_service(OTHER_SERVICE_BIND_ADDRESS, OTHER_SERVER_ADDRESS); + let mut opnsense = given_opnsense_with(given_haproxy( + vec![frontend.clone()], + vec![backend.clone()], + servers.clone(), + vec![healthcheck.clone()], + )); + let mut load_balancer = given_load_balancer(&mut opnsense); + + load_balancer.configure_service( + other_frontend.clone(), + other_backend.clone(), + other_servers.clone(), + Some(other_healthcheck.clone()), + ); + + assert_haproxy_configured_with( + opnsense, + vec![frontend, other_frontend], + vec![backend, other_backend], + [servers, other_servers].concat(), + vec![healthcheck, other_healthcheck], + ); + } + + fn assert_haproxy_configured_with( + opnsense: OPNsense, + frontends: Vec, + backends: Vec, + servers: Vec, + healthchecks: Vec, + ) { + let haproxy = opnsense.opnsense.haproxy.as_ref().unwrap(); + assert_that!(haproxy.frontends.frontend).contains_exactly(frontends); + assert_that!(haproxy.backends.backends).contains_exactly(backends); + assert_that!(haproxy.servers.servers).is_equal_to(servers); + assert_that!(haproxy.healthchecks.healthchecks).contains_exactly(healthchecks); + } + + fn given_opnsense() -> OPNsense { + OPNsense::default() + } + + fn given_opnsense_with(haproxy: HAProxy) -> OPNsense { + let mut opnsense = OPNsense::default(); + opnsense.opnsense.haproxy = Some(haproxy); + + opnsense + } + + fn 
given_load_balancer<'a>(opnsense: &'a mut OPNsense) -> LoadBalancerConfig<'a> { + let opnsense_shell = Arc::new(DummyOPNSenseShell {}); + if opnsense.opnsense.haproxy.is_none() { + opnsense.opnsense.haproxy = Some(HAProxy::default()); + } + LoadBalancerConfig::new(opnsense, opnsense_shell) + } + + fn given_service( + bind_address: &str, + server_address: &str, + ) -> ( + HAProxyHealthCheck, + Vec, + HAProxyBackend, + Frontend, + ) { + let healthcheck = given_healthcheck(); + let servers = vec![given_server(server_address)]; + let backend = given_backend(); + let frontend = given_frontend(bind_address); + (healthcheck, servers, backend, frontend) + } + + fn given_haproxy( + frontends: Vec, + backends: Vec, + servers: Vec, + healthchecks: Vec, + ) -> HAProxy { + HAProxy { + frontends: HAProxyFrontends { + frontend: frontends, + }, + backends: HAProxyBackends { backends }, + servers: HAProxyServers { servers }, + healthchecks: HAProxyHealthChecks { healthchecks }, + ..Default::default() + } + } + + fn given_frontend(bind_address: &str) -> Frontend { + Frontend { + uuid: "uuid".into(), + id: HAProxyId::default(), + enabled: 1, + name: format!("frontend_{bind_address}"), + bind: bind_address.into(), + default_backend: Some("backend-uuid".into()), + ..Default::default() + } + } + + fn given_backend() -> HAProxyBackend { + HAProxyBackend { + uuid: "backend-uuid".into(), + id: HAProxyId::default(), + enabled: 1, + name: "backend_192.168.1.1:80".into(), + linked_servers: MaybeString::from("server-uuid"), + health_check_enabled: 1, + health_check: MaybeString::from("healthcheck-uuid"), + ..Default::default() + } + } + + fn given_server(address: &str) -> HAProxyServer { + HAProxyServer { + uuid: "server-uuid".into(), + id: HAProxyId::default(), + name: address.into(), + address: Some(address.into()), + ..Default::default() + } + } + + fn given_healthcheck() -> HAProxyHealthCheck { + HAProxyHealthCheck { + uuid: "healthcheck-uuid".into(), + name: "healthcheck".into(), + 
..Default::default() + } + } +} From 2a48d51479a5ad004aa8e9b47c0a08bd9816eb21 Mon Sep 17 00:00:00 2001 From: Willem Date: Tue, 21 Oct 2025 11:09:45 -0400 Subject: [PATCH 08/12] fix: naming of k8s distribution --- harmony/src/domain/topology/k8s_anywhere.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/harmony/src/domain/topology/k8s_anywhere.rs b/harmony/src/domain/topology/k8s_anywhere.rs index 5e448b8..1c2f764 100644 --- a/harmony/src/domain/topology/k8s_anywhere.rs +++ b/harmony/src/domain/topology/k8s_anywhere.rs @@ -64,7 +64,7 @@ enum K8sSource { pub struct K8sAnywhereTopology { k8s_state: Arc>>, tenant_manager: Arc>, - flavour: Arc>, + k8s_distribution: Arc>, config: Arc, } @@ -170,7 +170,7 @@ impl K8sAnywhereTopology { Self { k8s_state: Arc::new(OnceCell::new()), tenant_manager: Arc::new(OnceCell::new()), - flavour: Arc::new(OnceCell::new()), + k8s_distribution: Arc::new(OnceCell::new()), config: Arc::new(K8sAnywhereConfig::from_env()), } } @@ -179,13 +179,13 @@ impl K8sAnywhereTopology { Self { k8s_state: Arc::new(OnceCell::new()), tenant_manager: Arc::new(OnceCell::new()), - flavour: Arc::new(OnceCell::new()), + k8s_distribution: Arc::new(OnceCell::new()), config: Arc::new(config), } } pub async fn get_k8s_distribution(&self) -> Result<&KubernetesDistribution, PreparationError> { - self.flavour + self.k8s_distribution .get_or_try_init(async || { let client = self.k8s_client().await.unwrap(); From 14d1823d153ee3d2d87572fcc31ebb259db6f0e1 Mon Sep 17 00:00:00 2001 From: Willem Date: Tue, 21 Oct 2025 15:54:51 +0000 Subject: [PATCH 09/12] fix: remove ceph osd deletes and purges osd from ceph osd tree\ (#120) k8s returns None rather than zero when checking deployment for replicas exec_app requires commands 's' and '-c' to run correctly Reviewed-on: https://git.nationtech.io/NationTech/harmony/pulls/120 Co-authored-by: Willem Co-committed-by: Willem --- Cargo.lock | 9 +++ examples/remove_rook_osd/Cargo.toml | 11 ++++ 
examples/remove_rook_osd/src/main.rs | 18 ++++++ harmony/src/domain/interpret/mod.rs | 5 ++ harmony/src/domain/topology/k8s.rs | 7 ++- ...ment_score.rs => ceph_remove_osd_score.rs} | 60 ++++++++++--------- harmony/src/modules/storage/ceph/mod.rs | 2 +- 7 files changed, 80 insertions(+), 32 deletions(-) create mode 100644 examples/remove_rook_osd/Cargo.toml create mode 100644 examples/remove_rook_osd/src/main.rs rename harmony/src/modules/storage/ceph/{ceph_osd_replacement_score.rs => ceph_remove_osd_score.rs} (90%) diff --git a/Cargo.lock b/Cargo.lock index 429f09b..666fe3a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4613,6 +4613,15 @@ version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "caf4aa5b0f434c91fe5c7f1ecb6a5ece2130b02ad2a590589dda5146df959001" +[[package]] +name = "remove_rook_osd" +version = "0.1.0" +dependencies = [ + "harmony", + "harmony_cli", + "tokio", +] + [[package]] name = "reqwest" version = "0.11.27" diff --git a/examples/remove_rook_osd/Cargo.toml b/examples/remove_rook_osd/Cargo.toml new file mode 100644 index 0000000..6e35ac0 --- /dev/null +++ b/examples/remove_rook_osd/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "example-remove-rook-osd" +edition = "2024" +version.workspace = true +readme.workspace = true +license.workspace = true + +[dependencies] +harmony = { version = "0.1.0", path = "../../harmony" } +harmony_cli = { version = "0.1.0", path = "../../harmony_cli" } +tokio.workspace = true diff --git a/examples/remove_rook_osd/src/main.rs b/examples/remove_rook_osd/src/main.rs new file mode 100644 index 0000000..2794927 --- /dev/null +++ b/examples/remove_rook_osd/src/main.rs @@ -0,0 +1,18 @@ +use harmony::{ + inventory::Inventory, modules::storage::ceph::ceph_remove_osd_score::CephRemoveOsd, + topology::K8sAnywhereTopology, +}; + +#[tokio::main] +async fn main() { + let ceph_score = CephRemoveOsd { + osd_deployment_name: "rook-ceph-osd-2".to_string(), + rook_ceph_namespace: 
"rook-ceph".to_string(), + }; + + let topology = K8sAnywhereTopology::from_env(); + let inventory = Inventory::autoload(); + harmony_cli::run(inventory, topology, vec![Box::new(ceph_score)], None) + .await + .unwrap(); +} diff --git a/harmony/src/domain/interpret/mod.rs b/harmony/src/domain/interpret/mod.rs index d555d9e..ad3dac3 100644 --- a/harmony/src/domain/interpret/mod.rs +++ b/harmony/src/domain/interpret/mod.rs @@ -30,6 +30,7 @@ pub enum InterpretName { Lamp, ApplicationMonitoring, K8sPrometheusCrdAlerting, + CephRemoveOsd, DiscoverInventoryAgent, CephClusterHealth, Custom(&'static str), @@ -61,7 +62,11 @@ impl std::fmt::Display for InterpretName { InterpretName::Lamp => f.write_str("LAMP"), InterpretName::ApplicationMonitoring => f.write_str("ApplicationMonitoring"), InterpretName::K8sPrometheusCrdAlerting => f.write_str("K8sPrometheusCrdAlerting"), +<<<<<<< HEAD + InterpretName::CephRemoveOsd => f.write_str("CephRemoveOsd"), +======= InterpretName::DiscoverInventoryAgent => f.write_str("DiscoverInventoryAgent"), +>>>>>>> origin/master InterpretName::CephClusterHealth => f.write_str("CephClusterHealth"), InterpretName::Custom(name) => f.write_str(name), InterpretName::RHOBAlerting => f.write_str("RHOBAlerting"), diff --git a/harmony/src/domain/topology/k8s.rs b/harmony/src/domain/topology/k8s.rs index 5a1e6ec..1fd41e9 100644 --- a/harmony/src/domain/topology/k8s.rs +++ b/harmony/src/domain/topology/k8s.rs @@ -21,7 +21,7 @@ use kube::{ api::{ApiResource, GroupVersionKind}, runtime::wait::await_condition, }; -use log::{debug, error, trace}; +use log::{debug, error, info, trace}; use serde::{Serialize, de::DeserializeOwned}; use serde_json::{Value, json}; use similar::TextDiff; @@ -80,10 +80,13 @@ impl K8sClient { namespace: Option<&str>, ) -> Result, Error> { let deps: Api = if let Some(ns) = namespace { + debug!("getting namespaced deployment"); Api::namespaced(self.client.clone(), ns) } else { + debug!("getting default namespace deployment"); 
Api::default_namespaced(self.client.clone()) }; + debug!("getting deployment {} in ns {}", name, namespace.unwrap()); Ok(deps.get_opt(name).await?) } @@ -114,7 +117,7 @@ impl K8sClient { } }); let pp = PatchParams::default(); - let scale = Patch::Apply(&patch); + let scale = Patch::Merge(&patch); deployments.patch_scale(name, &pp, &scale).await?; Ok(()) } diff --git a/harmony/src/modules/storage/ceph/ceph_osd_replacement_score.rs b/harmony/src/modules/storage/ceph/ceph_remove_osd_score.rs similarity index 90% rename from harmony/src/modules/storage/ceph/ceph_osd_replacement_score.rs rename to harmony/src/modules/storage/ceph/ceph_remove_osd_score.rs index 77dd24a..787f9cc 100644 --- a/harmony/src/modules/storage/ceph/ceph_osd_replacement_score.rs +++ b/harmony/src/modules/storage/ceph/ceph_remove_osd_score.rs @@ -4,7 +4,7 @@ use std::{ }; use async_trait::async_trait; -use log::{info, warn}; +use log::{debug, warn}; use serde::{Deserialize, Serialize}; use tokio::time::sleep; @@ -19,8 +19,8 @@ use harmony_types::id::Id; #[derive(Debug, Clone, Serialize)] pub struct CephRemoveOsd { - osd_deployment_name: String, - rook_ceph_namespace: String, + pub osd_deployment_name: String, + pub rook_ceph_namespace: String, } impl Score for CephRemoveOsd { @@ -54,18 +54,17 @@ impl Interpret for CephRemoveOsdInterpret { self.verify_deployment_scaled(client.clone()).await?; self.delete_deployment(client.clone()).await?; self.verify_deployment_deleted(client.clone()).await?; - let osd_id_full = self.get_ceph_osd_id().unwrap(); - self.purge_ceph_osd(client.clone(), &osd_id_full).await?; - self.verify_ceph_osd_removal(client.clone(), &osd_id_full) - .await?; + self.purge_ceph_osd(client.clone()).await?; + self.verify_ceph_osd_removal(client.clone()).await?; + let osd_id_full = self.get_ceph_osd_id().unwrap(); Ok(Outcome::success(format!( "Successfully removed OSD {} from rook-ceph cluster by deleting deployment {}", osd_id_full, self.score.osd_deployment_name ))) } fn get_name(&self) 
-> InterpretName { - todo!() + InterpretName::CephRemoveOsd } fn get_version(&self) -> Version { @@ -82,7 +81,7 @@ impl Interpret for CephRemoveOsdInterpret { } impl CephRemoveOsdInterpret { - pub fn get_ceph_osd_id(&self) -> Result { + pub fn get_ceph_osd_id_numeric(&self) -> Result { let osd_id_numeric = self .score .osd_deployment_name @@ -94,9 +93,14 @@ impl CephRemoveOsdInterpret { self.score.osd_deployment_name )) })?; + Ok(osd_id_numeric.to_string()) + } + + pub fn get_ceph_osd_id(&self) -> Result { + let osd_id_numeric = self.get_ceph_osd_id_numeric().unwrap(); let osd_id_full = format!("osd.{}", osd_id_numeric); - info!( + debug!( "Targeting Ceph OSD: {} (parsed from deployment {})", osd_id_full, self.score.osd_deployment_name ); @@ -108,6 +112,7 @@ impl CephRemoveOsdInterpret { &self, client: Arc, ) -> Result { + debug!("verifying toolbox exists"); let toolbox_dep = "rook-ceph-tools".to_string(); match client @@ -149,7 +154,7 @@ impl CephRemoveOsdInterpret { &self, client: Arc, ) -> Result { - info!( + debug!( "Scaling down OSD deployment: {}", self.score.osd_deployment_name ); @@ -172,7 +177,7 @@ impl CephRemoveOsdInterpret { ) -> Result { let (timeout, interval, start) = self.build_timer(); - info!("Waiting for OSD deployment to scale down to 0 replicas"); + debug!("Waiting for OSD deployment to scale down to 0 replicas"); loop { let dep = client .get_deployment( @@ -180,11 +185,9 @@ impl CephRemoveOsdInterpret { Some(&self.score.rook_ceph_namespace), ) .await?; - if let Some(deployment) = dep { if let Some(status) = deployment.status { - if status.replicas.unwrap_or(1) == 0 && status.ready_replicas.unwrap_or(1) == 0 - { + if status.replicas == None && status.ready_replicas == None { return Ok(Outcome::success( "Deployment successfully scaled down.".to_string(), )); @@ -212,7 +215,7 @@ impl CephRemoveOsdInterpret { &self, client: Arc, ) -> Result { - info!( + debug!( "Deleting OSD deployment: {}", self.score.osd_deployment_name ); @@ -234,7 +237,7 @@ 
impl CephRemoveOsdInterpret { ) -> Result { let (timeout, interval, start) = self.build_timer(); - info!("Waiting for OSD deployment to scale down to 0 replicas"); + debug!("Verifying OSD deployment deleted"); loop { let dep = client .get_deployment( @@ -244,7 +247,7 @@ impl CephRemoveOsdInterpret { .await?; if dep.is_none() { - info!( + debug!( "Deployment {} successfully deleted.", self.score.osd_deployment_name ); @@ -276,12 +279,10 @@ impl CephRemoveOsdInterpret { Ok(tree) } - pub async fn purge_ceph_osd( - &self, - client: Arc, - osd_id_full: &str, - ) -> Result { - info!( + pub async fn purge_ceph_osd(&self, client: Arc) -> Result { + let osd_id_numeric = self.get_ceph_osd_id_numeric().unwrap(); + let osd_id_full = self.get_ceph_osd_id().unwrap(); + debug!( "Purging OSD {} from Ceph cluster and removing its auth key", osd_id_full ); @@ -291,8 +292,9 @@ impl CephRemoveOsdInterpret { "app".to_string(), Some(&self.score.rook_ceph_namespace), vec![ - format!("ceph osd purge {osd_id_full} --yes-i-really-mean-it").as_str(), - format!("ceph auth del osd.{osd_id_full}").as_str(), + "sh", + "-c", + format!("ceph osd purge {osd_id_numeric} --yes-i-really-mean-it && ceph auth del {osd_id_full}").as_str(), ], ) .await?; @@ -305,10 +307,10 @@ impl CephRemoveOsdInterpret { pub async fn verify_ceph_osd_removal( &self, client: Arc, - osd_id_full: &str, ) -> Result { let (timeout, interval, start) = self.build_timer(); - info!( + let osd_id_full = self.get_ceph_osd_id().unwrap(); + debug!( "Verifying OSD {} has been removed from the Ceph tree...", osd_id_full ); @@ -318,7 +320,7 @@ impl CephRemoveOsdInterpret { "rook-ceph-tools".to_string(), "app".to_string(), Some(&self.score.rook_ceph_namespace), - vec!["ceph osd tree -f json"], + vec!["sh", "-c", "ceph osd tree -f json"], ) .await?; let tree = diff --git a/harmony/src/modules/storage/ceph/mod.rs b/harmony/src/modules/storage/ceph/mod.rs index 3e3250e..0a3dcec 100644 --- a/harmony/src/modules/storage/ceph/mod.rs +++ 
b/harmony/src/modules/storage/ceph/mod.rs @@ -1,2 +1,2 @@ -pub mod ceph_osd_replacement_score; +pub mod ceph_remove_osd_score; pub mod ceph_validate_health_score; From 987f195e2fb5a4da2614e825327eb6ef1da84a00 Mon Sep 17 00:00:00 2001 From: wjro Date: Tue, 21 Oct 2025 15:55:55 +0000 Subject: [PATCH 10/12] feat(cert-manager): add cluster issuer to okd cluster score (#157) added score to install okd cluster issuer Reviewed-on: https://git.nationtech.io/NationTech/harmony/pulls/157 --- .../modules/cert_manager/cluster_issuer.rs | 209 ++++++++++++++++++ harmony/src/modules/cert_manager/mod.rs | 1 + 2 files changed, 210 insertions(+) create mode 100644 harmony/src/modules/cert_manager/cluster_issuer.rs diff --git a/harmony/src/modules/cert_manager/cluster_issuer.rs b/harmony/src/modules/cert_manager/cluster_issuer.rs new file mode 100644 index 0000000..70294fe --- /dev/null +++ b/harmony/src/modules/cert_manager/cluster_issuer.rs @@ -0,0 +1,209 @@ +use std::sync::Arc; + +use async_trait::async_trait; +use harmony_types::id::Id; +use kube::{CustomResource, api::ObjectMeta}; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + data::Version, + interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, + inventory::Inventory, + score::Score, + topology::{K8sclient, Topology, k8s::K8sClient}, +}; + +#[derive(Clone, Debug, Serialize)] +pub struct ClusterIssuerScore { + email: String, + server: String, + issuer_name: String, + namespace: String, +} + +impl Score for ClusterIssuerScore { + fn name(&self) -> String { + "ClusterIssuerScore".to_string() + } + + #[doc(hidden)] + fn create_interpret(&self) -> Box> { + Box::new(ClusterIssuerInterpret { + score: self.clone(), + }) + } +} + +#[derive(Debug, Clone)] +pub struct ClusterIssuerInterpret { + score: ClusterIssuerScore, +} + +#[async_trait] +impl Interpret for ClusterIssuerInterpret { + async fn execute( + &self, + _inventory: &Inventory, + topology: &T, + ) -> Result { + 
self.apply_cluster_issuer(topology.k8s_client().await.unwrap()) + .await + } + + fn get_name(&self) -> InterpretName { + InterpretName::Custom("ClusterIssuer") + } + + fn get_version(&self) -> Version { + todo!() + } + + fn get_status(&self) -> InterpretStatus { + todo!() + } + + fn get_children(&self) -> Vec { + todo!() + } +} + +impl ClusterIssuerInterpret { + async fn validate_cert_manager( + &self, + client: &Arc, + ) -> Result { + let cert_manager = "cert-manager".to_string(); + let operator_namespace = "openshift-operators".to_string(); + match client + .get_deployment(&cert_manager, Some(&operator_namespace)) + .await + { + Ok(Some(deployment)) => { + if let Some(status) = deployment.status { + let ready_count = status.ready_replicas.unwrap_or(0); + if ready_count >= 1 { + return Ok(Outcome::success(format!( + "'{}' is ready with {} replica(s).", + &cert_manager, ready_count + ))); + } else { + return Err(InterpretError::new( + "cert-manager operator not ready in cluster".to_string(), + )); + } + } else { + Err(InterpretError::new(format!( + "failed to get deployment status {} in ns {}", + &cert_manager, &operator_namespace + ))) + } + } + Ok(None) => Err(InterpretError::new(format!( + "Deployment '{}' not found in namespace '{}'.", + &cert_manager, &operator_namespace + ))), + Err(e) => Err(InterpretError::new(format!( + "Failed to query for deployment '{}': {}", + &cert_manager, e + ))), + } + } + + fn build_cluster_issuer(&self) -> Result { + let issuer_name = &self.score.issuer_name; + let email = &self.score.email; + let server = &self.score.server; + let namespace = &self.score.namespace; + let cluster_issuer = ClusterIssuer { + metadata: ObjectMeta { + name: Some(issuer_name.to_string()), + namespace: Some(namespace.to_string()), + ..Default::default() + }, + spec: ClusterIssuerSpec { + acme: AcmeSpec { + email: email.to_string(), + private_key_secret_ref: PrivateKeySecretRef { + name: issuer_name.to_string(), + }, + server: server.to_string(), + 
solvers: vec![SolverSpec { + http01: Some(Http01Solver { + ingress: Http01Ingress { + class: "nginx".to_string(), + }, + }), + }], + }, + }, + }; + + Ok(cluster_issuer) + } + + pub async fn apply_cluster_issuer( + &self, + client: Arc, + ) -> Result { + let namespace = self.score.namespace.clone(); + self.validate_cert_manager(&client).await?; + let cluster_issuer = self.build_cluster_issuer().unwrap(); + client + .apply_yaml( + &serde_yaml::to_value(cluster_issuer).unwrap(), + Some(&namespace), + ) + .await?; + Ok(Outcome::success(format!( + "successfully deployed cluster operator: {} in namespace: {}", + self.score.issuer_name, self.score.namespace + ))) + } +} + +#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)] +#[kube( + group = "cert-manager.io", + version = "v1", + kind = "ClusterIssuer", + plural = "clusterissuers" +)] +#[serde(rename_all = "camelCase")] +pub struct ClusterIssuerSpec { + pub acme: AcmeSpec, +} + +#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)] +#[serde(rename_all = "camelCase")] +pub struct AcmeSpec { + pub email: String, + pub private_key_secret_ref: PrivateKeySecretRef, + pub server: String, + pub solvers: Vec, +} + +#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)] +#[serde(rename_all = "camelCase")] +pub struct PrivateKeySecretRef { + pub name: String, +} + +#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)] +#[serde(rename_all = "camelCase")] +pub struct SolverSpec { + pub http01: Option, + // Other solver types (e.g., dns01) would go here as Options +} + +#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)] +#[serde(rename_all = "camelCase")] +pub struct Http01Solver { + pub ingress: Http01Ingress, +} + +#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)] +#[serde(rename_all = "camelCase")] +pub struct Http01Ingress { + pub class: String, +} diff --git a/harmony/src/modules/cert_manager/mod.rs b/harmony/src/modules/cert_manager/mod.rs index 8fd309a..032439e 100644 
--- a/harmony/src/modules/cert_manager/mod.rs +++ b/harmony/src/modules/cert_manager/mod.rs @@ -1,2 +1,3 @@ +pub mod cluster_issuer; mod helm; pub use helm::*; From 464347d3e5515e909b04cb8c9ae2578c50895116 Mon Sep 17 00:00:00 2001 From: Willem Date: Tue, 21 Oct 2025 12:01:31 -0400 Subject: [PATCH 11/12] fix: fixed merge error that somehow got missed --- harmony/src/domain/interpret/mod.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/harmony/src/domain/interpret/mod.rs b/harmony/src/domain/interpret/mod.rs index ad3dac3..f9a509d 100644 --- a/harmony/src/domain/interpret/mod.rs +++ b/harmony/src/domain/interpret/mod.rs @@ -62,11 +62,8 @@ impl std::fmt::Display for InterpretName { InterpretName::Lamp => f.write_str("LAMP"), InterpretName::ApplicationMonitoring => f.write_str("ApplicationMonitoring"), InterpretName::K8sPrometheusCrdAlerting => f.write_str("K8sPrometheusCrdAlerting"), -<<<<<<< HEAD InterpretName::CephRemoveOsd => f.write_str("CephRemoveOsd"), -======= InterpretName::DiscoverInventoryAgent => f.write_str("DiscoverInventoryAgent"), ->>>>>>> origin/master InterpretName::CephClusterHealth => f.write_str("CephClusterHealth"), InterpretName::Custom(name) => f.write_str(name), InterpretName::RHOBAlerting => f.write_str("RHOBAlerting"), From 73681849175c439dbeb7bd044e3896434ccb7e47 Mon Sep 17 00:00:00 2001 From: Ian Letourneau Date: Wed, 22 Oct 2025 15:12:53 -0400 Subject: [PATCH 12/12] fix(ha_cluster): inject switch client for better testability --- Cargo.lock | 27 +++++----- brocade/src/fast_iron.rs | 1 + brocade/src/lib.rs | 2 +- brocade/src/network_operating_system.rs | 1 + brocade/src/shell.rs | 1 + examples/nanodc/Cargo.toml | 2 + examples/nanodc/src/main.rs | 34 +++++++++++-- examples/okd_installation/Cargo.toml | 1 + examples/okd_installation/src/topology.rs | 31 +++++++++++- examples/okd_pxe/Cargo.toml | 1 + examples/okd_pxe/src/topology.rs | 34 +++++++++++-- examples/opnsense/Cargo.toml | 3 ++ examples/opnsense/src/main.rs | 33 ++++++++++++- 
harmony/src/domain/topology/ha_cluster.rs | 60 ++++++++++------------- harmony/src/domain/topology/network.rs | 32 +++++++----- harmony/src/infra/brocade.rs | 11 +---- 16 files changed, 197 insertions(+), 77 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 666fe3a..7d9cdcf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1780,6 +1780,7 @@ dependencies = [ name = "example-nanodc" version = "0.1.0" dependencies = [ + "brocade", "cidr", "env_logger", "harmony", @@ -1788,6 +1789,7 @@ dependencies = [ "harmony_tui", "harmony_types", "log", + "serde", "tokio", "url", ] @@ -1806,6 +1808,7 @@ dependencies = [ name = "example-okd-install" version = "0.1.0" dependencies = [ + "brocade", "cidr", "env_logger", "harmony", @@ -1836,13 +1839,16 @@ dependencies = [ name = "example-opnsense" version = "0.1.0" dependencies = [ + "brocade", "cidr", "env_logger", "harmony", "harmony_macros", + "harmony_secret", "harmony_tui", "harmony_types", "log", + "serde", "tokio", "url", ] @@ -1851,6 +1857,7 @@ dependencies = [ name = "example-pxe" version = "0.1.0" dependencies = [ + "brocade", "cidr", "env_logger", "harmony", @@ -1865,6 +1872,15 @@ dependencies = [ "url", ] +[[package]] +name = "example-remove-rook-osd" +version = "0.1.0" +dependencies = [ + "harmony", + "harmony_cli", + "tokio", +] + [[package]] name = "example-rust" version = "0.1.0" @@ -1918,8 +1934,6 @@ dependencies = [ "env_logger", "harmony", "harmony_macros", - "harmony_secret", - "harmony_secret_derive", "harmony_tui", "harmony_types", "log", @@ -4613,15 +4627,6 @@ version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "caf4aa5b0f434c91fe5c7f1ecb6a5ece2130b02ad2a590589dda5146df959001" -[[package]] -name = "remove_rook_osd" -version = "0.1.0" -dependencies = [ - "harmony", - "harmony_cli", - "tokio", -] - [[package]] name = "reqwest" version = "0.11.27" diff --git a/brocade/src/fast_iron.rs b/brocade/src/fast_iron.rs index a1a2478..5a3474e 100644 --- a/brocade/src/fast_iron.rs 
+++ b/brocade/src/fast_iron.rs @@ -10,6 +10,7 @@ use log::{debug, info}; use regex::Regex; use std::{collections::HashSet, str::FromStr}; +#[derive(Debug)] pub struct FastIronClient { shell: BrocadeShell, version: BrocadeInfo, diff --git a/brocade/src/lib.rs b/brocade/src/lib.rs index 3822abd..57b464a 100644 --- a/brocade/src/lib.rs +++ b/brocade/src/lib.rs @@ -162,7 +162,7 @@ pub async fn init( } #[async_trait] -pub trait BrocadeClient { +pub trait BrocadeClient: std::fmt::Debug { /// Retrieves the operating system and version details from the connected Brocade switch. /// /// This is typically the first call made after establishing a connection to determine diff --git a/brocade/src/network_operating_system.rs b/brocade/src/network_operating_system.rs index b14bc08..0ee4a88 100644 --- a/brocade/src/network_operating_system.rs +++ b/brocade/src/network_operating_system.rs @@ -10,6 +10,7 @@ use crate::{ parse_brocade_mac_address, shell::BrocadeShell, }; +#[derive(Debug)] pub struct NetworkOperatingSystemClient { shell: BrocadeShell, version: BrocadeInfo, diff --git a/brocade/src/shell.rs b/brocade/src/shell.rs index cfa672d..28eceb8 100644 --- a/brocade/src/shell.rs +++ b/brocade/src/shell.rs @@ -13,6 +13,7 @@ use log::info; use russh::ChannelMsg; use tokio::time::timeout; +#[derive(Debug)] pub struct BrocadeShell { ip: IpAddr, port: u16, diff --git a/examples/nanodc/Cargo.toml b/examples/nanodc/Cargo.toml index 889c24d..3072ddf 100644 --- a/examples/nanodc/Cargo.toml +++ b/examples/nanodc/Cargo.toml @@ -17,3 +17,5 @@ harmony_secret = { path = "../../harmony_secret" } log = { workspace = true } env_logger = { workspace = true } url = { workspace = true } +serde = { workspace = true } +brocade = { path = "../../brocade" } diff --git a/examples/nanodc/src/main.rs b/examples/nanodc/src/main.rs index 57574d2..d00503f 100644 --- a/examples/nanodc/src/main.rs +++ b/examples/nanodc/src/main.rs @@ -3,12 +3,13 @@ use std::{ sync::Arc, }; +use brocade::BrocadeOptions; use 
cidr::Ipv4Cidr; use harmony::{ config::secret::SshKeyPair, data::{FileContent, FilePath}, hardware::{HostCategory, Location, PhysicalHost, SwitchGroup}, - infra::opnsense::OPNSenseManagementInterface, + infra::{brocade::BrocadeSwitchClient, opnsense::OPNSenseManagementInterface}, inventory::Inventory, modules::{ http::StaticFilesHttpScore, @@ -22,8 +23,9 @@ use harmony::{ topology::{LogicalHost, UnmanagedRouter}, }; use harmony_macros::{ip, mac_address}; -use harmony_secret::SecretManager; +use harmony_secret::{Secret, SecretManager}; use harmony_types::net::Url; +use serde::{Deserialize, Serialize}; #[tokio::main] async fn main() { @@ -32,6 +34,26 @@ async fn main() { name: String::from("fw0"), }; + let switch_auth = SecretManager::get_or_prompt::() + .await + .expect("Failed to get credentials"); + + let switches: Vec = vec![ip!("192.168.33.101")]; + let brocade_options = Some(BrocadeOptions { + dry_run: *harmony::config::DRY_RUN, + ..Default::default() + }); + let switch_client = BrocadeSwitchClient::init( + &switches, + &switch_auth.username, + &switch_auth.password, + brocade_options, + ) + .await + .expect("Failed to connect to switch"); + + let switch_client = Arc::new(switch_client); + let opnsense = Arc::new( harmony::infra::opnsense::OPNSenseFirewall::new(firewall, None, "root", "opnsense").await, ); @@ -83,7 +105,7 @@ async fn main() { name: "wk2".to_string(), }, ], - switch: vec![], + switch_client: switch_client.clone(), }; let inventory = Inventory { @@ -166,3 +188,9 @@ async fn main() { .await .unwrap(); } + +#[derive(Secret, Serialize, Deserialize, Debug)] +pub struct BrocadeSwitchAuth { + pub username: String, + pub password: String, +} diff --git a/examples/okd_installation/Cargo.toml b/examples/okd_installation/Cargo.toml index 7314e4f..dfbe944 100644 --- a/examples/okd_installation/Cargo.toml +++ b/examples/okd_installation/Cargo.toml @@ -19,3 +19,4 @@ log = { workspace = true } env_logger = { workspace = true } url = { workspace = true } 
serde.workspace = true +brocade = { path = "../../brocade" } diff --git a/examples/okd_installation/src/topology.rs b/examples/okd_installation/src/topology.rs index 31062f5..617a3a8 100644 --- a/examples/okd_installation/src/topology.rs +++ b/examples/okd_installation/src/topology.rs @@ -1,7 +1,8 @@ +use brocade::BrocadeOptions; use cidr::Ipv4Cidr; use harmony::{ hardware::{Location, SwitchGroup}, - infra::opnsense::OPNSenseManagementInterface, + infra::{brocade::BrocadeSwitchClient, opnsense::OPNSenseManagementInterface}, inventory::Inventory, topology::{HAClusterTopology, LogicalHost, UnmanagedRouter}, }; @@ -22,6 +23,26 @@ pub async fn get_topology() -> HAClusterTopology { name: String::from("opnsense-1"), }; + let switch_auth = SecretManager::get_or_prompt::() + .await + .expect("Failed to get credentials"); + + let switches: Vec = vec![ip!("192.168.1.101")]; // TODO: Adjust me + let brocade_options = Some(BrocadeOptions { + dry_run: *harmony::config::DRY_RUN, + ..Default::default() + }); + let switch_client = BrocadeSwitchClient::init( + &switches, + &switch_auth.username, + &switch_auth.password, + brocade_options, + ) + .await + .expect("Failed to connect to switch"); + + let switch_client = Arc::new(switch_client); + let config = SecretManager::get_or_prompt::().await; let config = config.unwrap(); @@ -58,7 +79,7 @@ pub async fn get_topology() -> HAClusterTopology { name: "bootstrap".to_string(), }, workers: vec![], - switch: vec![], + switch_client: switch_client.clone(), } } @@ -75,3 +96,9 @@ pub fn get_inventory() -> Inventory { control_plane_host: vec![], } } + +#[derive(Secret, Serialize, Deserialize, Debug)] +pub struct BrocadeSwitchAuth { + pub username: String, + pub password: String, +} diff --git a/examples/okd_pxe/Cargo.toml b/examples/okd_pxe/Cargo.toml index f75f42b..133b2f9 100644 --- a/examples/okd_pxe/Cargo.toml +++ b/examples/okd_pxe/Cargo.toml @@ -19,3 +19,4 @@ log = { workspace = true } env_logger = { workspace = true } url = { workspace 
= true } serde.workspace = true +brocade = { path = "../../brocade" } diff --git a/examples/okd_pxe/src/topology.rs b/examples/okd_pxe/src/topology.rs index 707969a..0cf4b72 100644 --- a/examples/okd_pxe/src/topology.rs +++ b/examples/okd_pxe/src/topology.rs @@ -1,13 +1,15 @@ +use brocade::BrocadeOptions; use cidr::Ipv4Cidr; use harmony::{ config::secret::OPNSenseFirewallCredentials, hardware::{Location, SwitchGroup}, - infra::opnsense::OPNSenseManagementInterface, + infra::{brocade::BrocadeSwitchClient, opnsense::OPNSenseManagementInterface}, inventory::Inventory, topology::{HAClusterTopology, LogicalHost, UnmanagedRouter}, }; use harmony_macros::{ip, ipv4}; -use harmony_secret::SecretManager; +use harmony_secret::{Secret, SecretManager}; +use serde::{Deserialize, Serialize}; use std::{net::IpAddr, sync::Arc}; pub async fn get_topology() -> HAClusterTopology { @@ -16,6 +18,26 @@ pub async fn get_topology() -> HAClusterTopology { name: String::from("opnsense-1"), }; + let switch_auth = SecretManager::get_or_prompt::() + .await + .expect("Failed to get credentials"); + + let switches: Vec = vec![ip!("192.168.1.101")]; // TODO: Adjust me + let brocade_options = Some(BrocadeOptions { + dry_run: *harmony::config::DRY_RUN, + ..Default::default() + }); + let switch_client = BrocadeSwitchClient::init( + &switches, + &switch_auth.username, + &switch_auth.password, + brocade_options, + ) + .await + .expect("Failed to connect to switch"); + + let switch_client = Arc::new(switch_client); + let config = SecretManager::get_or_prompt::().await; let config = config.unwrap(); @@ -52,7 +74,7 @@ pub async fn get_topology() -> HAClusterTopology { name: "cp0".to_string(), }, workers: vec![], - switch: vec![], + switch_client: switch_client.clone(), } } @@ -69,3 +91,9 @@ pub fn get_inventory() -> Inventory { control_plane_host: vec![], } } + +#[derive(Secret, Serialize, Deserialize, Debug)] +pub struct BrocadeSwitchAuth { + pub username: String, + pub password: String, +} diff --git 
a/examples/opnsense/Cargo.toml b/examples/opnsense/Cargo.toml index 60986d3..1574f29 100644 --- a/examples/opnsense/Cargo.toml +++ b/examples/opnsense/Cargo.toml @@ -16,3 +16,6 @@ harmony_macros = { path = "../../harmony_macros" } log = { workspace = true } env_logger = { workspace = true } url = { workspace = true } +harmony_secret = { path = "../../harmony_secret" } +brocade = { path = "../../brocade" } +serde = { workspace = true } diff --git a/examples/opnsense/src/main.rs b/examples/opnsense/src/main.rs index fcfaf09..d03643b 100644 --- a/examples/opnsense/src/main.rs +++ b/examples/opnsense/src/main.rs @@ -3,10 +3,11 @@ use std::{ sync::Arc, }; +use brocade::BrocadeOptions; use cidr::Ipv4Cidr; use harmony::{ hardware::{HostCategory, Location, PhysicalHost, SwitchGroup}, - infra::opnsense::OPNSenseManagementInterface, + infra::{brocade::BrocadeSwitchClient, opnsense::OPNSenseManagementInterface}, inventory::Inventory, modules::{ dummy::{ErrorScore, PanicScore, SuccessScore}, @@ -18,7 +19,9 @@ use harmony::{ topology::{LogicalHost, UnmanagedRouter}, }; use harmony_macros::{ip, mac_address}; +use harmony_secret::{Secret, SecretManager}; use harmony_types::net::Url; +use serde::{Deserialize, Serialize}; #[tokio::main] async fn main() { @@ -27,6 +30,26 @@ async fn main() { name: String::from("opnsense-1"), }; + let switch_auth = SecretManager::get_or_prompt::() + .await + .expect("Failed to get credentials"); + + let switches: Vec = vec![ip!("192.168.5.101")]; // TODO: Adjust me + let brocade_options = Some(BrocadeOptions { + dry_run: *harmony::config::DRY_RUN, + ..Default::default() + }); + let switch_client = BrocadeSwitchClient::init( + &switches, + &switch_auth.username, + &switch_auth.password, + brocade_options, + ) + .await + .expect("Failed to connect to switch"); + + let switch_client = Arc::new(switch_client); + let opnsense = Arc::new( harmony::infra::opnsense::OPNSenseFirewall::new(firewall, None, "root", "opnsense").await, ); @@ -54,7 +77,7 @@ async 
fn main() { name: "cp0".to_string(), }, workers: vec![], - switch: vec![], + switch_client: switch_client.clone(), }; let inventory = Inventory { @@ -109,3 +132,9 @@ async fn main() { .await .unwrap(); } + +#[derive(Secret, Serialize, Deserialize, Debug)] +pub struct BrocadeSwitchAuth { + pub username: String, + pub password: String, +} diff --git a/harmony/src/domain/topology/ha_cluster.rs b/harmony/src/domain/topology/ha_cluster.rs index 7be2725..59787a1 100644 --- a/harmony/src/domain/topology/ha_cluster.rs +++ b/harmony/src/domain/topology/ha_cluster.rs @@ -1,7 +1,5 @@ use async_trait::async_trait; -use brocade::BrocadeOptions; use harmony_macros::ip; -use harmony_secret::SecretManager; use harmony_types::{ net::{MacAddress, Url}, switch::PortLocation, @@ -14,8 +12,6 @@ use log::info; use crate::data::FileContent; use crate::executors::ExecutorError; use crate::hardware::PhysicalHost; -use crate::infra::brocade::BrocadeSwitchAuth; -use crate::infra::brocade::BrocadeSwitchClient; use crate::modules::okd::crd::{ InstallPlanApproval, OperatorGroup, OperatorGroupSpec, Subscription, SubscriptionSpec, nmstate::{self, NMState, NodeNetworkConfigurationPolicy, NodeNetworkConfigurationPolicySpec}, @@ -30,7 +26,6 @@ use super::{ }; use std::collections::BTreeMap; -use std::net::IpAddr; use std::sync::Arc; #[derive(Debug, Clone)] @@ -43,10 +38,10 @@ pub struct HAClusterTopology { pub tftp_server: Arc, pub http_server: Arc, pub dns_server: Arc, + pub switch_client: Arc, pub bootstrap_host: LogicalHost, pub control_plane: Vec, pub workers: Vec, - pub switch: Vec, } #[async_trait] @@ -280,36 +275,15 @@ impl HAClusterTopology { } } - async fn get_switch_client(&self) -> Result, SwitchError> { - let auth = SecretManager::get_or_prompt::() - .await - .map_err(|e| SwitchError::new(format!("Failed to get credentials: {e}")))?; - - // FIXME: We assume Brocade switches - let switches: Vec = self.switch.iter().map(|s| s.ip).collect(); - let brocade_options = Some(BrocadeOptions { - 
dry_run: *crate::config::DRY_RUN, - ..Default::default() - }); - let client = - BrocadeSwitchClient::init(&switches, &auth.username, &auth.password, brocade_options) - .await - .map_err(|e| SwitchError::new(format!("Failed to connect to switch: {e}")))?; - - Ok(Box::new(client)) - } - async fn configure_port_channel( &self, host: &PhysicalHost, config: &HostNetworkConfig, ) -> Result<(), SwitchError> { debug!("Configuring port channel: {config:#?}"); - let client = self.get_switch_client().await?; - let switch_ports = config.switch_ports.iter().map(|s| s.port.clone()).collect(); - client + self.switch_client .configure_port_channel(&format!("Harmony_{}", host.id), switch_ports) .await .map_err(|e| SwitchError::new(format!("Failed to configure switch: {e}")))?; @@ -333,10 +307,10 @@ impl HAClusterTopology { tftp_server: dummy_infra.clone(), http_server: dummy_infra.clone(), dns_server: dummy_infra.clone(), + switch_client: dummy_infra.clone(), bootstrap_host: dummy_host, control_plane: vec![], workers: vec![], - switch: vec![], } } } @@ -494,8 +468,7 @@ impl HttpServer for HAClusterTopology { #[async_trait] impl Switch for HAClusterTopology { async fn setup_switch(&self) -> Result<(), SwitchError> { - let client = self.get_switch_client().await?; - client.setup().await?; + self.switch_client.setup().await?; Ok(()) } @@ -503,8 +476,7 @@ impl Switch for HAClusterTopology { &self, mac_address: &MacAddress, ) -> Result, SwitchError> { - let client = self.get_switch_client().await?; - let port = client.find_port(mac_address).await?; + let port = self.switch_client.find_port(mac_address).await?; Ok(port) } @@ -704,3 +676,25 @@ impl DnsServer for DummyInfra { unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) } } + +#[async_trait] +impl SwitchClient for DummyInfra { + async fn setup(&self) -> Result<(), SwitchError> { + unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) + } + + async fn find_port( + &self, + _mac_address: &MacAddress, + ) -> Result, SwitchError> { + 
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) + } + + async fn configure_port_channel( + &self, + _channel_name: &str, + _switch_ports: Vec, + ) -> Result { + unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) + } +} diff --git a/harmony/src/domain/topology/network.rs b/harmony/src/domain/topology/network.rs index 99db03a..b78f1a0 100644 --- a/harmony/src/domain/topology/network.rs +++ b/harmony/src/domain/topology/network.rs @@ -1,4 +1,10 @@ -use std::{error::Error, net::Ipv4Addr, str::FromStr, sync::Arc}; +use std::{ + error::Error, + fmt::{self, Debug}, + net::Ipv4Addr, + str::FromStr, + sync::Arc, +}; use async_trait::async_trait; use derive_new::new; @@ -19,8 +25,8 @@ pub struct DHCPStaticEntry { pub ip: Ipv4Addr, } -impl std::fmt::Display for DHCPStaticEntry { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +impl fmt::Display for DHCPStaticEntry { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mac = self .mac .iter() @@ -42,8 +48,8 @@ pub trait Firewall: Send + Sync { fn get_host(&self) -> LogicalHost; } -impl std::fmt::Debug for dyn Firewall { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +impl Debug for dyn Firewall { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_fmt(format_args!("Firewall {}", self.get_ip())) } } @@ -65,7 +71,7 @@ pub struct PxeOptions { } #[async_trait] -pub trait DhcpServer: Send + Sync + std::fmt::Debug { +pub trait DhcpServer: Send + Sync + Debug { async fn add_static_mapping(&self, entry: &DHCPStaticEntry) -> Result<(), ExecutorError>; async fn remove_static_mapping(&self, mac: &MacAddress) -> Result<(), ExecutorError>; async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)>; @@ -104,8 +110,8 @@ pub trait DnsServer: Send + Sync { } } -impl std::fmt::Debug for dyn DnsServer { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +impl Debug for dyn DnsServer { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 
f.write_fmt(format_args!("DnsServer {}", self.get_ip())) } } @@ -141,8 +147,8 @@ pub enum DnsRecordType { TXT, } -impl std::fmt::Display for DnsRecordType { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +impl fmt::Display for DnsRecordType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { DnsRecordType::A => write!(f, "A"), DnsRecordType::AAAA => write!(f, "AAAA"), @@ -216,8 +222,8 @@ pub struct SwitchError { msg: String, } -impl std::fmt::Display for SwitchError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +impl fmt::Display for SwitchError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(&self.msg) } } @@ -225,7 +231,7 @@ impl std::fmt::Display for SwitchError { impl Error for SwitchError {} #[async_trait] -pub trait SwitchClient: Send + Sync { +pub trait SwitchClient: Debug + Send + Sync { /// Executes essential, idempotent, one-time initial configuration steps. /// /// This is an opiniated procedure that setups a switch to provide high availability diff --git a/harmony/src/infra/brocade.rs b/harmony/src/infra/brocade.rs index f721328..774c8f8 100644 --- a/harmony/src/infra/brocade.rs +++ b/harmony/src/infra/brocade.rs @@ -1,15 +1,14 @@ use async_trait::async_trait; use brocade::{BrocadeClient, BrocadeOptions, InterSwitchLink, InterfaceStatus, PortOperatingMode}; -use harmony_secret::Secret; use harmony_types::{ net::{IpAddress, MacAddress}, switch::{PortDeclaration, PortLocation}, }; use option_ext::OptionExt; -use serde::{Deserialize, Serialize}; use crate::topology::{SwitchClient, SwitchError}; +#[derive(Debug)] pub struct BrocadeSwitchClient { brocade: Box, } @@ -114,12 +113,6 @@ impl SwitchClient for BrocadeSwitchClient { } } -#[derive(Secret, Serialize, Deserialize, Debug)] -pub struct BrocadeSwitchAuth { - pub username: String, - pub password: String, -} - #[cfg(test)] mod tests { use std::sync::{Arc, Mutex}; @@ -235,7 +228,7 @@ mod tests { 
assert_that!(*configured_interfaces).is_empty(); } - #[derive(Clone)] + #[derive(Debug, Clone)] struct FakeBrocadeClient { stack_topology: Vec, interfaces: Vec,