diff --git a/Cargo.lock b/Cargo.lock index 2af94a0..429f09b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -429,6 +429,15 @@ dependencies = [ "wait-timeout", ] +[[package]] +name = "assertor" +version = "0.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ff24d87260733dc86d38a11c60d9400ce4a74a05d0dafa2a6f5ab249cd857cb" +dependencies = [ + "num-traits", +] + [[package]] name = "async-broadcast" version = "0.7.2" @@ -665,6 +674,22 @@ dependencies = [ "serde_with", ] +[[package]] +name = "brocade" +version = "0.1.0" +dependencies = [ + "async-trait", + "env_logger", + "harmony_secret", + "harmony_types", + "log", + "regex", + "russh", + "russh-keys", + "serde", + "tokio", +] + [[package]] name = "brotli" version = "8.0.2" @@ -1795,6 +1820,18 @@ dependencies = [ "url", ] +[[package]] +name = "example-openbao" +version = "0.1.0" +dependencies = [ + "harmony", + "harmony_cli", + "harmony_macros", + "harmony_types", + "tokio", + "url", +] + [[package]] name = "example-opnsense" version = "0.1.0" @@ -1881,6 +1918,8 @@ dependencies = [ "env_logger", "harmony", "harmony_macros", + "harmony_secret", + "harmony_secret_derive", "harmony_tui", "harmony_types", "log", @@ -2305,9 +2344,11 @@ name = "harmony" version = "0.1.0" dependencies = [ "askama", + "assertor", "async-trait", "base64 0.22.1", "bollard", + "brocade", "chrono", "cidr", "convert_case", @@ -2338,6 +2379,7 @@ dependencies = [ "once_cell", "opnsense-config", "opnsense-config-xml", + "option-ext", "pretty_assertions", "reqwest 0.11.27", "russh", @@ -3878,6 +3920,7 @@ checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" name = "opnsense-config" version = "0.1.0" dependencies = [ + "assertor", "async-trait", "chrono", "env_logger", @@ -4537,9 +4580,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.11.2" +version = "1.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23d7fd106d8c02486a8d64e778353d1cffe08ce79ac2e82f540c86d0facf6912" +checksum = "8b5288124840bee7b386bc413c487869b360b2b4ec421ea56425128692f2a82c" dependencies = [ "aho-corasick 1.1.3", "memchr", @@ -4549,9 +4592,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.10" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b9458fa0bfeeac22b5ca447c63aaf45f28439a709ccd244698632f9aa6394d6" +checksum = "833eb9ce86d40ef33cb1306d8accf7bc8ec2bfea4355cbdebb3df68b40925cad" dependencies = [ "aho-corasick 1.1.3", "memchr", diff --git a/Cargo.toml b/Cargo.toml index d92c0e7..a256234 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,7 +14,9 @@ members = [ "harmony_composer", "harmony_inventory_agent", "harmony_secret_derive", - "harmony_secret", "adr/agent_discovery/mdns", + "harmony_secret", + "adr/agent_discovery/mdns", + "brocade", ] [workspace.package] @@ -66,5 +68,12 @@ thiserror = "2.0.14" serde = { version = "1.0.209", features = ["derive", "rc"] } serde_json = "1.0.127" askama = "0.14" -sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite" ] } -reqwest = { version = "0.12", features = ["blocking", "stream", "rustls-tls", "http2", "json"], default-features = false } +sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite"] } +reqwest = { version = "0.12", features = [ + "blocking", + "stream", + "rustls-tls", + "http2", + "json", +], default-features = false } +assertor = "0.0.4" diff --git a/brocade/Cargo.toml b/brocade/Cargo.toml new file mode 100644 index 0000000..89c4fb8 --- /dev/null +++ 
b/brocade/Cargo.toml
@@ -0,0 +1,18 @@
+[package]
+name = "brocade"
+edition = "2024"
+version.workspace = true
+readme.workspace = true
+license.workspace = true
+
+[dependencies]
+async-trait.workspace = true
+harmony_types = { path = "../harmony_types" }
+russh.workspace = true
+russh-keys.workspace = true
+tokio.workspace = true
+log.workspace = true
+env_logger.workspace = true
+regex = "1.11.3"
+harmony_secret = { path = "../harmony_secret" }
+serde.workspace = true
diff --git a/brocade/examples/main.rs b/brocade/examples/main.rs
new file mode 100644
index 0000000..34dec21
--- /dev/null
+++ b/brocade/examples/main.rs
@@ -0,0 +1,70 @@
+use std::net::{IpAddr, Ipv4Addr};
+
+use brocade::BrocadeOptions;
+use harmony_secret::{Secret, SecretManager};
+use harmony_types::switch::PortLocation;
+use serde::{Deserialize, Serialize};
+
+#[derive(Secret, Clone, Debug, Serialize, Deserialize)]
+struct BrocadeSwitchAuth {
+    username: String,
+    password: String,
+}
+
+#[tokio::main]
+async fn main() {
+    env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init();
+
+    // let ip = IpAddr::V4(Ipv4Addr::new(10, 0, 0, 250)); // old brocade @ ianlet
+    let ip = IpAddr::V4(Ipv4Addr::new(192, 168, 55, 101)); // brocade @ sto1
+    // let ip = IpAddr::V4(Ipv4Addr::new(192, 168, 4, 11)); // brocade @ st
+    let switch_addresses = vec![ip];
+
+    let config = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
+        .await
+        .unwrap();
+
+    let brocade = brocade::init(
+        &switch_addresses,
+        22,
+        &config.username,
+        &config.password,
+        Some(BrocadeOptions {
+            dry_run: true,
+            ..Default::default()
+        }),
+    )
+    .await
+    .expect("Brocade client failed to connect");
+
+    let entries = brocade.get_stack_topology().await.unwrap();
+    println!("Stack topology: {entries:#?}");
+
+    let entries = brocade.get_interfaces().await.unwrap();
+    println!("Interfaces: {entries:#?}");
+
+    let version = brocade.version().await.unwrap();
+    println!("Version: {version:?}");
+
+    println!("--------------");
+    let mac_addresses = brocade.get_mac_address_table().await.unwrap();
+    println!("VLAN\tMAC\t\t\tPORT");
+    for mac in mac_addresses {
+        println!("{}\t{}\t{}", mac.vlan, mac.mac_address, mac.port);
+    }
+
+    println!("--------------");
+    let channel_name = "1";
+    brocade.clear_port_channel(channel_name).await.unwrap();
+
+    println!("--------------");
+    let channel_id = brocade.find_available_channel_id().await.unwrap();
+
+    println!("--------------");
+    let channel_name = "HARMONY_LAG";
+    let ports = [PortLocation(2, 0, 35)];
+    brocade
+        .create_port_channel(channel_id, channel_name, &ports)
+        .await
+        .unwrap();
+}
diff --git a/brocade/src/fast_iron.rs b/brocade/src/fast_iron.rs
new file mode 100644
index 0000000..a1a2478
--- /dev/null
+++ b/brocade/src/fast_iron.rs
@@ -0,0 +1,211 @@
+use super::BrocadeClient;
+use crate::{
+    BrocadeInfo, Error, ExecutionMode, InterSwitchLink, InterfaceInfo, MacAddressEntry,
+    PortChannelId, PortOperatingMode, parse_brocade_mac_address, shell::BrocadeShell,
+};
+
+use async_trait::async_trait;
+use harmony_types::switch::{PortDeclaration, PortLocation};
+use log::{debug, info};
+use regex::Regex;
+use std::{collections::HashSet, str::FromStr};
+
+pub struct FastIronClient {
+    shell: BrocadeShell,
+    version: BrocadeInfo,
+}
+
+impl FastIronClient {
+    pub fn init(mut shell: BrocadeShell, version_info: BrocadeInfo) -> Self {
+        shell.before_all(vec!["skip-page-display".into()]);
+        shell.after_all(vec!["page".into()]);
+
+        Self {
+            shell,
+            version: version_info,
+        }
+    }
+
+    fn parse_mac_entry(&self, line: &str) -> Option<Result<MacAddressEntry, Error>> {
+        debug!("[Brocade] Parsing mac address entry: {line}");
+        let parts: Vec<&str> = line.split_whitespace().collect();
+        if parts.len() < 3 {
+            return None;
+        }
+
+        let (vlan, mac_address, port) = match parts.len() {
+            3 => (
+                u16::from_str(parts[0]).ok()?,
+                parse_brocade_mac_address(parts[1]).ok()?,
+                parts[2].to_string(),
+            ),
+            _ => (
+                1,
+                parse_brocade_mac_address(parts[0]).ok()?,
+                parts[1].to_string(),
+            ),
+        };
+
+        let port =
+            PortDeclaration::parse(&port).map_err(|e| Error::UnexpectedError(format!("{e}")));
+
+        match port {
+            Ok(p) => Some(Ok(MacAddressEntry {
+                vlan,
+                mac_address,
+                port: p,
+            })),
+            Err(e) => Some(Err(e)),
+        }
+    }
+
+    fn parse_stack_port_entry(&self, line: &str) -> Option<Result<InterSwitchLink, Error>> {
+        debug!("[Brocade] Parsing stack port entry: {line}");
+        let parts: Vec<&str> = line.split_whitespace().collect();
+        if parts.len() < 10 {
+            return None;
+        }
+
+        let local_port = PortLocation::from_str(parts[0]).ok()?;
+
+        Some(Ok(InterSwitchLink {
+            local_port,
+            remote_port: None,
+        }))
+    }
+
+    fn build_port_channel_commands(
+        &self,
+        channel_id: PortChannelId,
+        channel_name: &str,
+        ports: &[PortLocation],
+    ) -> Vec<String> {
+        let mut commands = vec![
+            "configure terminal".to_string(),
+            format!("lag {channel_name} static id {channel_id}"),
+        ];
+
+        for port in ports {
+            commands.push(format!("ports ethernet {port}"));
+        }
+
+        commands.push(format!("primary-port {}", ports[0]));
+        commands.push("deploy".into());
+        commands.push("exit".into());
+        commands.push("write memory".into());
+        commands.push("exit".into());
+
+        commands
+    }
+}
+
+#[async_trait]
+impl BrocadeClient for FastIronClient {
+    async fn version(&self) -> Result<BrocadeInfo, Error> {
+        Ok(self.version.clone())
+    }
+
+    async fn get_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error> {
+        info!("[Brocade] Showing MAC address table...");
+
+        let output = self
+            .shell
+            .run_command("show mac-address", ExecutionMode::Regular)
+            .await?;
+
+        output
+            .lines()
+            .skip(2)
+            .filter_map(|line| self.parse_mac_entry(line))
+            .collect()
+    }
+
+    async fn get_stack_topology(&self) -> Result<Vec<InterSwitchLink>, Error> {
+        let output = self
+            .shell
+            .run_command("show interface stack-ports", crate::ExecutionMode::Regular)
+            .await?;
+
+        output
+            .lines()
+            .skip(1)
+            .filter_map(|line| self.parse_stack_port_entry(line))
+            .collect()
+    }
+
+    async fn get_interfaces(&self) -> Result<Vec<InterfaceInfo>, Error> {
+        todo!()
+    }
+
+    async fn configure_interfaces(
+        &self,
+        _interfaces: Vec<(String, PortOperatingMode)>,
+    ) -> Result<(), Error> {
+        todo!()
+    }
+
+    async fn find_available_channel_id(&self) -> Result<PortChannelId, Error> {
+        info!("[Brocade] Finding next available channel id...");
+
+        let output = self
+            .shell
+            .run_command("show lag", ExecutionMode::Regular)
+            .await?;
+        let re = Regex::new(r"=== LAG .* ID\s+(\d+)").expect("Invalid regex");
+
+        let used_ids: HashSet<u8> = output
+            .lines()
+            .filter_map(|line| {
+                re.captures(line)
+                    .and_then(|c| c.get(1))
+                    .and_then(|id_match| id_match.as_str().parse().ok())
+            })
+            .collect();
+
+        let mut next_id: u8 = 1;
+        loop {
+            if !used_ids.contains(&next_id) {
+                break;
+            }
+            next_id += 1;
+        }
+
+        info!("[Brocade] Found channel id: {next_id}");
+        Ok(next_id)
+    }
+
+    async fn create_port_channel(
+        &self,
+        channel_id: PortChannelId,
+        channel_name: &str,
+        ports: &[PortLocation],
+    ) -> Result<(), Error> {
+        info!(
+            "[Brocade] Configuring port-channel '{channel_name} {channel_id}' with ports: {ports:?}"
+        );
+
+        let commands = self.build_port_channel_commands(channel_id, channel_name, ports);
+        self.shell
+            .run_commands(commands, ExecutionMode::Privileged)
+            .await?;
+
+        info!("[Brocade] Port-channel '{channel_name}' configured.");
+        Ok(())
+    }
+
+    async fn clear_port_channel(&self, channel_name: &str) -> Result<(), Error> {
+        info!("[Brocade] Clearing port-channel: {channel_name}");
+
+        let commands = vec![
+            "configure terminal".to_string(),
+            format!("no lag {channel_name}"),
+            "write memory".to_string(),
+        ];
+        self.shell
+            .run_commands(commands, ExecutionMode::Privileged)
+            .await?;
+
+        info!("[Brocade] Port-channel '{channel_name}' cleared.");
+        Ok(())
+    }
+}
diff --git a/brocade/src/lib.rs b/brocade/src/lib.rs
new file mode 100644
index 0000000..3822abd
--- /dev/null
+++ b/brocade/src/lib.rs
@@ -0,0 +1,336 @@
+use std::net::IpAddr;
+use std::{
+    fmt::{self, Display},
+    time::Duration,
+};
+
+use crate::network_operating_system::NetworkOperatingSystemClient;
+use crate::{
+    fast_iron::FastIronClient,
+    shell::{BrocadeSession, BrocadeShell},
+};
+
+use async_trait::async_trait;
+use harmony_types::net::MacAddress;
+use harmony_types::switch::{PortDeclaration, PortLocation};
+use regex::Regex;
+
+mod fast_iron;
+mod network_operating_system;
+mod shell;
+mod ssh;
+
+#[derive(Default, Clone, Debug)]
+pub struct BrocadeOptions {
+    pub dry_run: bool,
+    pub ssh: ssh::SshOptions,
+    pub timeouts: TimeoutConfig,
+}
+
+#[derive(Clone, Debug)]
+pub struct TimeoutConfig {
+    pub shell_ready: Duration,
+    pub command_execution: Duration,
+    pub cleanup: Duration,
+    pub message_wait: Duration,
+}
+
+impl Default for TimeoutConfig {
+    fn default() -> Self {
+        Self {
+            shell_ready: Duration::from_secs(10),
+            command_execution: Duration::from_secs(60), // Commands like `deploy` (for a LAG) can take a while
+            cleanup: Duration::from_secs(10),
+            message_wait: Duration::from_millis(500),
+        }
+    }
+}
+
+enum ExecutionMode {
+    Regular,
+    Privileged,
+}
+
+#[derive(Clone, Debug)]
+pub struct BrocadeInfo {
+    os: BrocadeOs,
+    version: String,
+}
+
+#[derive(Clone, Debug)]
+pub enum BrocadeOs {
+    NetworkOperatingSystem,
+    FastIron,
+    Unknown,
+}
+
+#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]
+pub struct MacAddressEntry {
+    pub vlan: u16,
+    pub mac_address: MacAddress,
+    pub port: PortDeclaration,
+}
+
+pub type PortChannelId = u8;
+
+/// Represents a single physical or logical link connecting two switches within a stack or fabric.
+///
+/// This structure provides a standardized view of the topology regardless of the
+/// underlying Brocade OS configuration (stacking vs. fabric).
+#[derive(Debug, PartialEq, Eq, Clone)]
+pub struct InterSwitchLink {
+    /// The local port on the switch where the topology command was run.
+    pub local_port: PortLocation,
+    /// The port on the directly connected neighboring switch.
+    pub remote_port: Option<PortLocation>,
+}
+
+/// Represents the key running configuration status of a single switch interface.
+#[derive(Debug, PartialEq, Eq, Clone)]
+pub struct InterfaceInfo {
+    /// The full configuration name (e.g., "TenGigabitEthernet 1/0/1", "FortyGigabitEthernet 2/0/2").
+    pub name: String,
+    /// The physical location of the interface.
+    pub port_location: PortLocation,
+    /// The parsed type and name prefix of the interface.
+    pub interface_type: InterfaceType,
+    /// The primary configuration mode defining the interface's behavior (L2, L3, Fabric).
+    pub operating_mode: Option<PortOperatingMode>,
+    /// Indicates the current state of the interface.
+    pub status: InterfaceStatus,
+}
+
+/// Categorizes the functional type of a switch interface.
+#[derive(Debug, PartialEq, Eq, Clone)]
+pub enum InterfaceType {
+    /// Physical or virtual Ethernet interface (e.g., TenGigabitEthernet, FortyGigabitEthernet).
+    Ethernet(String),
+}
+
+impl fmt::Display for InterfaceType {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            InterfaceType::Ethernet(name) => write!(f, "{name}"),
+        }
+    }
+}
+
+/// Defines the primary configuration mode of a switch interface, representing mutually exclusive roles.
+#[derive(Debug, PartialEq, Eq, Clone)]
+pub enum PortOperatingMode {
+    /// The interface is explicitly configured for Brocade fabric roles (ISL or Trunk enabled).
+    Fabric,
+    /// The interface is configured for standard Layer 2 switching as a Trunk port (`switchport mode trunk`).
+    Trunk,
+    /// The interface is configured for standard Layer 2 switching as an Access port (`switchport` without trunk mode).
+    Access,
+}
+
+/// Defines the possible status of an interface.
+#[derive(Debug, PartialEq, Eq, Clone)]
+pub enum InterfaceStatus {
+    /// The interface is connected.
+    Connected,
+    /// The interface is not connected and is not expected to be.
+    NotConnected,
+    /// The interface is not connected but is expected to be (configured with `no shutdown`).
+    SfpAbsent,
+}
+
+pub async fn init(
+    ip_addresses: &[IpAddr],
+    port: u16,
+    username: &str,
+    password: &str,
+    options: Option<BrocadeOptions>,
+) -> Result<Box<dyn BrocadeClient + Send + Sync>, Error> {
+    let shell = BrocadeShell::init(ip_addresses, port, username, password, options).await?;
+
+    let version_info = shell
+        .with_session(ExecutionMode::Regular, |session| {
+            Box::pin(get_brocade_info(session))
+        })
+        .await?;
+
+    Ok(match version_info.os {
+        BrocadeOs::FastIron => Box::new(FastIronClient::init(shell, version_info)),
+        BrocadeOs::NetworkOperatingSystem => {
+            Box::new(NetworkOperatingSystemClient::init(shell, version_info))
+        }
+        BrocadeOs::Unknown => todo!(),
+    })
+}
+
+#[async_trait]
+pub trait BrocadeClient {
+    /// Retrieves the operating system and version details from the connected Brocade switch.
+    ///
+    /// This is typically the first call made after establishing a connection to determine
+    /// the switch OS family (e.g., FastIron, NOS) for feature compatibility.
+    ///
+    /// # Returns
+    ///
+    /// A `BrocadeInfo` structure containing the parsed OS type and version string.
+    async fn version(&self) -> Result<BrocadeInfo, Error>;
+
+    /// Retrieves the dynamically learned MAC address table from the switch.
+    ///
+    /// This is crucial for discovering where specific network endpoints (MAC addresses)
+    /// are currently located on the physical ports.
+    ///
+    /// # Returns
+    ///
+    /// A vector of `MacAddressEntry`, where each entry typically contains VLAN, MAC address,
+    /// and the associated port name/index.
+    async fn get_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error>;
+
+    /// Derives the physical connections used to link multiple switches together
+    /// to form a single logical entity (stack, fabric, etc.).
+    ///
+    /// This abstracts the underlying configuration (e.g., stack ports, fabric ports)
+    /// to return a standardized view of the topology.
+    ///
+    /// # Returns
+    ///
+    /// A vector of `InterSwitchLink` structs detailing which ports are used for stacking/fabric.
+    /// If the switch is not stacked, returns an empty vector.
+    async fn get_stack_topology(&self) -> Result<Vec<InterSwitchLink>, Error>;
+
+    /// Retrieves the status for all interfaces.
+    ///
+    /// # Returns
+    ///
+    /// A vector of `InterfaceInfo` structures.
+    async fn get_interfaces(&self) -> Result<Vec<InterfaceInfo>, Error>;
+
+    /// Configures a set of interfaces to be operated with a specified mode (access ports, ISL, etc.).
+    async fn configure_interfaces(
+        &self,
+        interfaces: Vec<(String, PortOperatingMode)>,
+    ) -> Result<(), Error>;
+
+    /// Scans the existing configuration to find the next available (unused)
+    /// Port-Channel ID (`lag` or `trunk`) for assignment.
+    ///
+    /// # Returns
+    ///
+    /// The smallest, unassigned `PortChannelId` within the supported range.
+    async fn find_available_channel_id(&self) -> Result<PortChannelId, Error>;
+
+    /// Creates and configures a new Port-Channel (Link Aggregation Group or LAG)
+    /// using the specified channel ID and ports.
+    ///
+    /// The resulting configuration must be persistent (saved to startup-config).
+    /// Assumes a static LAG configuration mode unless specified otherwise by the implementation.
+    ///
+    /// # Parameters
+    ///
+    /// * `channel_id`: The ID (e.g., 1-128) for the logical port channel.
+    /// * `channel_name`: A descriptive name for the LAG (used in configuration context).
+    /// * `ports`: A slice of `PortLocation` structs defining the physical member ports.
+    async fn create_port_channel(
+        &self,
+        channel_id: PortChannelId,
+        channel_name: &str,
+        ports: &[PortLocation],
+    ) -> Result<(), Error>;
+
+    /// Removes all configuration associated with the specified Port-Channel name.
+    ///
+    /// This operation should be idempotent; attempting to clear a non-existent
+    /// channel should succeed (or return a benign error).
+    ///
+    /// # Parameters
+    ///
+    /// * `channel_name`: The name of the Port-Channel (LAG) to delete.
+    ///
+    async fn clear_port_channel(&self, channel_name: &str) -> Result<(), Error>;
+}
+
+async fn get_brocade_info(session: &mut BrocadeSession) -> Result<BrocadeInfo, Error> {
+    let output = session.run_command("show version").await?;
+
+    if output.contains("Network Operating System") {
+        let re = Regex::new(r"Network Operating System Version:\s*(?P<version>[a-zA-Z0-9.\-]+)")
+            .expect("Invalid regex");
+        let version = re
+            .captures(&output)
+            .and_then(|cap| cap.name("version"))
+            .map(|m| m.as_str().to_string())
+            .unwrap_or_default();
+
+        return Ok(BrocadeInfo {
+            os: BrocadeOs::NetworkOperatingSystem,
+            version,
+        });
+    } else if output.contains("ICX") {
+        let re = Regex::new(r"(?m)^\s*SW: Version\s*(?P<version>[a-zA-Z0-9.\-]+)")
+            .expect("Invalid regex");
+        let version = re
+            .captures(&output)
+            .and_then(|cap| cap.name("version"))
+            .map(|m| m.as_str().to_string())
+            .unwrap_or_default();
+
+        return Ok(BrocadeInfo {
+            os: BrocadeOs::FastIron,
+            version,
+        });
+    }
+
+    Err(Error::UnexpectedError("Unknown Brocade OS version".into()))
+}
+
+fn parse_brocade_mac_address(value: &str) -> Result<MacAddress, String> {
+    let cleaned_mac = value.replace('.', "");
+
+    if cleaned_mac.len() != 12 {
+        return Err(format!("Invalid MAC address: {value}"));
+    }
+
+    let mut bytes = [0u8; 6];
+    for (i, pair) in cleaned_mac.as_bytes().chunks(2).enumerate() {
+        let byte_str = std::str::from_utf8(pair).map_err(|_| "Invalid UTF-8")?;
+        bytes[i] =
+            u8::from_str_radix(byte_str, 16).map_err(|_| format!("Invalid hex in MAC: {value}"))?;
+    }
+
+    Ok(MacAddress(bytes))
+}
+
+#[derive(Debug)]
+pub enum Error {
+    NetworkError(String),
+    AuthenticationError(String),
+    ConfigurationError(String),
+    TimeoutError(String),
+    UnexpectedError(String),
+    CommandError(String),
+}
+
+impl Display for Error {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Error::NetworkError(msg) => write!(f, "Network error: {msg}"),
+            Error::AuthenticationError(msg) => write!(f, "Authentication error: {msg}"),
write!(f, "Authentication error: {msg}"), + Error::ConfigurationError(msg) => write!(f, "Configuration error: {msg}"), + Error::TimeoutError(msg) => write!(f, "Timeout error: {msg}"), + Error::UnexpectedError(msg) => write!(f, "Unexpected error: {msg}"), + Error::CommandError(msg) => write!(f, "{msg}"), + } + } +} + +impl From for String { + fn from(val: Error) -> Self { + format!("{val}") + } +} + +impl std::error::Error for Error {} + +impl From for Error { + fn from(value: russh::Error) -> Self { + Error::NetworkError(format!("Russh client error: {value}")) + } +} diff --git a/brocade/src/network_operating_system.rs b/brocade/src/network_operating_system.rs new file mode 100644 index 0000000..b14bc08 --- /dev/null +++ b/brocade/src/network_operating_system.rs @@ -0,0 +1,306 @@ +use std::str::FromStr; + +use async_trait::async_trait; +use harmony_types::switch::{PortDeclaration, PortLocation}; +use log::{debug, info}; + +use crate::{ + BrocadeClient, BrocadeInfo, Error, ExecutionMode, InterSwitchLink, InterfaceInfo, + InterfaceStatus, InterfaceType, MacAddressEntry, PortChannelId, PortOperatingMode, + parse_brocade_mac_address, shell::BrocadeShell, +}; + +pub struct NetworkOperatingSystemClient { + shell: BrocadeShell, + version: BrocadeInfo, +} + +impl NetworkOperatingSystemClient { + pub fn init(mut shell: BrocadeShell, version_info: BrocadeInfo) -> Self { + shell.before_all(vec!["terminal length 0".into()]); + + Self { + shell, + version: version_info, + } + } + + fn parse_mac_entry(&self, line: &str) -> Option> { + debug!("[Brocade] Parsing mac address entry: {line}"); + let parts: Vec<&str> = line.split_whitespace().collect(); + if parts.len() < 5 { + return None; + } + + let (vlan, mac_address, port) = match parts.len() { + 5 => ( + u16::from_str(parts[0]).ok()?, + parse_brocade_mac_address(parts[1]).ok()?, + parts[4].to_string(), + ), + _ => ( + u16::from_str(parts[0]).ok()?, + parse_brocade_mac_address(parts[1]).ok()?, + parts[5].to_string(), + ), + }; + + let port = + PortDeclaration::parse(&port).map_err(|e| Error::UnexpectedError(format!("{e}"))); + + match port { + Ok(p) => Some(Ok(MacAddressEntry { + vlan, + mac_address, + port: p, + })), + Err(e) => Some(Err(e)), + } + } + + fn parse_inter_switch_link_entry(&self, line: &str) -> Option> { + debug!("[Brocade] Parsing inter switch link entry: {line}"); + let parts: Vec<&str> = line.split_whitespace().collect(); + if parts.len() < 10 { + return None; + } + + let local_port = PortLocation::from_str(parts[2]).ok()?; + let remote_port = PortLocation::from_str(parts[5]).ok()?; + + Some(Ok(InterSwitchLink { + local_port, + remote_port: Some(remote_port), + })) + } + + fn parse_interface_status_entry(&self, line: &str) -> Option> { + debug!("[Brocade] Parsing interface status entry: {line}"); + let parts: Vec<&str> = line.split_whitespace().collect(); + if parts.len() < 6 { + return None; + } + + let interface_type = match parts[0] { + "Fo" => InterfaceType::Ethernet("FortyGigabitEthernet".to_string()), + "Te" => InterfaceType::Ethernet("TenGigabitEthernet".to_string()), + _ => return None, + }; + let port_location = PortLocation::from_str(parts[1]).ok()?; + let status = match parts[2] { + "connected" => InterfaceStatus::Connected, + "notconnected" => InterfaceStatus::NotConnected, + "sfpAbsent" => InterfaceStatus::SfpAbsent, + _ => return None, + }; + let operating_mode = match parts[3] { + "ISL" => Some(PortOperatingMode::Fabric), + "Trunk" => Some(PortOperatingMode::Trunk), + "Access" => Some(PortOperatingMode::Access), + "--" => 
+            _ => return None,
+        };
+
+        Some(Ok(InterfaceInfo {
+            name: format!("{} {}", interface_type, port_location),
+            port_location,
+            interface_type,
+            operating_mode,
+            status,
+        }))
+    }
+}
+
+#[async_trait]
+impl BrocadeClient for NetworkOperatingSystemClient {
+    async fn version(&self) -> Result<BrocadeInfo, Error> {
+        Ok(self.version.clone())
+    }
+
+    async fn get_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error> {
+        let output = self
+            .shell
+            .run_command("show mac-address-table", ExecutionMode::Regular)
+            .await?;
+
+        output
+            .lines()
+            .skip(1)
+            .filter_map(|line| self.parse_mac_entry(line))
+            .collect()
+    }
+
+    async fn get_stack_topology(&self) -> Result<Vec<InterSwitchLink>, Error> {
+        let output = self
+            .shell
+            .run_command("show fabric isl", ExecutionMode::Regular)
+            .await?;
+
+        output
+            .lines()
+            .skip(6)
+            .filter_map(|line| self.parse_inter_switch_link_entry(line))
+            .collect()
+    }
+
+    async fn get_interfaces(&self) -> Result<Vec<InterfaceInfo>, Error> {
+        let output = self
+            .shell
+            .run_command(
+                "show interface status rbridge-id all",
+                ExecutionMode::Regular,
+            )
+            .await?;
+
+        output
+            .lines()
+            .skip(2)
+            .filter_map(|line| self.parse_interface_status_entry(line))
+            .collect()
+    }
+
+    async fn configure_interfaces(
+        &self,
+        interfaces: Vec<(String, PortOperatingMode)>,
+    ) -> Result<(), Error> {
+        info!("[Brocade] Configuring {} interface(s)...", interfaces.len());
+
+        let mut commands = vec!["configure terminal".to_string()];
+
+        for interface in interfaces {
+            commands.push(format!("interface {}", interface.0));
+
+            match interface.1 {
+                PortOperatingMode::Fabric => {
+                    commands.push("fabric isl enable".into());
+                    commands.push("fabric trunk enable".into());
+                }
+                PortOperatingMode::Trunk => {
+                    commands.push("switchport".into());
+                    commands.push("switchport mode trunk".into());
+                    commands.push("no spanning-tree shutdown".into());
+                    commands.push("no fabric isl enable".into());
+                    commands.push("no fabric trunk enable".into());
+                }
+                PortOperatingMode::Access => {
+                    commands.push("switchport".into());
+                    commands.push("switchport mode access".into());
+                    commands.push("switchport access vlan 1".into());
+                    commands.push("no spanning-tree shutdown".into());
+                    commands.push("no fabric isl enable".into());
+                    commands.push("no fabric trunk enable".into());
+                }
+            }
+
+            commands.push("no shutdown".into());
+            commands.push("exit".into());
+        }
+
+        commands.push("write memory".into());
+
+        self.shell
+            .run_commands(commands, ExecutionMode::Regular)
+            .await?;
+
+        info!("[Brocade] Interfaces configured.");
+
+        Ok(())
+    }
+
+    async fn find_available_channel_id(&self) -> Result<PortChannelId, Error> {
+        info!("[Brocade] Finding next available channel id...");
+
+        let output = self
+            .shell
+            .run_command("show port-channel", ExecutionMode::Regular)
+            .await?;
+
+        let used_ids: Vec<u8> = output
+            .lines()
+            .skip(6)
+            .filter_map(|line| {
+                let parts: Vec<&str> = line.split_whitespace().collect();
+                if parts.len() < 8 {
+                    return None;
+                }
+
+                u8::from_str(parts[0]).ok()
+            })
+            .collect();
+
+        let mut next_id: u8 = 1;
+        loop {
+            if !used_ids.contains(&next_id) {
+                break;
+            }
+            next_id += 1;
+        }
+
+        info!("[Brocade] Found channel id: {next_id}");
+        Ok(next_id)
+    }
+
+    async fn create_port_channel(
+        &self,
+        channel_id: PortChannelId,
+        channel_name: &str,
+        ports: &[PortLocation],
+    ) -> Result<(), Error> {
+        info!(
+            "[Brocade] Configuring port-channel '{channel_name} {channel_id}' with ports: {ports:?}"
+        );
+
+        let interfaces = self.get_interfaces().await?;
+
+        let mut commands = vec![
+            "configure terminal".into(),
+            format!("interface port-channel {}", channel_id),
+            "no shutdown".into(),
+            "exit".into(),
+        ];
+
+        for port in ports {
+            let interface = interfaces.iter().find(|i| i.port_location == *port);
+            let Some(interface) = interface else {
+                continue;
+            };
+
+            commands.push(format!("interface {}", interface.name));
+            commands.push("no switchport".into());
+            commands.push("no ip address".into());
+            commands.push("no fabric isl enable".into());
+            commands.push("no fabric trunk enable".into());
+            commands.push(format!("channel-group {channel_id} mode active"));
+            commands.push("no shutdown".into());
+            commands.push("exit".into());
+        }
+
+        commands.push("write memory".into());
+
+        self.shell
+            .run_commands(commands, ExecutionMode::Regular)
+            .await?;
+
+        info!("[Brocade] Port-channel '{channel_name}' configured.");
+
+        Ok(())
+    }
+
+    async fn clear_port_channel(&self, channel_name: &str) -> Result<(), Error> {
+        info!("[Brocade] Clearing port-channel: {channel_name}");
+
+        let commands = vec![
+            "configure terminal".into(),
+            format!("no interface port-channel {}", channel_name),
+            "exit".into(),
+            "write memory".into(),
+        ];
+
+        self.shell
+            .run_commands(commands, ExecutionMode::Regular)
+            .await?;
+
+        info!("[Brocade] Port-channel '{channel_name}' cleared.");
+        Ok(())
+    }
+}
diff --git a/brocade/src/shell.rs b/brocade/src/shell.rs
new file mode 100644
index 0000000..cfa672d
--- /dev/null
+++ b/brocade/src/shell.rs
@@ -0,0 +1,367 @@
+use std::net::IpAddr;
+use std::time::Duration;
+use std::time::Instant;
+
+use crate::BrocadeOptions;
+use crate::Error;
+use crate::ExecutionMode;
+use crate::TimeoutConfig;
+use crate::ssh;
+
+use log::debug;
+use log::info;
+use russh::ChannelMsg;
+use tokio::time::timeout;
+
+pub struct BrocadeShell {
+    ip: IpAddr,
+    port: u16,
+    username: String,
+    password: String,
+    options: BrocadeOptions,
+    before_all_commands: Vec<String>,
+    after_all_commands: Vec<String>,
+}
+
+impl BrocadeShell {
+    pub async fn init(
+        ip_addresses: &[IpAddr],
+        port: u16,
+        username: &str,
+        password: &str,
+        options: Option<BrocadeOptions>,
+    ) -> Result<Self, Error> {
+        let ip = ip_addresses
+            .first()
+            .ok_or_else(|| Error::ConfigurationError("No IP addresses provided".to_string()))?;
+
+        let base_options = options.unwrap_or_default();
+        let options = ssh::try_init_client(username, password, ip, base_options).await?;
+
+        Ok(Self {
+            ip: *ip,
+            port,
+            username: username.to_string(),
+            password: password.to_string(),
+            before_all_commands: vec![],
+            after_all_commands: vec![],
+            options,
+        })
+    }
+
+    pub async fn open_session(&self, mode: ExecutionMode) -> Result<BrocadeSession, Error> {
+        BrocadeSession::open(
+            self.ip,
+            self.port,
+            &self.username,
+            &self.password,
+            self.options.clone(),
+            mode,
+        )
+        .await
+    }
+
+    pub async fn with_session<F, T>(&self, mode: ExecutionMode, callback: F) -> Result<T, Error>
+    where
+        F: FnOnce(
+            &mut BrocadeSession,
+        ) -> std::pin::Pin<
+            Box<dyn std::future::Future<Output = Result<T, Error>> + Send + '_>,
+        >,
+    {
+        let mut session = self.open_session(mode).await?;
+
+        let _ = session.run_commands(self.before_all_commands.clone()).await;
+        let result = callback(&mut session).await;
+        let _ = session.run_commands(self.after_all_commands.clone()).await;
+
+        session.close().await?;
+        result
+    }
+
+    pub async fn run_command(&self, command: &str, mode: ExecutionMode) -> Result<String, Error> {
+        let mut session = self.open_session(mode).await?;
+
+        let _ = session.run_commands(self.before_all_commands.clone()).await;
+        let result = session.run_command(command).await;
+        let _ = session.run_commands(self.after_all_commands.clone()).await;
+
+        session.close().await?;
+        result
+    }
+
+    pub async fn run_commands(
+        &self,
+        commands: Vec<String>,
+        mode: ExecutionMode,
+    ) -> Result<(), Error> {
+        let mut session = self.open_session(mode).await?;
+
+        let _ = session.run_commands(self.before_all_commands.clone()).await;
+        let result = session.run_commands(commands).await;
+        let _ = session.run_commands(self.after_all_commands.clone()).await;
+
+        session.close().await?;
+        result
+    }
+
+    pub fn before_all(&mut self, commands: Vec<String>) {
+        self.before_all_commands = commands;
+    }
+
+    pub fn after_all(&mut self, commands: Vec<String>) {
+        self.after_all_commands = commands;
+    }
+}
+
+pub struct BrocadeSession {
+    pub channel: russh::Channel<russh::client::Msg>,
+    pub mode: ExecutionMode,
+    pub options: BrocadeOptions,
+}
+
+impl BrocadeSession {
+    pub async fn open(
+        ip: IpAddr,
+        port: u16,
+        username: &str,
+        password: &str,
+        options: BrocadeOptions,
+        mode: ExecutionMode,
+    ) -> Result<Self, Error> {
+        let client = ssh::create_client(ip, port, username, password, &options).await?;
+        let mut channel = client.channel_open_session().await?;
+
+        channel
+            .request_pty(false, "vt100", 80, 24, 0, 0, &[])
+            .await?;
+        channel.request_shell(false).await?;
+
+        wait_for_shell_ready(&mut channel, &options.timeouts).await?;
+
+        if let ExecutionMode::Privileged = mode {
+            try_elevate_session(&mut channel, username, password, &options.timeouts).await?;
+        }
+
+        Ok(Self {
+            channel,
+            mode,
+            options,
+        })
+    }
+
+    pub async fn close(&mut self) -> Result<(), Error> {
+        debug!("[Brocade] Closing session...");
+
+        self.channel.data(&b"exit\n"[..]).await?;
+        if let ExecutionMode::Privileged = self.mode {
+            self.channel.data(&b"exit\n"[..]).await?;
+        }
+
+        let start = Instant::now();
+        while start.elapsed() < self.options.timeouts.cleanup {
+            match timeout(self.options.timeouts.message_wait, self.channel.wait()).await {
+                Ok(Some(ChannelMsg::Close)) => break,
+                Ok(Some(_)) => continue,
+                Ok(None) | Err(_) => break,
+            }
+        }
+
+        debug!("[Brocade] Session closed.");
+        Ok(())
+    }
+
+    pub async fn run_command(&mut self, command: &str) -> Result<String, Error> {
+        if self.should_skip_command(command) {
+            return Ok(String::new());
+        }
+
+        debug!("[Brocade] Running command: '{command}'...");
+
+        self.channel
+            .data(format!("{}\n", command).as_bytes())
+            .await?;
+        tokio::time::sleep(Duration::from_millis(100)).await;
+
+        let output = self.collect_command_output().await?;
+        let output = String::from_utf8(output)
+            .map_err(|_| Error::UnexpectedError("Invalid UTF-8 in command output".to_string()))?;
+
+        self.check_for_command_errors(&output, command)?;
+        Ok(output)
+    }
+
+    pub async fn run_commands(&mut self, commands: Vec<String>) -> Result<(), Error> {
+        for command in commands {
+            self.run_command(&command).await?;
+        }
+        Ok(())
+    }
+
+    fn should_skip_command(&self, command: &str) -> bool {
+        if (command.starts_with("write") || command.starts_with("deploy")) && self.options.dry_run {
+            info!("[Brocade] Dry-run mode enabled, skipping command: {command}");
+            return true;
+        }
+        false
+    }
+
+    async fn collect_command_output(&mut self) -> Result<Vec<u8>, Error> {
+        let mut output = Vec::new();
+        let start = Instant::now();
+        let read_timeout = Duration::from_millis(500);
+        let log_interval = Duration::from_secs(3);
+        let mut last_log = Instant::now();
+
+        loop {
+            if start.elapsed() > self.options.timeouts.command_execution {
+                return Err(Error::TimeoutError(
+                    "Timeout waiting for command completion.".into(),
+                ));
+            }
+
+            if start.elapsed() > Duration::from_secs(5) && last_log.elapsed() > log_interval {
+                info!("[Brocade] Waiting for command output...");
+                last_log = Instant::now();
+            }
+
+            match timeout(read_timeout, self.channel.wait()).await {
+                Ok(Some(ChannelMsg::Data { data } | ChannelMsg::ExtendedData { data, .. })) => {
+                    output.extend_from_slice(&data);
+                    let current_output = String::from_utf8_lossy(&output);
+                    if current_output.contains('>') || current_output.contains('#') {
+                        return Ok(output);
+                    }
+                }
+                Ok(Some(ChannelMsg::Eof | ChannelMsg::Close)) => return Ok(output),
+                Ok(Some(ChannelMsg::ExitStatus { exit_status })) => {
+                    debug!("[Brocade] Command exit status: {exit_status}");
+                }
+                Ok(Some(_)) => continue,
+                Ok(None) | Err(_) => {
+                    if output.is_empty() {
+                        if let Ok(None) = timeout(read_timeout, self.channel.wait()).await {
+                            break;
+                        }
+                        continue;
+                    }
+
+                    tokio::time::sleep(Duration::from_millis(100)).await;
+                    let current_output = String::from_utf8_lossy(&output);
+                    if current_output.contains('>') || current_output.contains('#') {
+                        return Ok(output);
+                    }
+                }
+            }
+        }
+
+        Ok(output)
+    }
+
+    fn check_for_command_errors(&self, output: &str, command: &str) -> Result<(), Error> {
+        const ERROR_PATTERNS: &[&str] = &[
+            "invalid input",
+            "syntax error",
+            "command not found",
+            "unknown command",
+            "permission denied",
+            "access denied",
+            "authentication failed",
+            "configuration error",
+            "failed to",
+            "error:",
+        ];
+
+        let output_lower = output.to_lowercase();
+        if ERROR_PATTERNS.iter().any(|&p| output_lower.contains(p)) {
+            return Err(Error::CommandError(format!(
+                "Command '{command}' failed: {}",
+                output.trim()
+            )));
+        }
+
+        if !command.starts_with("show") && output.trim().is_empty() {
+            return Err(Error::CommandError(format!(
+                "Command '{command}' produced no output"
+            )));
+        }
+
+        Ok(())
+    }
+}
+
+async fn wait_for_shell_ready(
+    channel: &mut russh::Channel<russh::client::Msg>,
+    timeouts: &TimeoutConfig,
+) -> Result<(), Error> {
+    let mut buffer = Vec::new();
+    let start = Instant::now();
+
+    while start.elapsed() < timeouts.shell_ready {
+        match timeout(timeouts.message_wait, channel.wait()).await {
+            Ok(Some(ChannelMsg::Data { data })) => {
+                buffer.extend_from_slice(&data);
+                let output = String::from_utf8_lossy(&buffer);
+                let output = output.trim();
+                if output.ends_with('>') || output.ends_with('#') {
+                    debug!("[Brocade] Shell ready");
+                    return Ok(());
+                }
+            }
+            Ok(Some(_)) => continue,
+            Ok(None) => break,
+            Err(_) => continue,
+        }
+    }
+    Ok(())
+}
+
+async fn try_elevate_session(
+    channel: &mut russh::Channel<russh::client::Msg>,
+    username: &str,
+    password: &str,
+    timeouts: &TimeoutConfig,
+) -> Result<(), Error> {
+    channel.data(&b"enable\n"[..]).await?;
+    let start = Instant::now();
+    let mut buffer = Vec::new();
+
+    while start.elapsed() < timeouts.shell_ready {
+        match timeout(timeouts.message_wait, channel.wait()).await {
+            Ok(Some(ChannelMsg::Data { data })) => {
+                buffer.extend_from_slice(&data);
+                let output = String::from_utf8_lossy(&buffer);
+
+                if output.ends_with('#') {
+                    debug!("[Brocade] Privileged mode established");
+                    return Ok(());
+                }
+
+                if output.contains("User Name:") {
+                    channel.data(format!("{}\n", username).as_bytes()).await?;
+                    buffer.clear();
+                } else if output.contains("Password:") {
+                    channel.data(format!("{}\n", password).as_bytes()).await?;
+                    buffer.clear();
+                } else if output.contains('>') {
+                    return Err(Error::AuthenticationError(
+                        "Enable authentication failed".into(),
+                    ));
+                }
+            }
+            Ok(Some(_)) => continue,
+            Ok(None) => break,
+            Err(_) => continue,
+        }
+    }
+
+    let output = String::from_utf8_lossy(&buffer);
+    if output.ends_with('#') {
+        debug!("[Brocade] Privileged mode established");
+        Ok(())
+    } else {
+        Err(Error::AuthenticationError(format!(
+            "Enable failed. Output:\n{output}"
+        )))
+    }
+}
diff --git a/brocade/src/ssh.rs b/brocade/src/ssh.rs
new file mode 100644
index 0000000..08ff96f
--- /dev/null
+++ b/brocade/src/ssh.rs
@@ -0,0 +1,113 @@
+use std::borrow::Cow;
+use std::sync::Arc;
+
+use async_trait::async_trait;
+use russh::client::Handler;
+use russh::kex::DH_G1_SHA1;
+use russh::kex::ECDH_SHA2_NISTP256;
+use russh_keys::key::SSH_RSA;
+
+use super::BrocadeOptions;
+use super::Error;
+
+#[derive(Default, Clone, Debug)]
+pub struct SshOptions {
+    pub preferred_algorithms: russh::Preferred,
+}
+
+impl SshOptions {
+    fn ecdhsa_sha2_nistp256() -> Self {
+        Self {
+            preferred_algorithms: russh::Preferred {
+                kex: Cow::Borrowed(&[ECDH_SHA2_NISTP256]),
+                key: Cow::Borrowed(&[SSH_RSA]),
+                ..Default::default()
+            },
+        }
+    }
+
+    fn legacy() -> Self {
+        Self {
+            preferred_algorithms: russh::Preferred {
+                kex: Cow::Borrowed(&[DH_G1_SHA1]),
+                key: Cow::Borrowed(&[SSH_RSA]),
+                ..Default::default()
+            },
+        }
+    }
+}
+
+pub struct Client;
+
+#[async_trait]
+impl Handler for Client {
+    type Error = Error;
+
+    async fn check_server_key(
+        &mut self,
+        _server_public_key: &russh_keys::key::PublicKey,
+    ) -> Result<bool, Self::Error> {
+        Ok(true)
+    }
+}
+
+pub async fn try_init_client(
+    username: &str,
+    password: &str,
+    ip: &std::net::IpAddr,
+    base_options: BrocadeOptions,
+) -> Result<BrocadeOptions, Error> {
+    let ssh_options = vec![
+        SshOptions::default(),
+        SshOptions::ecdhsa_sha2_nistp256(),
+        SshOptions::legacy(),
+    ];
+
+    for ssh in ssh_options {
+        let opts = BrocadeOptions {
+            ssh,
+            ..base_options.clone()
+        };
+        let client = create_client(*ip, 22, username, password, &opts).await;
+
+        match client {
+            Ok(_) => {
+                return Ok(opts);
+            }
+            Err(e) => match e {
+                Error::NetworkError(e) => {
+                    if e.contains("No common key exchange algorithm") {
+                        continue;
+                    } else {
+                        return Err(Error::NetworkError(e));
+                    }
+                }
+                _ => return Err(e),
+            },
+        }
+    }
+
+    Err(Error::NetworkError(
+        "Could not establish ssh connection: wrong key exchange algorithm".to_string(),
+    ))
+}
+
+pub async fn create_client(
+    ip: std::net::IpAddr,
+    port: u16,
+    username: &str,
+    password: &str,
+    options: &BrocadeOptions,
+) -> Result<russh::client::Handle<Client>, Error> {
+    let config = russh::client::Config {
+        preferred: options.ssh.preferred_algorithms.clone(),
+        ..Default::default()
+    };
+    let mut client = russh::client::connect(Arc::new(config), (ip, port), Client {}).await?;
+    if !client.authenticate_password(username, password).await? {
+        return Err(Error::AuthenticationError(
+            "ssh authentication failed".to_string(),
+        ));
+    }
+    Ok(client)
+}
diff --git a/examples/okd_installation/src/topology.rs b/examples/okd_installation/src/topology.rs
index 02553a5..31062f5 100644
--- a/examples/okd_installation/src/topology.rs
+++ b/examples/okd_installation/src/topology.rs
@@ -1,6 +1,6 @@
 use cidr::Ipv4Cidr;
 use harmony::{
-    hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
+    hardware::{Location, SwitchGroup},
     infra::opnsense::OPNSenseManagementInterface,
     inventory::Inventory,
     topology::{HAClusterTopology, LogicalHost, UnmanagedRouter},
diff --git a/examples/openbao/Cargo.toml b/examples/openbao/Cargo.toml
new file mode 100644
index 0000000..ae0a793
--- /dev/null
+++ b/examples/openbao/Cargo.toml
@@ -0,0 +1,14 @@
+[package]
+name = "example-openbao"
+edition = "2024"
+version.workspace = true
+readme.workspace = true
+license.workspace = true
+
+[dependencies]
+harmony = { path = "../../harmony" }
+harmony_cli = { path = "../../harmony_cli" }
+harmony_macros = { path = "../../harmony_macros" }
+harmony_types = { path = "../../harmony_types" }
+tokio.workspace = true
+url.workspace = true
diff --git a/examples/openbao/README.md b/examples/openbao/README.md
new file mode 100644
index 0000000..d78556c
--- /dev/null
+++ b/examples/openbao/README.md
@@ -0,0 +1,7 @@
+To install an OpenBao instance with Harmony, simply run `cargo run -p example-openbao`.
+
+Depending on your environment configuration, it will either install a k3d cluster locally and deploy on it, or install to a remote cluster.
+
+Then follow the OpenBao documentation to initialize and unseal the instance; this will make OpenBao usable.
+
+https://openbao.org/docs/platform/k8s/helm/run/
diff --git a/examples/openbao/src/main.rs b/examples/openbao/src/main.rs
new file mode 100644
index 0000000..52c5119
--- /dev/null
+++ b/examples/openbao/src/main.rs
@@ -0,0 +1,67 @@
+use std::{collections::HashMap, str::FromStr};
+
+use harmony::{
+    inventory::Inventory,
+    modules::helm::chart::{HelmChartScore, HelmRepository, NonBlankString},
+    topology::K8sAnywhereTopology,
+};
+use harmony_macros::hurl;
+
+#[tokio::main]
+async fn main() {
+    let values_yaml = Some(
+        r#"server:
+  standalone:
+    enabled: true
+    config: |
+      listener "tcp" {
+        tls_disable = true
+        address = "[::]:8200"
+        cluster_address = "[::]:8201"
+      }
+
+      storage "file" {
+        path = "/openbao/data"
+      }
+
+  service:
+    enabled: true
+
+  dataStorage:
+    enabled: true
+    size: 10Gi
+    storageClass: null
+    accessMode: ReadWriteOnce
+
+  auditStorage:
+    enabled: true
+    size: 10Gi
+    storageClass: null
+    accessMode: ReadWriteOnce"#
+            .to_string(),
+    );
+    let openbao = HelmChartScore {
+        namespace: Some(NonBlankString::from_str("openbao").unwrap()),
+        release_name: NonBlankString::from_str("openbao").unwrap(),
+        chart_name: NonBlankString::from_str("openbao/openbao").unwrap(),
+        chart_version: None,
+        values_overrides: None,
+        values_yaml,
+        create_namespace: true,
+        install_only: true,
+        repository: Some(HelmRepository::new(
+            "openbao".to_string(),
+            hurl!("https://openbao.github.io/openbao-helm"),
+            true,
+        )),
+    };
+
+    harmony_cli::run(
+        Inventory::autoload(),
+        K8sAnywhereTopology::from_env(),
+        vec![Box::new(openbao)],
+        None,
+    )
+    .await
+    .unwrap();
+}
diff --git a/harmony/Cargo.toml b/harmony/Cargo.toml
index ad57db1..634cbe9 100644
--- a/harmony/Cargo.toml
+++ b/harmony/Cargo.toml
@@ -77,6 +77,9 @@ harmony_secret = { path = "../harmony_secret" }
 askama.workspace = true
 sqlx.workspace = true
 inquire.workspace = true
+brocade = { path = "../brocade" }
+option-ext = "0.2.0"
 
 [dev-dependencies]
 pretty_assertions.workspace = true
+assertor.workspace = true
diff --git a/harmony/src/domain/topology/ha_cluster.rs b/harmony/src/domain/topology/ha_cluster.rs
index c9f565e..7be2725 100644
--- a/harmony/src/domain/topology/ha_cluster.rs
+++ b/harmony/src/domain/topology/ha_cluster.rs
@@ -1,33 +1,36 @@
 use async_trait::async_trait;
+use brocade::BrocadeOptions;
 use harmony_macros::ip;
-use harmony_types::net::MacAddress;
-use harmony_types::net::Url;
+use harmony_secret::SecretManager;
+use harmony_types::{
+    net::{MacAddress, Url},
+    switch::PortLocation,
+};
+use k8s_openapi::api::core::v1::Namespace;
+use kube::api::ObjectMeta;
 use log::debug;
 use log::info;
 
 use crate::data::FileContent;
 use crate::executors::ExecutorError;
+use crate::hardware::PhysicalHost;
+use crate::infra::brocade::BrocadeSwitchAuth;
+use crate::infra::brocade::BrocadeSwitchClient;
+use crate::modules::okd::crd::{
+    InstallPlanApproval, OperatorGroup, OperatorGroupSpec, Subscription, SubscriptionSpec,
+    nmstate::{self, NMState, NodeNetworkConfigurationPolicy, NodeNetworkConfigurationPolicySpec},
+};
 use crate::topology::PxeOptions;
 
-use super::DHCPStaticEntry;
-use super::DhcpServer;
-use super::DnsRecord;
-use super::DnsRecordType;
-use super::DnsServer;
-use super::Firewall;
-use super::HttpServer;
-use super::IpAddress;
-use super::K8sclient;
-use super::LoadBalancer;
-use super::LoadBalancerService;
-use super::LogicalHost;
-use super::PreparationError;
-use super::PreparationOutcome;
-use super::Router;
-use super::TftpServer;
+use super::{
+    DHCPStaticEntry, DhcpServer, DnsRecord, DnsRecordType, DnsServer, Firewall, HostNetworkConfig,
+    HttpServer, IpAddress, K8sclient, LoadBalancer, LoadBalancerService, LogicalHost,
+    PreparationError, PreparationOutcome, Router, Switch, SwitchClient, SwitchError, TftpServer,
+    Topology, k8s::K8sClient,
+};
 
-use super::Topology;
-use super::k8s::K8sClient;
+use std::collections::BTreeMap;
+use std::net::IpAddr;
 use std::sync::Arc;
 
 #[derive(Debug, Clone)]
@@ -89,6 +92,231 @@ impl HAClusterTopology {
             .to_string()
     }
 
+    async fn ensure_nmstate_operator_installed(&self) -> Result<(), String> {
+        // FIXME: Find a way to check nmstate is already available (get pod -n openshift-nmstate)
+        debug!("Installing NMState operator...");
+        let k8s_client = self.k8s_client().await?;
+
+        let nmstate_namespace = Namespace {
+            metadata: ObjectMeta {
+                name: Some("openshift-nmstate".to_string()),
+                finalizers: Some(vec!["kubernetes".to_string()]),
+                ..Default::default()
+            },
+            ..Default::default()
+        };
+        debug!("Creating NMState namespace: {nmstate_namespace:#?}");
+        k8s_client
+            .apply(&nmstate_namespace, None)
+            .await
+            .map_err(|e| e.to_string())?;
+
+        let nmstate_operator_group = OperatorGroup {
+            metadata: ObjectMeta {
+                name: Some("openshift-nmstate".to_string()),
+                namespace: Some("openshift-nmstate".to_string()),
+                ..Default::default()
+            },
+            spec: OperatorGroupSpec {
+                target_namespaces: vec!["openshift-nmstate".to_string()],
+            },
+        };
+        debug!("Creating NMState operator group: {nmstate_operator_group:#?}");
+        k8s_client
+            .apply(&nmstate_operator_group, None)
+            .await
+            .map_err(|e| e.to_string())?;
+
+        let nmstate_subscription = Subscription {
+            metadata: ObjectMeta {
+                name: Some("kubernetes-nmstate-operator".to_string()),
+                namespace: Some("openshift-nmstate".to_string()),
+                ..Default::default()
+            },
+            spec: SubscriptionSpec {
+                channel: Some("stable".to_string()),
+                install_plan_approval: Some(InstallPlanApproval::Automatic),
+                name: "kubernetes-nmstate-operator".to_string(),
+                source: "redhat-operators".to_string(),
+                source_namespace: "openshift-marketplace".to_string(),
+            },
+        };
+        debug!("Subscribing to NMState Operator: {nmstate_subscription:#?}");
+        k8s_client
+            .apply(&nmstate_subscription, None)
+            .await
+            .map_err(|e| e.to_string())?;
+
+        let nmstate = NMState {
+            metadata: ObjectMeta {
+                name: Some("nmstate".to_string()),
+                ..Default::default()
+            },
+            ..Default::default()
+        };
+        debug!("Creating NMState: {nmstate:#?}");
+        k8s_client
+            .apply(&nmstate, None)
+            .await
+            .map_err(|e| e.to_string())?;
+
+        Ok(())
+    }
+
+    fn get_next_bond_id(&self) -> u8 {
+        42 // FIXME: Find a better way to declare the bond id
+    }
+
+    async fn configure_bond(
+        &self,
+        host: &PhysicalHost,
+        config: &HostNetworkConfig,
+    ) -> Result<(), SwitchError> {
+        self.ensure_nmstate_operator_installed()
+            .await
+            .map_err(|e| {
+                SwitchError::new(format!(
+                    "Can't configure bond, NMState operator not available: {e}"
+                ))
+            })?;
+
+        let bond_config = self.create_bond_configuration(host, config);
+        debug!("Configuring bond for host {host:?}: {bond_config:#?}");
+        self.k8s_client()
+            .await
+            .unwrap()
+            .apply(&bond_config, None)
+            .await
+            .unwrap();
+
+        todo!()
+    }
+
+    fn create_bond_configuration(
+        &self,
+        host: &PhysicalHost,
+        config: &HostNetworkConfig,
+    ) -> NodeNetworkConfigurationPolicy {
+        let host_name = host.id.clone();
+
+        let bond_id = self.get_next_bond_id();
+        let bond_name = format!("bond{bond_id}");
+        let mut bond_mtu: Option<u32> = None;
+        let mut bond_mac_address: Option<String> = None;
+        let mut bond_ports = Vec::new();
+        let mut interfaces: Vec<nmstate::InterfaceSpec> = Vec::new();
+
+        for switch_port in &config.switch_ports {
+            let interface_name = switch_port.interface.name.clone();
+
+            interfaces.push(nmstate::InterfaceSpec {
+                name: interface_name.clone(),
+                description: Some(format!("Member of bond {bond_name}")),
+                r#type: "ethernet".to_string(),
+                state: "up".to_string(),
+                mtu: Some(switch_port.interface.mtu),
+                mac_address: Some(switch_port.interface.mac_address.to_string()),
+                ipv4: Some(nmstate::IpStackSpec {
+                    enabled: Some(false),
+                    ..Default::default()
+                }),
+                ipv6: Some(nmstate::IpStackSpec {
+                    enabled: Some(false),
+                    ..Default::default()
+                }),
+                link_aggregation: None,
+                ..Default::default()
+            });
+
+            bond_ports.push(interface_name);
+
+            // Use the first port's details for the bond mtu and mac address
+            if bond_mtu.is_none() {
+                bond_mtu = Some(switch_port.interface.mtu);
+            }
+            if bond_mac_address.is_none() {
+                bond_mac_address = Some(switch_port.interface.mac_address.to_string());
+            }
+        }
+
+        interfaces.push(nmstate::InterfaceSpec {
+            name: bond_name.clone(),
+            description: Some(format!("Network bond for host {host_name}")),
+            r#type: "bond".to_string(),
+            state: "up".to_string(),
+            mtu: bond_mtu,
+            mac_address: bond_mac_address,
+            ipv4: Some(nmstate::IpStackSpec {
+                dhcp: Some(true),
+                enabled: Some(true),
+                ..Default::default()
+            }),
+            ipv6: Some(nmstate::IpStackSpec {
+                dhcp: Some(true),
+                autoconf: Some(true),
+                enabled: Some(true),
+                ..Default::default()
+            }),
+            link_aggregation: Some(nmstate::BondSpec {
+                mode: "802.3ad".to_string(),
+                ports: bond_ports,
+                ..Default::default()
+            }),
+            ..Default::default()
+        });
+
+        NodeNetworkConfigurationPolicy {
+            metadata: ObjectMeta {
+                name: Some(format!("{host_name}-bond-config")),
+                ..Default::default()
+            },
+            spec: NodeNetworkConfigurationPolicySpec {
+                node_selector: Some(BTreeMap::from([(
"kubernetes.io/hostname".to_string(), + host_name.to_string(), + )])), + desired_state: nmstate::DesiredStateSpec { interfaces }, + }, + } + } + + async fn get_switch_client(&self) -> Result, SwitchError> { + let auth = SecretManager::get_or_prompt::() + .await + .map_err(|e| SwitchError::new(format!("Failed to get credentials: {e}")))?; + + // FIXME: We assume Brocade switches + let switches: Vec = self.switch.iter().map(|s| s.ip).collect(); + let brocade_options = Some(BrocadeOptions { + dry_run: *crate::config::DRY_RUN, + ..Default::default() + }); + let client = + BrocadeSwitchClient::init(&switches, &auth.username, &auth.password, brocade_options) + .await + .map_err(|e| SwitchError::new(format!("Failed to connect to switch: {e}")))?; + + Ok(Box::new(client)) + } + + async fn configure_port_channel( + &self, + host: &PhysicalHost, + config: &HostNetworkConfig, + ) -> Result<(), SwitchError> { + debug!("Configuring port channel: {config:#?}"); + let client = self.get_switch_client().await?; + + let switch_ports = config.switch_ports.iter().map(|s| s.port.clone()).collect(); + + client + .configure_port_channel(&format!("Harmony_{}", host.id), switch_ports) + .await + .map_err(|e| SwitchError::new(format!("Failed to configure switch: {e}")))?; + + Ok(()) + } + pub fn autoload() -> Self { let dummy_infra = Arc::new(DummyInfra {}); let dummy_host = LogicalHost { @@ -263,6 +491,33 @@ impl HttpServer for HAClusterTopology { } } +#[async_trait] +impl Switch for HAClusterTopology { + async fn setup_switch(&self) -> Result<(), SwitchError> { + let client = self.get_switch_client().await?; + client.setup().await?; + Ok(()) + } + + async fn get_port_for_mac_address( + &self, + mac_address: &MacAddress, + ) -> Result, SwitchError> { + let client = self.get_switch_client().await?; + let port = client.find_port(mac_address).await?; + Ok(port) + } + + async fn configure_host_network( + &self, + host: &PhysicalHost, + config: HostNetworkConfig, + ) -> Result<(), SwitchError> { + self.configure_bond(host, &config).await?; + self.configure_port_channel(host, &config).await + } +} + #[derive(Debug)] pub struct DummyInfra; @@ -332,8 +587,8 @@ impl DhcpServer for DummyInfra { } async fn set_dhcp_range( &self, - start: &IpAddress, - end: &IpAddress, + _start: &IpAddress, + _end: &IpAddress, ) -> Result<(), ExecutorError> { unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) } diff --git a/harmony/src/domain/topology/k8s.rs b/harmony/src/domain/topology/k8s.rs index 88bd2e8..5a1e6ec 100644 --- a/harmony/src/domain/topology/k8s.rs +++ b/harmony/src/domain/topology/k8s.rs @@ -1,13 +1,19 @@ +use std::time::Duration; + use derive_new::new; use k8s_openapi::{ ClusterResourceScope, NamespaceResourceScope, - api::{apps::v1::Deployment, core::v1::Pod}, + api::{ + apps::v1::Deployment, + core::v1::{Pod, PodStatus}, + }, }; use kube::{ Client, Config, Error, Resource, api::{Api, AttachParams, DeleteParams, ListParams, Patch, PatchParams, ResourceExt}, config::{KubeConfigOptions, Kubeconfig}, core::ErrorResponse, + error::DiscoveryError, runtime::reflector::Lookup, }; use kube::{api::DynamicObject, runtime::conditions}; @@ -19,7 +25,7 @@ use log::{debug, error, trace}; use serde::{Serialize, de::DeserializeOwned}; use serde_json::{Value, json}; use similar::TextDiff; -use tokio::io::AsyncReadExt; +use tokio::{io::AsyncReadExt, time::sleep}; #[derive(new, Clone)] pub struct K8sClient { @@ -153,6 +159,41 @@ impl K8sClient { } } + pub async fn wait_for_pod_ready( + &self, + pod_name: &str, + namespace: Option<&str>, + ) -> 
+        let mut elapsed = 0;
+        let interval = 5; // seconds between checks
+        let timeout_secs = 120;
+        loop {
+            let pod = self.get_pod(pod_name, namespace).await?;
+
+            if let Some(p) = pod {
+                if let Some(status) = p.status {
+                    if let Some(phase) = status.phase {
+                        if phase.to_lowercase() == "running" {
+                            return Ok(());
+                        }
+                    }
+                }
+            }
+
+            if elapsed >= timeout_secs {
+                return Err(Error::Discovery(DiscoveryError::MissingResource(format!(
+                    "'{}' in ns '{}' did not become ready within {}s",
+                    pod_name,
+                    namespace.unwrap(),
+                    timeout_secs
+                ))));
+            }
+
+            sleep(Duration::from_secs(interval)).await;
+            elapsed += interval;
+        }
+    }
+
     /// Will execute a commond in the first pod found that matches the specified label
     /// '{label}={name}'
     pub async fn exec_app_capture_output(
@@ -419,9 +460,12 @@ impl K8sClient {
             .as_str()
             .expect("couldn't get kind as str");
 
-        let split: Vec<&str> = api_version.splitn(2, "/").collect();
-        let g = split[0];
-        let v = split[1];
+        let mut it = api_version.splitn(2, '/');
+        let first = it.next().unwrap();
+        let (g, v) = match it.next() {
+            Some(second) => (first, second),
+            None => ("", first),
+        };
 
         let gvk = GroupVersionKind::gvk(g, v, kind);
         let api_resource = ApiResource::from_gvk(&gvk);
diff --git a/harmony/src/domain/topology/load_balancer.rs b/harmony/src/domain/topology/load_balancer.rs
index 901602b..59c5add 100644
--- a/harmony/src/domain/topology/load_balancer.rs
+++ b/harmony/src/domain/topology/load_balancer.rs
@@ -28,13 +28,7 @@ pub trait LoadBalancer: Send + Sync {
         &self,
         service: &LoadBalancerService,
     ) -> Result<(), ExecutorError> {
-        debug!(
-            "Listing LoadBalancer services {:?}",
-            self.list_services().await
-        );
-        if !self.list_services().await.contains(service) {
-            self.add_service(service).await?;
-        }
+        self.add_service(service).await?;
         Ok(())
     }
 }
diff --git a/harmony/src/domain/topology/network.rs b/harmony/src/domain/topology/network.rs
index c7ab5cc..99db03a 100644
--- a/harmony/src/domain/topology/network.rs
+++ b/harmony/src/domain/topology/network.rs
@@ -1,10 +1,14 @@
-use std::{net::Ipv4Addr, str::FromStr, sync::Arc};
+use std::{error::Error, net::Ipv4Addr, str::FromStr, sync::Arc};
 
 use async_trait::async_trait;
-use harmony_types::net::{IpAddress, MacAddress};
+use derive_new::new;
+use harmony_types::{
+    net::{IpAddress, MacAddress},
+    switch::PortLocation,
+};
 use serde::Serialize;
 
-use crate::executors::ExecutorError;
+use crate::{executors::ExecutorError, hardware::PhysicalHost};
 
 use super::{LogicalHost, k8s::K8sClient};
 
@@ -172,6 +176,80 @@ impl FromStr for DnsRecordType {
     }
 }
 
+#[async_trait]
+pub trait Switch: Send + Sync {
+    async fn setup_switch(&self) -> Result<(), SwitchError>;
+
+    async fn get_port_for_mac_address(
+        &self,
+        mac_address: &MacAddress,
+    ) -> Result<Option<PortLocation>, SwitchError>;
+
+    async fn configure_host_network(
+        &self,
+        host: &PhysicalHost,
+        config: HostNetworkConfig,
+    ) -> Result<(), SwitchError>;
+}
+
+#[derive(Clone, Debug, PartialEq)]
+pub struct HostNetworkConfig {
+    pub switch_ports: Vec<SwitchPort>,
+}
+
+#[derive(Clone, Debug, PartialEq)]
+pub struct SwitchPort {
+    pub interface: NetworkInterface,
+    pub port: PortLocation,
+}
+
+#[derive(Clone, Debug, PartialEq)]
+pub struct NetworkInterface {
+    pub name: String,
+    pub mac_address: MacAddress,
+    pub speed_mbps: Option<u32>,
+    pub mtu: u32,
+}
+
+#[derive(Debug, Clone, new)]
+pub struct SwitchError {
+    msg: String,
+}
+
+impl std::fmt::Display for SwitchError {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.write_str(&self.msg)
+    }
+}
+
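+// Illustrative usage sketch (annotation only, not part of the PR's change set): it shows how a
+// topology that implements `Switch` is expected to be driven with the types defined above.
+// The `topology`, `host`, and `mac` values are assumed to exist in the caller's context.
+//
+//     let config = HostNetworkConfig {
+//         switch_ports: vec![SwitchPort {
+//             interface: NetworkInterface {
+//                 name: "eno1".to_string(),
+//                 mac_address: mac,
+//                 speed_mbps: Some(10_000),
+//                 mtu: 9000,
+//             },
+//             port: PortLocation(1, 0, 1),
+//         }],
+//     };
+//     topology.configure_host_network(&host, config).await?;
+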
+impl Error for SwitchError {} + +#[async_trait] +pub trait SwitchClient: Send + Sync { + /// Executes essential, idempotent, one-time initial configuration steps. + /// + /// This is an opinionated procedure that sets up a switch to provide high-availability + /// capabilities as decided by the NationTech team. + /// + /// This includes tasks like enabling switchport mode on all interfaces + /// except the ones intended for Fabric Networking. + /// + /// The implementation must ensure the operation is **idempotent** (safe to run multiple times) + /// and that it doesn't break existing configurations. + async fn setup(&self) -> Result<(), SwitchError>; + + async fn find_port( + &self, + mac_address: &MacAddress, + ) -> Result<Option<PortLocation>, SwitchError>; + + async fn configure_port_channel( + &self, + channel_name: &str, + switch_ports: Vec<PortLocation>, + ) -> Result; +} + #[cfg(test)] mod test { use std::sync::Arc; diff --git a/harmony/src/infra/brocade.rs b/harmony/src/infra/brocade.rs new file mode 100644 index 0000000..f721328 --- /dev/null +++ b/harmony/src/infra/brocade.rs @@ -0,0 +1,385 @@ +use async_trait::async_trait; +use brocade::{BrocadeClient, BrocadeOptions, InterSwitchLink, InterfaceStatus, PortOperatingMode}; +use harmony_secret::Secret; +use harmony_types::{ + net::{IpAddress, MacAddress}, + switch::{PortDeclaration, PortLocation}, +}; +use option_ext::OptionExt; +use serde::{Deserialize, Serialize}; + +use crate::topology::{SwitchClient, SwitchError}; + +pub struct BrocadeSwitchClient { + brocade: Box<dyn BrocadeClient>, +} + +impl BrocadeSwitchClient { + pub async fn init( + ip_addresses: &[IpAddress], + username: &str, + password: &str, + options: Option<BrocadeOptions>, + ) -> Result { + let brocade = brocade::init(ip_addresses, 22, username, password, options).await?; + Ok(Self { brocade }) + } +} + +#[async_trait] +impl SwitchClient for BrocadeSwitchClient { + async fn setup(&self) -> Result<(), SwitchError> { + let stack_topology = self + .brocade + .get_stack_topology() + .await + .map_err(|e| SwitchError::new(e.to_string()))?; + + let interfaces = self + .brocade + .get_interfaces() + .await + .map_err(|e| SwitchError::new(e.to_string()))?; + + let interfaces: Vec<(String, PortOperatingMode)> = interfaces + .into_iter() + .filter(|interface| { + interface.operating_mode.is_none() && interface.status == InterfaceStatus::Connected + }) + .filter(|interface| { + !stack_topology.iter().any(|link: &InterSwitchLink| { + link.local_port == interface.port_location + || link.remote_port.contains(&interface.port_location) + }) + }) + .map(|interface| (interface.name.clone(), PortOperatingMode::Access)) + .collect(); + + if interfaces.is_empty() { + return Ok(()); + } + + self.brocade + .configure_interfaces(interfaces) + .await + .map_err(|e| SwitchError::new(e.to_string()))?; + + Ok(()) + } + + async fn find_port( + &self, + mac_address: &MacAddress, + ) -> Result<Option<PortLocation>, SwitchError> { + let table = self + .brocade + .get_mac_address_table() + .await + .map_err(|e| SwitchError::new(format!("{e}")))?; + + let port = table + .iter() + .find(|entry| entry.mac_address == *mac_address) + .map(|entry| match &entry.port { + PortDeclaration::Single(port_location) => Ok(port_location.clone()), + _ => Err(SwitchError::new( + "Multiple ports found for MAC address".into(), + )), + }); + + match port { + Some(Ok(p)) => Ok(Some(p)), + Some(Err(e)) => Err(e), + None => Ok(None), + } + } + + async fn configure_port_channel( + &self, + channel_name: &str, + switch_ports: Vec<PortLocation>, + ) -> Result { + let channel_id = self + .brocade + .find_available_channel_id() 
+ .await + .map_err(|e| SwitchError::new(format!("{e}")))?; + + self.brocade + .create_port_channel(channel_id, channel_name, &switch_ports) + .await + .map_err(|e| SwitchError::new(format!("{e}")))?; + + Ok(channel_id) + } +} + +#[derive(Secret, Serialize, Deserialize, Debug)] +pub struct BrocadeSwitchAuth { + pub username: String, + pub password: String, +} + +#[cfg(test)] +mod tests { + use std::sync::{Arc, Mutex}; + + use assertor::*; + use async_trait::async_trait; + use brocade::{ + BrocadeClient, BrocadeInfo, Error, InterSwitchLink, InterfaceInfo, InterfaceStatus, + InterfaceType, MacAddressEntry, PortChannelId, PortOperatingMode, + }; + use harmony_types::switch::PortLocation; + + use crate::{infra::brocade::BrocadeSwitchClient, topology::SwitchClient}; + + #[tokio::test] + async fn setup_should_configure_ethernet_interfaces_as_access_ports() { + let first_interface = given_interface() + .with_port_location(PortLocation(1, 0, 1)) + .build(); + let second_interface = given_interface() + .with_port_location(PortLocation(1, 0, 4)) + .build(); + let brocade = Box::new(FakeBrocadeClient::new( + vec![], + vec![first_interface.clone(), second_interface.clone()], + )); + let client = BrocadeSwitchClient { + brocade: brocade.clone(), + }; + + client.setup().await.unwrap(); + + let configured_interfaces = brocade.configured_interfaces.lock().unwrap(); + assert_that!(*configured_interfaces).contains_exactly(vec![ + (first_interface.name.clone(), PortOperatingMode::Access), + (second_interface.name.clone(), PortOperatingMode::Access), + ]); + } + + #[tokio::test] + async fn setup_with_an_already_configured_interface_should_skip_configuration() { + let brocade = Box::new(FakeBrocadeClient::new( + vec![], + vec![ + given_interface() + .with_operating_mode(Some(PortOperatingMode::Access)) + .build(), + ], + )); + let client = BrocadeSwitchClient { + brocade: brocade.clone(), + }; + + client.setup().await.unwrap(); + + let configured_interfaces = brocade.configured_interfaces.lock().unwrap(); + assert_that!(*configured_interfaces).is_empty(); + } + + #[tokio::test] + async fn setup_with_a_disconnected_interface_should_skip_configuration() { + let brocade = Box::new(FakeBrocadeClient::new( + vec![], + vec![ + given_interface() + .with_status(InterfaceStatus::SfpAbsent) + .build(), + given_interface() + .with_status(InterfaceStatus::NotConnected) + .build(), + ], + )); + let client = BrocadeSwitchClient { + brocade: brocade.clone(), + }; + + client.setup().await.unwrap(); + + let configured_interfaces = brocade.configured_interfaces.lock().unwrap(); + assert_that!(*configured_interfaces).is_empty(); + } + + #[tokio::test] + async fn setup_with_inter_switch_links_should_not_configure_interfaces_used_to_form_stack() { + let brocade = Box::new(FakeBrocadeClient::new( + vec![ + given_inter_switch_link() + .between(PortLocation(1, 0, 1), PortLocation(2, 0, 1)) + .build(), + given_inter_switch_link() + .between(PortLocation(2, 0, 2), PortLocation(3, 0, 1)) + .build(), + ], + vec![ + given_interface() + .with_port_location(PortLocation(1, 0, 1)) + .build(), + given_interface() + .with_port_location(PortLocation(2, 0, 1)) + .build(), + given_interface() + .with_port_location(PortLocation(3, 0, 1)) + .build(), + ], + )); + let client = BrocadeSwitchClient { + brocade: brocade.clone(), + }; + + client.setup().await.unwrap(); + + let configured_interfaces = brocade.configured_interfaces.lock().unwrap(); + assert_that!(*configured_interfaces).is_empty(); + } + + #[derive(Clone)] + struct FakeBrocadeClient { + 
stack_topology: Vec, + interfaces: Vec, + configured_interfaces: Arc>>, + } + + #[async_trait] + impl BrocadeClient for FakeBrocadeClient { + async fn version(&self) -> Result { + todo!() + } + + async fn get_mac_address_table(&self) -> Result, Error> { + todo!() + } + + async fn get_stack_topology(&self) -> Result, Error> { + Ok(self.stack_topology.clone()) + } + + async fn get_interfaces(&self) -> Result, Error> { + Ok(self.interfaces.clone()) + } + + async fn configure_interfaces( + &self, + interfaces: Vec<(String, PortOperatingMode)>, + ) -> Result<(), Error> { + let mut configured_interfaces = self.configured_interfaces.lock().unwrap(); + *configured_interfaces = interfaces; + + Ok(()) + } + + async fn find_available_channel_id(&self) -> Result { + todo!() + } + + async fn create_port_channel( + &self, + _channel_id: PortChannelId, + _channel_name: &str, + _ports: &[PortLocation], + ) -> Result<(), Error> { + todo!() + } + + async fn clear_port_channel(&self, _channel_name: &str) -> Result<(), Error> { + todo!() + } + } + + impl FakeBrocadeClient { + fn new(stack_topology: Vec, interfaces: Vec) -> Self { + Self { + stack_topology, + interfaces, + configured_interfaces: Arc::new(Mutex::new(vec![])), + } + } + } + + struct InterfaceInfoBuilder { + port_location: Option, + interface_type: Option, + operating_mode: Option, + status: Option, + } + + impl InterfaceInfoBuilder { + fn build(&self) -> InterfaceInfo { + let interface_type = self + .interface_type + .clone() + .unwrap_or(InterfaceType::Ethernet("TenGigabitEthernet".into())); + let port_location = self.port_location.clone().unwrap_or(PortLocation(1, 0, 1)); + let name = format!("{interface_type} {port_location}"); + let status = self.status.clone().unwrap_or(InterfaceStatus::Connected); + + InterfaceInfo { + name, + port_location, + interface_type, + operating_mode: self.operating_mode.clone(), + status, + } + } + + fn with_port_location(self, port_location: PortLocation) -> Self { + Self { + port_location: Some(port_location), + ..self + } + } + + fn with_operating_mode(self, operating_mode: Option) -> Self { + Self { + operating_mode, + ..self + } + } + + fn with_status(self, status: InterfaceStatus) -> Self { + Self { + status: Some(status), + ..self + } + } + } + + struct InterSwitchLinkBuilder { + link: Option<(PortLocation, PortLocation)>, + } + + impl InterSwitchLinkBuilder { + fn build(&self) -> InterSwitchLink { + let link = self + .link + .clone() + .unwrap_or((PortLocation(1, 0, 1), PortLocation(2, 0, 1))); + + InterSwitchLink { + local_port: link.0, + remote_port: Some(link.1), + } + } + + fn between(self, local_port: PortLocation, remote_port: PortLocation) -> Self { + Self { + link: Some((local_port, remote_port)), + } + } + } + + fn given_interface() -> InterfaceInfoBuilder { + InterfaceInfoBuilder { + port_location: None, + interface_type: None, + operating_mode: None, + status: None, + } + } + + fn given_inter_switch_link() -> InterSwitchLinkBuilder { + InterSwitchLinkBuilder { link: None } + } +} diff --git a/harmony/src/infra/mod.rs b/harmony/src/infra/mod.rs index c05c7b6..203cf90 100644 --- a/harmony/src/infra/mod.rs +++ b/harmony/src/infra/mod.rs @@ -1,3 +1,4 @@ +pub mod brocade; pub mod executors; pub mod hp_ilo; pub mod intel_amt; diff --git a/harmony/src/infra/opnsense/load_balancer.rs b/harmony/src/infra/opnsense/load_balancer.rs index ce47f05..3df7511 100644 --- a/harmony/src/infra/opnsense/load_balancer.rs +++ b/harmony/src/infra/opnsense/load_balancer.rs @@ -26,19 +26,13 @@ impl LoadBalancer for 
OPNSenseFirewall { } async fn add_service(&self, service: &LoadBalancerService) -> Result<(), ExecutorError> { - warn!( - "TODO : the current implementation does not check / cleanup / merge with existing haproxy services properly. Make sure to manually verify that the configuration is correct after executing any operation here" - ); let mut config = self.opnsense_config.write().await; + let mut load_balancer = config.load_balancer(); + let (frontend, backend, servers, healthcheck) = harmony_load_balancer_service_to_haproxy_xml(service); - let mut load_balancer = config.load_balancer(); - load_balancer.add_backend(backend); - load_balancer.add_frontend(frontend); - load_balancer.add_servers(servers); - if let Some(healthcheck) = healthcheck { - load_balancer.add_healthcheck(healthcheck); - } + + load_balancer.configure_service(frontend, backend, servers, healthcheck); Ok(()) } @@ -106,7 +100,7 @@ pub(crate) fn haproxy_xml_config_to_harmony_loadbalancer( .backends .backends .iter() - .find(|b| b.uuid == frontend.default_backend); + .find(|b| Some(b.uuid.clone()) == frontend.default_backend); let mut health_check = None; match matching_backend { @@ -116,8 +110,7 @@ pub(crate) fn haproxy_xml_config_to_harmony_loadbalancer( } None => { warn!( - "HAProxy config could not find a matching backend for frontend {:?}", - frontend + "HAProxy config could not find a matching backend for frontend {frontend:?}" ); } } @@ -152,11 +145,11 @@ pub(crate) fn get_servers_for_backend( .servers .iter() .filter_map(|server| { + let address = server.address.clone()?; + let port = server.port?; + if backend_servers.contains(&server.uuid.as_str()) { - return Some(BackendServer { - address: server.address.clone(), - port: server.port, - }); + return Some(BackendServer { address, port }); } None }) @@ -347,7 +340,7 @@ pub(crate) fn harmony_load_balancer_service_to_haproxy_xml( name: format!("frontend_{}", service.listening_port), bind: service.listening_port.to_string(), mode: "tcp".to_string(), // TODO do not depend on health check here - default_backend: backend.uuid.clone(), + default_backend: Some(backend.uuid.clone()), ..Default::default() }; info!("HAPRoxy frontend and backend mode currently hardcoded to tcp"); @@ -361,8 +354,8 @@ fn server_to_haproxy_server(server: &BackendServer) -> HAProxyServer { uuid: Uuid::new_v4().to_string(), name: format!("{}_{}", &server.address, &server.port), enabled: 1, - address: server.address.clone(), - port: server.port, + address: Some(server.address.clone()), + port: Some(server.port), mode: "active".to_string(), server_type: "static".to_string(), ..Default::default() @@ -385,8 +378,8 @@ mod tests { let mut haproxy = HAProxy::default(); let server = HAProxyServer { uuid: "server1".to_string(), - address: "192.168.1.1".to_string(), - port: 80, + address: Some("192.168.1.1".to_string()), + port: Some(80), ..Default::default() }; haproxy.servers.servers.push(server); @@ -411,8 +404,8 @@ mod tests { let mut haproxy = HAProxy::default(); let server = HAProxyServer { uuid: "server1".to_string(), - address: "192.168.1.1".to_string(), - port: 80, + address: Some("192.168.1.1".to_string()), + port: Some(80), ..Default::default() }; haproxy.servers.servers.push(server); @@ -431,8 +424,8 @@ mod tests { let mut haproxy = HAProxy::default(); let server = HAProxyServer { uuid: "server1".to_string(), - address: "192.168.1.1".to_string(), - port: 80, + address: Some("192.168.1.1".to_string()), + port: Some(80), ..Default::default() }; haproxy.servers.servers.push(server); @@ -453,16 +446,16 
@@ mod tests { let mut haproxy = HAProxy::default(); let server = HAProxyServer { uuid: "server1".to_string(), - address: "some-hostname.test.mcd".to_string(), - port: 80, + address: Some("some-hostname.test.mcd".to_string()), + port: Some(80), ..Default::default() }; haproxy.servers.servers.push(server); let server = HAProxyServer { uuid: "server2".to_string(), - address: "192.168.1.2".to_string(), - port: 8080, + address: Some("192.168.1.2".to_string()), + port: Some(8080), ..Default::default() }; haproxy.servers.servers.push(server); diff --git a/harmony/src/modules/application/features/helm_argocd_score.rs b/harmony/src/modules/application/features/helm_argocd_score.rs index ea53691..2e51a9e 100644 --- a/harmony/src/modules/application/features/helm_argocd_score.rs +++ b/harmony/src/modules/application/features/helm_argocd_score.rs @@ -1,4 +1,5 @@ use async_trait::async_trait; +use harmony_macros::hurl; use kube::{Api, api::GroupVersionKind}; use log::{debug, warn}; use non_blank_string_rs::NonBlankString; @@ -1051,7 +1052,7 @@ commitServer: install_only: false, repository: Some(HelmRepository::new( "argo".to_string(), - url::Url::parse("https://argoproj.github.io/argo-helm").unwrap(), + hurl!("https://argoproj.github.io/argo-helm"), true, )), } diff --git a/harmony/src/modules/cert_manager/helm.rs b/harmony/src/modules/cert_manager/helm.rs index eae0ed6..b0770f9 100644 --- a/harmony/src/modules/cert_manager/helm.rs +++ b/harmony/src/modules/cert_manager/helm.rs @@ -1,5 +1,6 @@ use std::{collections::HashMap, str::FromStr}; +use harmony_macros::hurl; use non_blank_string_rs::NonBlankString; use serde::Serialize; use url::Url; @@ -33,7 +34,7 @@ impl Score for CertManagerHelmScore { install_only: true, repository: Some(HelmRepository::new( "jetstack".to_string(), - Url::parse("https://charts.jetstack.io").unwrap(), + hurl!("https://charts.jetstack.io"), true, )), } diff --git a/harmony/src/modules/helm/chart.rs b/harmony/src/modules/helm/chart.rs index e2f8057..4b678f1 100644 --- a/harmony/src/modules/helm/chart.rs +++ b/harmony/src/modules/helm/chart.rs @@ -5,6 +5,7 @@ use crate::score::Score; use crate::topology::{HelmCommand, Topology}; use async_trait::async_trait; use harmony_types::id::Id; +use harmony_types::net::Url; use helm_wrapper_rs; use helm_wrapper_rs::blocking::{DefaultHelmExecutor, HelmExecutor}; use log::{debug, info, warn}; @@ -15,7 +16,6 @@ use std::path::Path; use std::process::{Command, Output, Stdio}; use std::str::FromStr; use temp_file::TempFile; -use url::Url; #[derive(Debug, Clone, Serialize)] pub struct HelmRepository { @@ -78,7 +78,8 @@ impl HelmChartInterpret { repo.name, repo.url, repo.force_update ); - let mut add_args = vec!["repo", "add", &repo.name, repo.url.as_str()]; + let repo_url = repo.url.to_string(); + let mut add_args = vec!["repo", "add", &repo.name, &repo_url]; if repo.force_update { add_args.push("--force-update"); } diff --git a/harmony/src/modules/helm/command.rs b/harmony/src/modules/helm/command.rs deleted file mode 100644 index c4d92c1..0000000 --- a/harmony/src/modules/helm/command.rs +++ /dev/null @@ -1,364 +0,0 @@ -use async_trait::async_trait; -use log::debug; -use serde::Serialize; -use std::collections::HashMap; -use std::io::ErrorKind; -use std::path::PathBuf; -use std::process::{Command, Output}; -use temp_dir::{self, TempDir}; -use temp_file::TempFile; - -use crate::data::Version; -use crate::interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}; -use crate::inventory::Inventory; -use 
crate::score::Score; -use crate::topology::{HelmCommand, K8sclient, Topology}; -use harmony_types::id::Id; - -#[derive(Clone)] -pub struct HelmCommandExecutor { - pub env: HashMap, - pub path: Option, - pub args: Vec, - pub api_versions: Option>, - pub kube_version: String, - pub debug: Option, - pub globals: HelmGlobals, - pub chart: HelmChart, -} - -#[derive(Clone)] -pub struct HelmGlobals { - pub chart_home: Option, - pub config_home: Option, -} - -#[derive(Debug, Clone, Serialize)] -pub struct HelmChart { - pub name: String, - pub version: Option, - pub repo: Option, - pub release_name: Option, - pub namespace: Option, - pub additional_values_files: Vec, - pub values_file: Option, - pub values_inline: Option, - pub include_crds: Option, - pub skip_hooks: Option, - pub api_versions: Option>, - pub kube_version: Option, - pub name_template: String, - pub skip_tests: Option, - pub debug: Option, -} - -impl HelmCommandExecutor { - pub fn generate(mut self) -> Result { - if self.globals.chart_home.is_none() { - self.globals.chart_home = Some(PathBuf::from("charts")); - } - - if self - .clone() - .chart - .clone() - .chart_exists_locally(self.clone().globals.chart_home.unwrap()) - .is_none() - { - if self.chart.repo.is_none() { - return Err(std::io::Error::new( - ErrorKind::Other, - "Chart doesn't exist locally and no repo specified", - )); - } - self.clone().run_command( - self.chart - .clone() - .pull_command(self.globals.chart_home.clone().unwrap()), - )?; - } - - let out = self.clone().run_command( - self.chart - .clone() - .helm_args(self.globals.chart_home.clone().unwrap()), - )?; - - // TODO: don't use unwrap here - let s = String::from_utf8(out.stdout).unwrap(); - debug!("helm stderr: {}", String::from_utf8(out.stderr).unwrap()); - debug!("helm status: {}", out.status); - debug!("helm output: {s}"); - - let clean = s.split_once("---").unwrap().1; - - Ok(clean.to_string()) - } - - pub fn version(self) -> Result { - let out = self.run_command(vec![ - "version".to_string(), - "-c".to_string(), - "--short".to_string(), - ])?; - - // TODO: don't use unwrap - Ok(String::from_utf8(out.stdout).unwrap()) - } - - pub fn run_command(mut self, mut args: Vec) -> Result { - if let Some(d) = self.debug { - if d { - args.push("--debug".to_string()); - } - } - - let path = if let Some(p) = self.path { - p - } else { - PathBuf::from("helm") - }; - - let config_home = match self.globals.config_home { - Some(p) => p, - None => PathBuf::from(TempDir::new()?.path()), - }; - - if let Some(yaml_str) = self.chart.values_inline { - let tf: TempFile = temp_file::with_contents(yaml_str.as_bytes()); - self.chart - .additional_values_files - .push(PathBuf::from(tf.path())); - }; - - self.env.insert( - "HELM_CONFIG_HOME".to_string(), - config_home.to_str().unwrap().to_string(), - ); - self.env.insert( - "HELM_CACHE_HOME".to_string(), - config_home.to_str().unwrap().to_string(), - ); - self.env.insert( - "HELM_DATA_HOME".to_string(), - config_home.to_str().unwrap().to_string(), - ); - - Command::new(path).envs(self.env).args(args).output() - } -} - -impl HelmChart { - pub fn chart_exists_locally(self, chart_home: PathBuf) -> Option { - let chart_path = - PathBuf::from(chart_home.to_str().unwrap().to_string() + "/" + &self.name.to_string()); - - if chart_path.exists() { - Some(chart_path) - } else { - None - } - } - - pub fn pull_command(self, chart_home: PathBuf) -> Vec { - let mut args = vec![ - "pull".to_string(), - "--untar".to_string(), - "--untardir".to_string(), - chart_home.to_str().unwrap().to_string(), - 
]; - - match self.repo { - Some(r) => { - if r.starts_with("oci://") { - args.push( - r.trim_end_matches("/").to_string() + "/" + self.name.clone().as_str(), - ); - } else { - args.push("--repo".to_string()); - args.push(r.to_string()); - - args.push(self.name); - } - } - None => args.push(self.name), - }; - - if let Some(v) = self.version { - args.push("--version".to_string()); - args.push(v.to_string()); - } - - args - } - - pub fn helm_args(self, chart_home: PathBuf) -> Vec { - let mut args: Vec = vec!["template".to_string()]; - - match self.release_name { - Some(rn) => args.push(rn.to_string()), - None => args.push("--generate-name".to_string()), - } - - args.push( - PathBuf::from(chart_home.to_str().unwrap().to_string() + "/" + self.name.as_str()) - .to_str() - .unwrap() - .to_string(), - ); - - if let Some(n) = self.namespace { - args.push("--namespace".to_string()); - args.push(n.to_string()); - } - - if let Some(f) = self.values_file { - args.push("-f".to_string()); - args.push(f.to_str().unwrap().to_string()); - } - - for f in self.additional_values_files { - args.push("-f".to_string()); - args.push(f.to_str().unwrap().to_string()); - } - - if let Some(vv) = self.api_versions { - for v in vv { - args.push("--api-versions".to_string()); - args.push(v); - } - } - - if let Some(kv) = self.kube_version { - args.push("--kube-version".to_string()); - args.push(kv); - } - - if let Some(crd) = self.include_crds { - if crd { - args.push("--include-crds".to_string()); - } - } - - if let Some(st) = self.skip_tests { - if st { - args.push("--skip-tests".to_string()); - } - } - - if let Some(sh) = self.skip_hooks { - if sh { - args.push("--no-hooks".to_string()); - } - } - - if let Some(d) = self.debug { - if d { - args.push("--debug".to_string()); - } - } - - args - } -} - -#[derive(Debug, Clone, Serialize)] -pub struct HelmChartScoreV2 { - pub chart: HelmChart, -} - -impl Score for HelmChartScoreV2 { - fn create_interpret(&self) -> Box> { - Box::new(HelmChartInterpretV2 { - score: self.clone(), - }) - } - - fn name(&self) -> String { - format!( - "{} {} HelmChartScoreV2", - self.chart - .release_name - .clone() - .unwrap_or("Unknown".to_string()), - self.chart.name - ) - } -} - -#[derive(Debug, Serialize)] -pub struct HelmChartInterpretV2 { - pub score: HelmChartScoreV2, -} -impl HelmChartInterpretV2 {} - -#[async_trait] -impl Interpret for HelmChartInterpretV2 { - async fn execute( - &self, - _inventory: &Inventory, - _topology: &T, - ) -> Result { - let _ns = self - .score - .chart - .namespace - .as_ref() - .unwrap_or_else(|| todo!("Get namespace from active kubernetes cluster")); - - let helm_executor = HelmCommandExecutor { - env: HashMap::new(), - path: None, - args: vec![], - api_versions: None, - kube_version: "v1.33.0".to_string(), - debug: Some(false), - globals: HelmGlobals { - chart_home: None, - config_home: None, - }, - chart: self.score.chart.clone(), - }; - - // let mut helm_options = Vec::new(); - // if self.score.create_namespace { - // helm_options.push(NonBlankString::from_str("--create-namespace").unwrap()); - // } - - let res = helm_executor.generate(); - - let _output = match res { - Ok(output) => output, - Err(err) => return Err(InterpretError::new(err.to_string())), - }; - - // TODO: implement actually applying the YAML from the templating in the generate function to a k8s cluster, having trouble passing in straight YAML into the k8s client - - // let k8s_resource = k8s_openapi::serde_json::from_str(output.as_str()).unwrap(); - - // let client = topology - // 
.k8s_client() - // .await - // .expect("Environment should provide enough information to instanciate a client") - // .apply_namespaced(&vec![output], Some(ns.to_string().as_str())); - // match client.apply_yaml(output) { - // Ok(_) => return Ok(Outcome::success("Helm chart deployed".to_string())), - // Err(e) => return Err(InterpretError::new(e)), - // } - - Ok(Outcome::success("Helm chart deployed".to_string())) - } - - fn get_name(&self) -> InterpretName { - InterpretName::HelmCommand - } - fn get_version(&self) -> Version { - todo!() - } - fn get_status(&self) -> InterpretStatus { - todo!() - } - fn get_children(&self) -> Vec { - todo!() - } -} diff --git a/harmony/src/modules/helm/mod.rs b/harmony/src/modules/helm/mod.rs index de69381..831fbe5 100644 --- a/harmony/src/modules/helm/mod.rs +++ b/harmony/src/modules/helm/mod.rs @@ -1,2 +1 @@ pub mod chart; -pub mod command; diff --git a/harmony/src/modules/monitoring/mod.rs b/harmony/src/modules/monitoring/mod.rs index b93f0c6..edda516 100644 --- a/harmony/src/modules/monitoring/mod.rs +++ b/harmony/src/modules/monitoring/mod.rs @@ -4,4 +4,5 @@ pub mod application_monitoring; pub mod grafana; pub mod kube_prometheus; pub mod ntfy; +pub mod okd; pub mod prometheus; diff --git a/harmony/src/modules/monitoring/okd/enable_user_workload.rs b/harmony/src/modules/monitoring/okd/enable_user_workload.rs new file mode 100644 index 0000000..b322b4d --- /dev/null +++ b/harmony/src/modules/monitoring/okd/enable_user_workload.rs @@ -0,0 +1,149 @@ +use std::{collections::BTreeMap, sync::Arc}; + +use crate::{ + data::Version, + interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, + inventory::Inventory, + score::Score, + topology::{K8sclient, Topology, k8s::K8sClient}, +}; +use async_trait::async_trait; +use harmony_types::id::Id; +use k8s_openapi::api::core::v1::ConfigMap; +use kube::api::ObjectMeta; +use serde::Serialize; + +#[derive(Clone, Debug, Serialize)] +pub struct OpenshiftUserWorkloadMonitoring {} + +impl Score for OpenshiftUserWorkloadMonitoring { + fn name(&self) -> String { + "OpenshiftUserWorkloadMonitoringScore".to_string() + } + + fn create_interpret(&self) -> Box> { + Box::new(OpenshiftUserWorkloadMonitoringInterpret {}) + } +} + +#[derive(Clone, Debug, Serialize)] +pub struct OpenshiftUserWorkloadMonitoringInterpret {} + +#[async_trait] +impl Interpret for OpenshiftUserWorkloadMonitoringInterpret { + async fn execute( + &self, + _inventory: &Inventory, + topology: &T, + ) -> Result { + let client = topology.k8s_client().await.unwrap(); + self.update_cluster_monitoring_config_cm(&client).await?; + self.update_user_workload_monitoring_config_cm(&client) + .await?; + self.verify_user_workload(&client).await?; + Ok(Outcome::success( + "successfully enabled user-workload-monitoring".to_string(), + )) + } + + fn get_name(&self) -> InterpretName { + InterpretName::Custom("OpenshiftUserWorkloadMonitoring") + } + + fn get_version(&self) -> Version { + todo!() + } + + fn get_status(&self) -> InterpretStatus { + todo!() + } + + fn get_children(&self) -> Vec { + todo!() + } +} + +impl OpenshiftUserWorkloadMonitoringInterpret { + pub async fn update_cluster_monitoring_config_cm( + &self, + client: &Arc, + ) -> Result { + let mut data = BTreeMap::new(); + data.insert( + "config.yaml".to_string(), + r#" +enableUserWorkload: true +alertmanagerMain: + enableUserAlertmanagerConfig: true +"# + .to_string(), + ); + + let cm = ConfigMap { + metadata: ObjectMeta { + name: Some("cluster-monitoring-config".to_string()), + namespace: 
Some("openshift-monitoring".to_string()), + ..Default::default() + }, + data: Some(data), + ..Default::default() + }; + client.apply(&cm, Some("openshift-monitoring")).await?; + + Ok(Outcome::success( + "updated cluster-monitoring-config-map".to_string(), + )) + } + + pub async fn update_user_workload_monitoring_config_cm( + &self, + client: &Arc, + ) -> Result { + let mut data = BTreeMap::new(); + data.insert( + "config.yaml".to_string(), + r#" +alertmanager: + enabled: true + enableAlertmanagerConfig: true +"# + .to_string(), + ); + let cm = ConfigMap { + metadata: ObjectMeta { + name: Some("user-workload-monitoring-config".to_string()), + namespace: Some("openshift-user-workload-monitoring".to_string()), + ..Default::default() + }, + data: Some(data), + ..Default::default() + }; + client + .apply(&cm, Some("openshift-user-workload-monitoring")) + .await?; + + Ok(Outcome::success( + "updated openshift-user-monitoring-config-map".to_string(), + )) + } + + pub async fn verify_user_workload( + &self, + client: &Arc, + ) -> Result { + let namespace = "openshift-user-workload-monitoring"; + let alertmanager_name = "alertmanager-user-workload-0"; + let prometheus_name = "prometheus-user-workload-0"; + client + .wait_for_pod_ready(alertmanager_name, Some(namespace)) + .await?; + client + .wait_for_pod_ready(prometheus_name, Some(namespace)) + .await?; + + Ok(Outcome::success(format!( + "pods: {}, {} ready in ns: {}", + alertmanager_name, prometheus_name, namespace + ))) + } +} diff --git a/harmony/src/modules/monitoring/okd/mod.rs b/harmony/src/modules/monitoring/okd/mod.rs new file mode 100644 index 0000000..50339ba --- /dev/null +++ b/harmony/src/modules/monitoring/okd/mod.rs @@ -0,0 +1 @@ +pub mod enable_user_workload; diff --git a/harmony/src/modules/okd/bootstrap_03_control_plane.rs b/harmony/src/modules/okd/bootstrap_03_control_plane.rs index ba9e12d..af8e71f 100644 --- a/harmony/src/modules/okd/bootstrap_03_control_plane.rs +++ b/harmony/src/modules/okd/bootstrap_03_control_plane.rs @@ -5,8 +5,10 @@ use crate::{ interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, inventory::{HostRole, Inventory}, modules::{ - dhcp::DhcpHostBindingScore, http::IPxeMacBootFileScore, - inventory::DiscoverHostForRoleScore, okd::templates::BootstrapIpxeTpl, + dhcp::DhcpHostBindingScore, + http::IPxeMacBootFileScore, + inventory::DiscoverHostForRoleScore, + okd::{host_network::HostNetworkConfigurationScore, templates::BootstrapIpxeTpl}, }, score::Score, topology::{HAClusterTopology, HostBinding}, @@ -28,7 +30,7 @@ pub struct OKDSetup03ControlPlaneScore {} impl Score for OKDSetup03ControlPlaneScore { fn create_interpret(&self) -> Box> { - Box::new(OKDSetup03ControlPlaneInterpret::new(self.clone())) + Box::new(OKDSetup03ControlPlaneInterpret::new()) } fn name(&self) -> String { @@ -38,17 +40,15 @@ impl Score for OKDSetup03ControlPlaneScore { #[derive(Debug, Clone)] pub struct OKDSetup03ControlPlaneInterpret { - score: OKDSetup03ControlPlaneScore, version: Version, status: InterpretStatus, } impl OKDSetup03ControlPlaneInterpret { - pub fn new(score: OKDSetup03ControlPlaneScore) -> Self { + pub fn new() -> Self { let version = Version::from("1.0.0").unwrap(); Self { version, - score, status: InterpretStatus::QUEUED, } } @@ -159,7 +159,7 @@ impl OKDSetup03ControlPlaneInterpret { } .to_string(); - debug!("[ControlPlane] iPXE content template:\n{}", content); + debug!("[ControlPlane] iPXE content template:\n{content}"); // Create and apply an iPXE boot file for each node. 
for node in nodes { @@ -189,16 +189,13 @@ impl OKDSetup03ControlPlaneInterpret { /// Prompts the user to reboot the target control plane nodes. async fn reboot_targets(&self, nodes: &Vec) -> Result<(), InterpretError> { let node_ids: Vec = nodes.iter().map(|n| n.id.to_string()).collect(); - info!( - "[ControlPlane] Requesting reboot for control plane nodes: {:?}", - node_ids - ); + info!("[ControlPlane] Requesting reboot for control plane nodes: {node_ids:?}",); let confirmation = inquire::Confirm::new( &format!("Please reboot the {} control plane nodes ({}) to apply their PXE configuration. Press enter when ready.", nodes.len(), node_ids.join(", ")), ) .prompt() - .map_err(|e| InterpretError::new(format!("User prompt failed: {}", e)))?; + .map_err(|e| InterpretError::new(format!("User prompt failed: {e}")))?; if !confirmation { return Err(InterpretError::new( @@ -210,14 +207,23 @@ impl OKDSetup03ControlPlaneInterpret { } /// Placeholder for automating network bonding configuration. - async fn persist_network_bond(&self) -> Result<(), InterpretError> { - // Generate MC or NNCP from inventory NIC data; apply via ignition or post-join. - info!("[ControlPlane] Ensuring persistent bonding via MachineConfig/NNCP"); + async fn persist_network_bond( + &self, + inventory: &Inventory, + topology: &HAClusterTopology, + hosts: &Vec, + ) -> Result<(), InterpretError> { + info!("[ControlPlane] Ensuring persistent bonding"); + let score = HostNetworkConfigurationScore { + hosts: hosts.clone(), + }; + score.interpret(inventory, topology).await?; + inquire::Confirm::new( "Network configuration for control plane nodes is not automated yet. Configure it manually if needed.", ) .prompt() - .map_err(|e| InterpretError::new(format!("User prompt failed: {}", e)))?; + .map_err(|e| InterpretError::new(format!("User prompt failed: {e}")))?; Ok(()) } @@ -260,7 +266,8 @@ impl Interpret for OKDSetup03ControlPlaneInterpret { self.reboot_targets(&nodes).await?; // 5. Placeholder for post-boot network configuration (e.g., bonding). - self.persist_network_bond().await?; + self.persist_network_bond(inventory, topology, &nodes) + .await?; // TODO: Implement a step to wait for the control plane nodes to join the cluster // and for the cluster operators to become available. 
This would be similar to diff --git a/harmony/src/modules/okd/bootstrap_load_balancer.rs b/harmony/src/modules/okd/bootstrap_load_balancer.rs index ccc69c9..e99fe97 100644 --- a/harmony/src/modules/okd/bootstrap_load_balancer.rs +++ b/harmony/src/modules/okd/bootstrap_load_balancer.rs @@ -77,6 +77,8 @@ impl OKDBootstrapLoadBalancerScore { address: topology.bootstrap_host.ip.to_string(), port, }); + + backend.dedup(); backend } } diff --git a/harmony/src/modules/okd/crd/mod.rs b/harmony/src/modules/okd/crd/mod.rs new file mode 100644 index 0000000..c1a68ce --- /dev/null +++ b/harmony/src/modules/okd/crd/mod.rs @@ -0,0 +1,41 @@ +use kube::CustomResource; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +pub mod nmstate; + +#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)] +#[kube( + group = "operators.coreos.com", + version = "v1", + kind = "OperatorGroup", + namespaced +)] +#[serde(rename_all = "camelCase")] +pub struct OperatorGroupSpec { + pub target_namespaces: Vec, +} + +#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)] +#[kube( + group = "operators.coreos.com", + version = "v1alpha1", + kind = "Subscription", + namespaced +)] +#[serde(rename_all = "camelCase")] +pub struct SubscriptionSpec { + pub name: String, + pub source: String, + pub source_namespace: String, + pub channel: Option, + pub install_plan_approval: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)] +pub enum InstallPlanApproval { + #[serde(rename = "Automatic")] + Automatic, + #[serde(rename = "Manual")] + Manual, +} diff --git a/harmony/src/modules/okd/crd/nmstate.rs b/harmony/src/modules/okd/crd/nmstate.rs new file mode 100644 index 0000000..5f71e4e --- /dev/null +++ b/harmony/src/modules/okd/crd/nmstate.rs @@ -0,0 +1,251 @@ +use std::collections::BTreeMap; + +use kube::CustomResource; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +use serde_json::Value; + +#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)] +#[kube(group = "nmstate.io", version = "v1", kind = "NMState", namespaced)] +#[serde(rename_all = "camelCase")] +pub struct NMStateSpec { + pub probe_configuration: Option, +} + +impl Default for NMState { + fn default() -> Self { + Self { + metadata: Default::default(), + spec: NMStateSpec { + probe_configuration: None, + }, + } + } +} + +#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)] +#[serde(rename_all = "camelCase")] +pub struct ProbeConfig { + pub dns: ProbeDns, +} + +#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)] +#[serde(rename_all = "camelCase")] +pub struct ProbeDns { + pub host: String, +} + +#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)] +#[kube( + group = "nmstate.io", + version = "v1", + kind = "NodeNetworkConfigurationPolicy", + namespaced +)] +#[serde(rename_all = "camelCase")] +pub struct NodeNetworkConfigurationPolicySpec { + pub node_selector: Option>, + pub desired_state: DesiredStateSpec, +} + +#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct DesiredStateSpec { + pub interfaces: Vec, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct InterfaceSpec { + pub name: String, + pub description: Option, + pub r#type: String, + pub state: String, + pub mac_address: Option, + pub mtu: Option, + pub controller: Option, + pub ipv4: Option, + pub ipv6: Option, + pub ethernet: Option, + 
pub link_aggregation: Option, + pub vlan: Option, + pub vxlan: Option, + pub mac_vtap: Option, + pub mac_vlan: Option, + pub infiniband: Option, + pub linux_bridge: Option, + pub ovs_bridge: Option, + pub ethtool: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct IpStackSpec { + pub enabled: Option, + pub dhcp: Option, + pub autoconf: Option, + pub address: Option>, + pub auto_dns: Option, + pub auto_gateway: Option, + pub auto_routes: Option, + pub dhcp_client_id: Option, + pub dhcp_duid: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct IpAddressSpec { + pub ip: String, + pub prefix_length: u8, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct EthernetSpec { + pub speed: Option, + pub duplex: Option, + pub auto_negotiation: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct BondSpec { + pub mode: String, + pub ports: Vec, + pub options: Option>, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct VlanSpec { + pub base_iface: String, + pub id: u16, + pub protocol: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct VxlanSpec { + pub base_iface: String, + pub id: u32, + pub remote: String, + pub local: Option, + pub learning: Option, + pub destination_port: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct MacVtapSpec { + pub base_iface: String, + pub mode: String, + pub promiscuous: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct MacVlanSpec { + pub base_iface: String, + pub mode: String, + pub promiscuous: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct InfinibandSpec { + pub base_iface: String, + pub pkey: String, + pub mode: String, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct LinuxBridgeSpec { + pub options: Option, + pub ports: Option>, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct LinuxBridgeOptions { + pub mac_ageing_time: Option, + pub multicast_snooping: Option, + pub stp: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct StpOptions { + pub enabled: Option, + pub forward_delay: Option, + pub hello_time: Option, + pub max_age: Option, + pub priority: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct LinuxBridgePort { + pub name: String, + pub vlan: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct LinuxBridgePortVlan { + pub mode: Option, + pub trunk_tags: Option>, + pub tag: Option, + pub enable_native: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct VlanTag { + pub id: u16, + pub id_range: 
Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct VlanIdRange { + pub min: u16, + pub max: u16, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct OvsBridgeSpec { + pub options: Option, + pub ports: Option>, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct OvsBridgeOptions { + pub stp: Option, + pub rstp: Option, + pub mcast_snooping_enable: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct OvsPortSpec { + pub name: String, + pub link_aggregation: Option, + pub vlan: Option, + pub r#type: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct EthtoolSpec { + // TODO: Properly describe this spec (https://nmstate.io/devel/yaml_api.html#ethtool) +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct EthtoolFecSpec { + pub auto: Option, + pub mode: Option, +} diff --git a/harmony/src/modules/okd/host_network.rs b/harmony/src/modules/okd/host_network.rs new file mode 100644 index 0000000..3bc8c3c --- /dev/null +++ b/harmony/src/modules/okd/host_network.rs @@ -0,0 +1,394 @@ +use async_trait::async_trait; +use harmony_types::id::Id; +use log::{debug, info}; +use serde::Serialize; + +use crate::{ + data::Version, + hardware::PhysicalHost, + interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, + inventory::Inventory, + score::Score, + topology::{HostNetworkConfig, NetworkInterface, Switch, SwitchPort, Topology}, +}; + +#[derive(Debug, Clone, Serialize)] +pub struct HostNetworkConfigurationScore { + pub hosts: Vec, +} + +impl Score for HostNetworkConfigurationScore { + fn name(&self) -> String { + "HostNetworkConfigurationScore".into() + } + + fn create_interpret(&self) -> Box> { + Box::new(HostNetworkConfigurationInterpret { + score: self.clone(), + }) + } +} + +#[derive(Debug)] +pub struct HostNetworkConfigurationInterpret { + score: HostNetworkConfigurationScore, +} + +impl HostNetworkConfigurationInterpret { + async fn configure_network_for_host( + &self, + topology: &T, + host: &PhysicalHost, + ) -> Result<(), InterpretError> { + let switch_ports = self.collect_switch_ports_for_host(topology, host).await?; + if !switch_ports.is_empty() { + topology + .configure_host_network(host, HostNetworkConfig { switch_ports }) + .await + .map_err(|e| InterpretError::new(format!("Failed to configure host: {e}")))?; + } + + Ok(()) + } + + async fn collect_switch_ports_for_host( + &self, + topology: &T, + host: &PhysicalHost, + ) -> Result, InterpretError> { + let mut switch_ports = vec![]; + + for network_interface in &host.network { + let mac_address = network_interface.mac_address; + + match topology.get_port_for_mac_address(&mac_address).await { + Ok(Some(port)) => { + switch_ports.push(SwitchPort { + interface: NetworkInterface { + name: network_interface.name.clone(), + mac_address, + speed_mbps: network_interface.speed_mbps, + mtu: network_interface.mtu, + }, + port, + }); + } + Ok(None) => debug!("No port found for host '{}', skipping", host.id), + Err(e) => { + return Err(InterpretError::new(format!( + "Failed to get port for host '{}': {}", + host.id, e + ))); + } + } + } + + Ok(switch_ports) + } +} + +#[async_trait] +impl 
Interpret for HostNetworkConfigurationInterpret { + fn get_name(&self) -> InterpretName { + InterpretName::Custom("HostNetworkConfigurationInterpret") + } + + fn get_version(&self) -> Version { + todo!() + } + + fn get_status(&self) -> InterpretStatus { + todo!() + } + + fn get_children(&self) -> Vec { + vec![] + } + + async fn execute( + &self, + _inventory: &Inventory, + topology: &T, + ) -> Result { + if self.score.hosts.is_empty() { + return Ok(Outcome::noop("No hosts to configure".into())); + } + + info!( + "Started network configuration for {} host(s)...", + self.score.hosts.len() + ); + + topology + .setup_switch() + .await + .map_err(|e| InterpretError::new(format!("Switch setup failed: {e}")))?; + + let mut configured_host_count = 0; + for host in &self.score.hosts { + self.configure_network_for_host(topology, host).await?; + configured_host_count += 1; + } + + if configured_host_count > 0 { + Ok(Outcome::success(format!( + "Configured {configured_host_count}/{} host(s)", + self.score.hosts.len() + ))) + } else { + Ok(Outcome::noop("No hosts configured".into())) + } + } +} + +#[cfg(test)] +mod tests { + use assertor::*; + use harmony_types::{net::MacAddress, switch::PortLocation}; + use lazy_static::lazy_static; + + use crate::{ + hardware::HostCategory, + topology::{ + HostNetworkConfig, PreparationError, PreparationOutcome, SwitchError, SwitchPort, + }, + }; + use std::{ + str::FromStr, + sync::{Arc, Mutex}, + }; + + use super::*; + + lazy_static! { + pub static ref HOST_ID: Id = Id::from_str("host-1").unwrap(); + pub static ref ANOTHER_HOST_ID: Id = Id::from_str("host-2").unwrap(); + pub static ref EXISTING_INTERFACE: NetworkInterface = NetworkInterface { + mac_address: MacAddress::try_from("AA:BB:CC:DD:EE:F1".to_string()).unwrap(), + name: "interface-1".into(), + speed_mbps: None, + mtu: 1, + }; + pub static ref ANOTHER_EXISTING_INTERFACE: NetworkInterface = NetworkInterface { + mac_address: MacAddress::try_from("AA:BB:CC:DD:EE:F2".to_string()).unwrap(), + name: "interface-2".into(), + speed_mbps: None, + mtu: 1, + }; + pub static ref UNKNOWN_INTERFACE: NetworkInterface = NetworkInterface { + mac_address: MacAddress::try_from("11:22:33:44:55:61".to_string()).unwrap(), + name: "unknown-interface".into(), + speed_mbps: None, + mtu: 1, + }; + pub static ref PORT: PortLocation = PortLocation(1, 0, 42); + pub static ref ANOTHER_PORT: PortLocation = PortLocation(2, 0, 42); + } + + #[tokio::test] + async fn should_setup_switch() { + let host = given_host(&HOST_ID, vec![EXISTING_INTERFACE.clone()]); + let score = given_score(vec![host]); + let topology = TopologyWithSwitch::new(); + + let _ = score.interpret(&Inventory::empty(), &topology).await; + + let switch_setup = topology.switch_setup.lock().unwrap(); + assert_that!(*switch_setup).is_true(); + } + + #[tokio::test] + async fn host_with_one_mac_address_should_create_bond_with_one_interface() { + let host = given_host(&HOST_ID, vec![EXISTING_INTERFACE.clone()]); + let score = given_score(vec![host]); + let topology = TopologyWithSwitch::new(); + + let _ = score.interpret(&Inventory::empty(), &topology).await; + + let configured_host_networks = topology.configured_host_networks.lock().unwrap(); + assert_that!(*configured_host_networks).contains_exactly(vec![( + HOST_ID.clone(), + HostNetworkConfig { + switch_ports: vec![SwitchPort { + interface: EXISTING_INTERFACE.clone(), + port: PORT.clone(), + }], + }, + )]); + } + + #[tokio::test] + async fn host_with_multiple_mac_addresses_should_create_one_bond_with_all_interfaces() { + let 
score = given_score(vec![given_host( + &HOST_ID, + vec![ + EXISTING_INTERFACE.clone(), + ANOTHER_EXISTING_INTERFACE.clone(), + ], + )]); + let topology = TopologyWithSwitch::new(); + + let _ = score.interpret(&Inventory::empty(), &topology).await; + + let configured_host_networks = topology.configured_host_networks.lock().unwrap(); + assert_that!(*configured_host_networks).contains_exactly(vec![( + HOST_ID.clone(), + HostNetworkConfig { + switch_ports: vec![ + SwitchPort { + interface: EXISTING_INTERFACE.clone(), + port: PORT.clone(), + }, + SwitchPort { + interface: ANOTHER_EXISTING_INTERFACE.clone(), + port: ANOTHER_PORT.clone(), + }, + ], + }, + )]); + } + + #[tokio::test] + async fn multiple_hosts_should_create_one_bond_per_host() { + let score = given_score(vec![ + given_host(&HOST_ID, vec![EXISTING_INTERFACE.clone()]), + given_host(&ANOTHER_HOST_ID, vec![ANOTHER_EXISTING_INTERFACE.clone()]), + ]); + let topology = TopologyWithSwitch::new(); + + let _ = score.interpret(&Inventory::empty(), &topology).await; + + let configured_host_networks = topology.configured_host_networks.lock().unwrap(); + assert_that!(*configured_host_networks).contains_exactly(vec![ + ( + HOST_ID.clone(), + HostNetworkConfig { + switch_ports: vec![SwitchPort { + interface: EXISTING_INTERFACE.clone(), + port: PORT.clone(), + }], + }, + ), + ( + ANOTHER_HOST_ID.clone(), + HostNetworkConfig { + switch_ports: vec![SwitchPort { + interface: ANOTHER_EXISTING_INTERFACE.clone(), + port: ANOTHER_PORT.clone(), + }], + }, + ), + ]); + } + + #[tokio::test] + async fn port_not_found_for_mac_address_should_not_configure_interface() { + let score = given_score(vec![given_host(&HOST_ID, vec![UNKNOWN_INTERFACE.clone()])]); + let topology = TopologyWithSwitch::new_port_not_found(); + + let _ = score.interpret(&Inventory::empty(), &topology).await; + + let configured_host_networks = topology.configured_host_networks.lock().unwrap(); + assert_that!(*configured_host_networks).is_empty(); + } + + fn given_score(hosts: Vec) -> HostNetworkConfigurationScore { + HostNetworkConfigurationScore { hosts } + } + + fn given_host(id: &Id, network_interfaces: Vec) -> PhysicalHost { + let network = network_interfaces.iter().map(given_interface).collect(); + + PhysicalHost { + id: id.clone(), + category: HostCategory::Server, + network, + storage: vec![], + labels: vec![], + memory_modules: vec![], + cpus: vec![], + } + } + + fn given_interface( + interface: &NetworkInterface, + ) -> harmony_inventory_agent::hwinfo::NetworkInterface { + harmony_inventory_agent::hwinfo::NetworkInterface { + name: interface.name.clone(), + mac_address: interface.mac_address, + speed_mbps: interface.speed_mbps, + is_up: true, + mtu: interface.mtu, + ipv4_addresses: vec![], + ipv6_addresses: vec![], + driver: "driver".into(), + firmware_version: None, + } + } + + struct TopologyWithSwitch { + available_ports: Arc>>, + configured_host_networks: Arc>>, + switch_setup: Arc>, + } + + impl TopologyWithSwitch { + fn new() -> Self { + Self { + available_ports: Arc::new(Mutex::new(vec![PORT.clone(), ANOTHER_PORT.clone()])), + configured_host_networks: Arc::new(Mutex::new(vec![])), + switch_setup: Arc::new(Mutex::new(false)), + } + } + + fn new_port_not_found() -> Self { + Self { + available_ports: Arc::new(Mutex::new(vec![])), + configured_host_networks: Arc::new(Mutex::new(vec![])), + switch_setup: Arc::new(Mutex::new(false)), + } + } + } + + #[async_trait] + impl Topology for TopologyWithSwitch { + fn name(&self) -> &str { + "SwitchWithPortTopology" + } + + async fn 
ensure_ready(&self) -> Result { + Ok(PreparationOutcome::Success { details: "".into() }) + } + } + + #[async_trait] + impl Switch for TopologyWithSwitch { + async fn setup_switch(&self) -> Result<(), SwitchError> { + let mut switch_configured = self.switch_setup.lock().unwrap(); + *switch_configured = true; + Ok(()) + } + + async fn get_port_for_mac_address( + &self, + _mac_address: &MacAddress, + ) -> Result, SwitchError> { + let mut ports = self.available_ports.lock().unwrap(); + if ports.is_empty() { + return Ok(None); + } + Ok(Some(ports.remove(0))) + } + + async fn configure_host_network( + &self, + host: &PhysicalHost, + config: HostNetworkConfig, + ) -> Result<(), SwitchError> { + let mut configured_host_networks = self.configured_host_networks.lock().unwrap(); + configured_host_networks.push((host.id.clone(), config.clone())); + + Ok(()) + } + } +} diff --git a/harmony/src/modules/okd/mod.rs b/harmony/src/modules/okd/mod.rs index 1bd4514..a12f132 100644 --- a/harmony/src/modules/okd/mod.rs +++ b/harmony/src/modules/okd/mod.rs @@ -19,3 +19,5 @@ pub use bootstrap_03_control_plane::*; pub use bootstrap_04_workers::*; pub use bootstrap_05_sanity_check::*; pub use bootstrap_06_installation_report::*; +pub mod crd; +pub mod host_network; diff --git a/harmony_composer/src/main.rs b/harmony_composer/src/main.rs index 4119460..817bb8b 100644 --- a/harmony_composer/src/main.rs +++ b/harmony_composer/src/main.rs @@ -54,6 +54,9 @@ struct DeployArgs { #[arg(long = "profile", short = 'p', default_value = "dev")] harmony_profile: HarmonyProfile, + + #[arg(long = "dry-run", short = 'd', default_value = "false")] + dry_run: bool, } #[derive(Args, Clone, Debug)] @@ -178,6 +181,7 @@ async fn main() { command .env("HARMONY_USE_LOCAL_K3D", format!("{use_local_k3d}")) .env("HARMONY_PROFILE", format!("{}", args.harmony_profile)) + .env("HARMONY_DRY_RUN", format!("{}", args.dry_run)) .arg("-y") .arg("-a"); diff --git a/harmony_types/src/id.rs b/harmony_types/src/id.rs index 2cb2674..0a82906 100644 --- a/harmony_types/src/id.rs +++ b/harmony_types/src/id.rs @@ -19,7 +19,7 @@ use serde::{Deserialize, Serialize}; /// /// **It is not meant to be very secure or unique**, it is suitable to generate up to 10 000 items per /// second with a reasonable collision rate of 0,000014 % as calculated by this calculator : https://kevingal.com/apps/collision.html -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord, Serialize, Deserialize)] pub struct Id { value: String, } diff --git a/harmony_types/src/lib.rs b/harmony_types/src/lib.rs index 7bb1abd..098379a 100644 --- a/harmony_types/src/lib.rs +++ b/harmony_types/src/lib.rs @@ -1,2 +1,3 @@ pub mod id; pub mod net; +pub mod switch; diff --git a/harmony_types/src/net.rs b/harmony_types/src/net.rs index 594a3e2..51de86e 100644 --- a/harmony_types/src/net.rs +++ b/harmony_types/src/net.rs @@ -1,6 +1,6 @@ use serde::{Deserialize, Serialize}; -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize, PartialOrd, Ord)] pub struct MacAddress(pub [u8; 6]); impl MacAddress { @@ -41,7 +41,7 @@ impl TryFrom for MacAddress { bytes[i] = u8::from_str_radix(part, 16).map_err(|_| { std::io::Error::new( std::io::ErrorKind::InvalidInput, - format!("Invalid hex value in part {}: '{}'", i, part), + format!("Invalid hex value in part {i}: '{part}'"), ) })?; } @@ -106,8 +106,8 @@ impl Serialize for Url { impl std::fmt::Display for Url { fn 
fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - Url::LocalFolder(path) => write!(f, "{}", path), - Url::Url(url) => write!(f, "{}", url), + Url::LocalFolder(path) => write!(f, "{path}"), + Url::Url(url) => write!(f, "{url}"), } } } diff --git a/harmony_types/src/switch.rs b/harmony_types/src/switch.rs new file mode 100644 index 0000000..2d32754 --- /dev/null +++ b/harmony_types/src/switch.rs @@ -0,0 +1,176 @@ +use std::{fmt, str::FromStr}; + +/// Simple error type for port parsing failures. +#[derive(Debug)] +pub enum PortParseError { + /// The port string did not conform to the expected S/M/P or range format. + InvalidFormat, + /// A stack, module, or port segment could not be parsed as a number. + InvalidSegment(String), +} + +impl fmt::Display for PortParseError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + PortParseError::InvalidFormat => write!(f, "Port string is in an unexpected format."), + PortParseError::InvalidSegment(s) => write!(f, "Invalid segment in port string: {}", s), + } + } +} + +/// Represents the atomic, physical location of a switch port: `<stack>/<module>/<port>`. +/// +/// Example: `1/1/1` +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)] +pub struct PortLocation(pub u8, pub u8, pub u8); + +impl fmt::Display for PortLocation { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}/{}/{}", self.0, self.1, self.2) + } +} + +impl FromStr for PortLocation { + type Err = PortParseError; + + /// Parses a string slice into a `PortLocation`. + /// + /// # Examples + /// + /// ```rust + /// use std::str::FromStr; + /// use harmony_types::switch::PortLocation; + /// + /// assert_eq!(PortLocation::from_str("1/1/1").unwrap(), PortLocation(1, 1, 1)); + /// assert_eq!(PortLocation::from_str("12/5/48").unwrap(), PortLocation(12, 5, 48)); + /// assert!(PortLocation::from_str("1/A/1").is_err()); + /// ``` + fn from_str(s: &str) -> Result<Self, Self::Err> { + let parts: Vec<&str> = s.split('/').collect(); + + if parts.len() != 3 { + return Err(PortParseError::InvalidFormat); + } + + let parse_segment = |part: &str| -> Result<u8, PortParseError> { + u8::from_str(part).map_err(|_| PortParseError::InvalidSegment(part.to_string())) + }; + + let stack = parse_segment(parts[0])?; + let module = parse_segment(parts[1])?; + let port = parse_segment(parts[2])?; + + Ok(PortLocation(stack, module, port)) + } +} + +/// Represents a Port configuration input, which can be a single port, a sequential range, +/// or an explicit set defined by endpoints. +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)] +pub enum PortDeclaration { + /// A single switch port defined by its location. Example: `PortDeclaration::Single(1/1/1)` + Single(PortLocation), + /// A strictly sequential range defined by two endpoints using the hyphen separator (`-`). + /// All ports between the endpoints (inclusive) are implicitly included. + /// Example: `PortDeclaration::Range(1/1/1, 1/1/4)` + Range(PortLocation, PortLocation), + /// A set of ports defined by two endpoints using the asterisk separator (`*`). + /// The actual member ports must be determined contextually (e.g., from MAC tables or + /// explicit configuration lists). + /// Example: `PortDeclaration::Set(1/1/1, 1/1/3)` where only ports 1 and 3 might be active. + Set(PortLocation, PortLocation), +} + +impl PortDeclaration { + /// Parses a port configuration string into a structured `PortDeclaration` enum. 
+ /// + /// This function performs only basic format and numerical parsing, assuming the input + /// strings (e.g., from `show` commands) are semantically valid and logically ordered. + /// + /// # Supported Formats + /// + /// * **Single Port:** `"1/1/1"` + /// * **Range (Hyphen, `-`):** `"1/1/1-1/1/4"` + /// * **Set (Asterisk, `*`):** `"1/1/1*1/1/4"` + /// + /// # Errors + /// + /// Returns `PortParseError` if the string format is incorrect or numerical segments + /// cannot be parsed. + /// + /// # Examples + /// + /// ```rust + /// use harmony_types::switch::{PortDeclaration, PortLocation}; + /// + /// // Single Port + /// assert_eq!(PortDeclaration::parse("3/2/15").unwrap(), PortDeclaration::Single(PortLocation(3, 2, 15))); + /// + /// // Range (Hyphen) - implies sequential ports + /// let result_range = PortDeclaration::parse("1/1/1-1/1/4").unwrap(); + /// assert_eq!(result_range, PortDeclaration::Range(PortLocation(1, 1, 1), PortLocation(1, 1, 4))); + /// + /// // Set (Asterisk) - implies non-sequential set defined by endpoints + /// let result_set = PortDeclaration::parse("1/1/48*2/1/48").unwrap(); + /// assert_eq!(result_set, PortDeclaration::Set(PortLocation(1, 1, 48), PortLocation(2, 1, 48))); + /// + /// // Invalid Format (will still fail basic parsing) + /// assert!(PortDeclaration::parse("1/1/1/1").is_err()); + /// ``` + pub fn parse(port_str: &str) -> Result<Self, PortParseError> { + if let Some((start_str, end_str)) = port_str.split_once('-') { + let start_port = PortLocation::from_str(start_str.trim())?; + let end_port = PortLocation::from_str(end_str.trim())?; + return Ok(PortDeclaration::Range(start_port, end_port)); + } + + if let Some((start_str, end_str)) = port_str.split_once('*') { + let start_port = PortLocation::from_str(start_str.trim())?; + let end_port = PortLocation::from_str(end_str.trim())?; + return Ok(PortDeclaration::Set(start_port, end_port)); + } + + let location = PortLocation::from_str(port_str)?; + Ok(PortDeclaration::Single(location)) + } +} + +impl fmt::Display for PortDeclaration { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + PortDeclaration::Single(port) => write!(f, "{port}"), + PortDeclaration::Range(start, end) => write!(f, "{start}-{end}"), + PortDeclaration::Set(start, end) => write!(f, "{start}*{end}"), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_parse_port_location_invalid() { + assert!(PortLocation::from_str("1/1").is_err()); + assert!(PortLocation::from_str("1/A/1").is_err()); + assert!(PortLocation::from_str("1/1/256").is_err()); + } + + #[test] + fn test_parse_declaration_single() { + let single_result = PortDeclaration::parse("1/1/4").unwrap(); + assert!(matches!(single_result, PortDeclaration::Single(_))); + } + + #[test] + fn test_parse_declaration_range() { + let range_result = PortDeclaration::parse("1/1/1-1/1/4").unwrap(); + assert!(matches!(range_result, PortDeclaration::Range(_, _))); + } + + #[test] + fn test_parse_declaration_set() { + let set_result = PortDeclaration::parse("1/1/48*2/1/48").unwrap(); + assert!(matches!(set_result, PortDeclaration::Set(_, _))); + } +} diff --git a/opnsense-config-xml/src/data/haproxy.rs b/opnsense-config-xml/src/data/haproxy.rs index ef631f3..b0aedc2 100644 --- a/opnsense-config-xml/src/data/haproxy.rs +++ b/opnsense-config-xml/src/data/haproxy.rs @@ -77,7 +77,7 @@ impl YaSerializeTrait for HAProxyId { } } -#[derive(PartialEq, Debug)] +#[derive(PartialEq, Debug, Clone)] pub struct HAProxyId(String); impl Default for HAProxyId { @@ -297,7 +297,7 @@
pub struct HAProxyFrontends { pub frontend: Vec<Frontend>, } -#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)] +#[derive(Clone, Default, PartialEq, Debug, YaSerialize, YaDeserialize)] pub struct Frontend { #[yaserde(attribute = true)] pub uuid: String, @@ -310,7 +310,7 @@ pub struct Frontend { pub bind_options: MaybeString, pub mode: String, #[yaserde(rename = "defaultBackend")] - pub default_backend: String, + pub default_backend: Option<String>, pub ssl_enabled: i32, pub ssl_certificates: MaybeString, pub ssl_default_certificate: MaybeString, @@ -416,7 +416,7 @@ pub struct HAProxyBackends { pub backends: Vec<HAProxyBackend>, } -#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)] +#[derive(Clone, Default, PartialEq, Debug, YaSerialize, YaDeserialize)] pub struct HAProxyBackend { #[yaserde(attribute = true, rename = "uuid")] pub uuid: String, @@ -535,7 +535,7 @@ pub struct HAProxyServers { pub servers: Vec<HAProxyServer>, } -#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)] +#[derive(Clone, Default, PartialEq, Debug, YaSerialize, YaDeserialize)] pub struct HAProxyServer { #[yaserde(attribute = true, rename = "uuid")] pub uuid: String, @@ -543,8 +543,8 @@ pub struct HAProxyServer { pub enabled: u8, pub name: String, pub description: MaybeString, - pub address: String, - pub port: u16, + pub address: Option<String>, + pub port: Option<u16>, pub checkport: MaybeString, pub mode: String, pub multiplexer_protocol: MaybeString, @@ -589,7 +589,7 @@ pub struct HAProxyHealthChecks { pub healthchecks: Vec<HAProxyHealthCheck>, } -#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)] +#[derive(Clone, Default, PartialEq, Debug, YaSerialize, YaDeserialize)] pub struct HAProxyHealthCheck { #[yaserde(attribute = true)] pub uuid: String, diff --git a/opnsense-config/Cargo.toml b/opnsense-config/Cargo.toml index 0580cb2..bb682df 100644 --- a/opnsense-config/Cargo.toml +++ b/opnsense-config/Cargo.toml @@ -25,6 +25,7 @@ sha2 = "0.10.9" [dev-dependencies] pretty_assertions.workspace = true +assertor.workspace = true [lints.rust] unexpected_cfgs = { level = "warn", check-cfg = ['cfg(e2e_test)'] } diff --git a/opnsense-config/src/config/manager/ssh.rs b/opnsense-config/src/config/manager/ssh.rs index 4b2fe64..afe232f 100644 --- a/opnsense-config/src/config/manager/ssh.rs +++ b/opnsense-config/src/config/manager/ssh.rs @@ -30,8 +30,7 @@ impl SshConfigManager { self.opnsense_shell .exec(&format!( - "cp /conf/config.xml /conf/backup/{}", - backup_filename + "cp /conf/config.xml /conf/backup/{backup_filename}" )) .await } diff --git a/opnsense-config/src/config/shell/mod.rs b/opnsense-config/src/config/shell/mod.rs index aa03837..aa94ff1 100644 --- a/opnsense-config/src/config/shell/mod.rs +++ b/opnsense-config/src/config/shell/mod.rs @@ -1,9 +1,7 @@ mod ssh; -pub use ssh::*; - -use async_trait::async_trait; - use crate::Error; +use async_trait::async_trait; +pub use ssh::*; #[async_trait] pub trait OPNsenseShell: std::fmt::Debug + Send + Sync { diff --git a/opnsense-config/src/modules/load_balancer.rs b/opnsense-config/src/modules/load_balancer.rs index 6c71ed4..00cb364 100644 --- a/opnsense-config/src/modules/load_balancer.rs +++ b/opnsense-config/src/modules/load_balancer.rs @@ -1,11 +1,8 @@ -use std::sync::Arc; - -use log::warn; +use crate::{config::OPNsenseShell, Error}; use opnsense_config_xml::{ Frontend, HAProxy, HAProxyBackend, HAProxyHealthCheck, HAProxyServer, OPNsense, }; - -use crate::{config::OPNsenseShell, Error}; +use std::{collections::HashSet, sync::Arc}; pub struct LoadBalancerConfig<'a> { opnsense: &'a mut OPNsense, @@
-31,7 +28,7 @@ impl<'a> LoadBalancerConfig<'a> { match &mut self.opnsense.opnsense.haproxy.as_mut() { Some(haproxy) => f(haproxy), None => unimplemented!( - "Adding a backend is not supported when haproxy config does not exist yet" + "Cannot configure load balancer when haproxy config does not exist yet" ), } } @@ -40,21 +37,67 @@ impl<'a> LoadBalancerConfig<'a> { self.with_haproxy(|haproxy| haproxy.general.enabled = enabled as i32); } - pub fn add_backend(&mut self, backend: HAProxyBackend) { - warn!("TODO make sure this new backend does not refer non-existing entities like servers or health checks"); - self.with_haproxy(|haproxy| haproxy.backends.backends.push(backend)); + /// Configures a service by removing any existing service on the same port + /// and then adding the new definition. This ensures idempotency. + pub fn configure_service( + &mut self, + frontend: Frontend, + backend: HAProxyBackend, + servers: Vec<HAProxyServer>, + healthcheck: Option<HAProxyHealthCheck>, + ) { + self.remove_service_by_bind_address(&frontend.bind); + self.remove_servers(&servers); + + self.add_new_service(frontend, backend, servers, healthcheck); } - pub fn add_frontend(&mut self, frontend: Frontend) { - self.with_haproxy(|haproxy| haproxy.frontends.frontend.push(frontend)); + // Remove the corresponding real servers based on their name if they already exist. + fn remove_servers(&mut self, servers: &[HAProxyServer]) { + let server_names: HashSet<_> = servers.iter().map(|s| s.name.clone()).collect(); + self.with_haproxy(|haproxy| { + haproxy + .servers + .servers + .retain(|s| !server_names.contains(&s.name)); + }); } - pub fn add_healthcheck(&mut self, healthcheck: HAProxyHealthCheck) { - self.with_haproxy(|haproxy| haproxy.healthchecks.healthchecks.push(healthcheck)); + /// Removes a service and its dependent components based on the frontend's bind address. + /// This performs a cascading delete of the frontend, backend, servers, and health check. + fn remove_service_by_bind_address(&mut self, bind_address: &str) { + self.with_haproxy(|haproxy| { + let Some(old_frontend) = remove_frontend_by_bind_address(haproxy, bind_address) else { + return; + }; + + let Some(old_backend) = remove_backend(haproxy, old_frontend) else { + return; + }; + + remove_healthcheck(haproxy, &old_backend); + remove_linked_servers(haproxy, &old_backend); + }); } - pub fn add_servers(&mut self, mut servers: Vec<HAProxyServer>) { - self.with_haproxy(|haproxy| haproxy.servers.servers.append(&mut servers)); + /// Adds the components of a new service to the HAProxy configuration. + /// This function de-duplicates servers by name to prevent configuration errors.
+ fn add_new_service( + &mut self, + frontend: Frontend, + backend: HAProxyBackend, + servers: Vec<HAProxyServer>, + healthcheck: Option<HAProxyHealthCheck>, + ) { + self.with_haproxy(|haproxy| { + if let Some(check) = healthcheck { + haproxy.healthchecks.healthchecks.push(check); + } + + haproxy.servers.servers.extend(servers); + haproxy.backends.backends.push(backend); + haproxy.frontends.frontend.push(frontend); + }); } pub async fn reload_restart(&self) -> Result<(), Error> { @@ -82,3 +125,262 @@ impl<'a> LoadBalancerConfig<'a> { Ok(()) } } + +fn remove_frontend_by_bind_address(haproxy: &mut HAProxy, bind_address: &str) -> Option<Frontend> { + let pos = haproxy + .frontends + .frontend + .iter() + .position(|f| f.bind == bind_address); + + match pos { + Some(pos) => Some(haproxy.frontends.frontend.remove(pos)), + None => None, + } +} + +fn remove_backend(haproxy: &mut HAProxy, old_frontend: Frontend) -> Option<HAProxyBackend> { + let default_backend = old_frontend.default_backend?; + let pos = haproxy + .backends + .backends + .iter() + .position(|b| b.uuid == default_backend); + + match pos { + Some(pos) => Some(haproxy.backends.backends.remove(pos)), + None => None, // orphaned frontend, shouldn't happen + } +} + +fn remove_healthcheck(haproxy: &mut HAProxy, backend: &HAProxyBackend) { + if let Some(uuid) = &backend.health_check.content { + haproxy + .healthchecks + .healthchecks + .retain(|h| h.uuid != *uuid); + } +} + +/// Remove the backend's servers. This assumes servers are not shared between services. +fn remove_linked_servers(haproxy: &mut HAProxy, backend: &HAProxyBackend) { + if let Some(server_uuids_str) = &backend.linked_servers.content { + let server_uuids_to_remove: HashSet<_> = server_uuids_str.split(',').collect(); + haproxy + .servers + .servers + .retain(|s| !server_uuids_to_remove.contains(s.uuid.as_str())); + } +} + +#[cfg(test)] +mod tests { + use crate::config::DummyOPNSenseShell; + use assertor::*; + use opnsense_config_xml::{ + Frontend, HAProxy, HAProxyBackend, HAProxyBackends, HAProxyFrontends, HAProxyHealthCheck, + HAProxyHealthChecks, HAProxyId, HAProxyServer, HAProxyServers, MaybeString, OPNsense, + }; + use std::sync::Arc; + + use super::LoadBalancerConfig; + + static SERVICE_BIND_ADDRESS: &str = "192.168.1.1:80"; + static OTHER_SERVICE_BIND_ADDRESS: &str = "192.168.1.1:443"; + + static SERVER_ADDRESS: &str = "1.1.1.1:80"; + static OTHER_SERVER_ADDRESS: &str = "1.1.1.1:443"; + + #[test] + fn configure_service_should_add_all_service_components_to_haproxy() { + let mut opnsense = given_opnsense(); + let mut load_balancer = given_load_balancer(&mut opnsense); + let (healthcheck, servers, backend, frontend) = + given_service(SERVICE_BIND_ADDRESS, SERVER_ADDRESS); + + load_balancer.configure_service( + frontend.clone(), + backend.clone(), + servers.clone(), + Some(healthcheck.clone()), + ); + + assert_haproxy_configured_with( + opnsense, + vec![frontend], + vec![backend], + servers, + vec![healthcheck], + ); + } + + #[test] + fn configure_service_should_replace_service_on_same_bind_address() { + let (healthcheck, servers, backend, frontend) = + given_service(SERVICE_BIND_ADDRESS, SERVER_ADDRESS); + let mut opnsense = given_opnsense_with(given_haproxy( + vec![frontend.clone()], + vec![backend.clone()], + servers.clone(), + vec![healthcheck.clone()], + )); + let mut load_balancer = given_load_balancer(&mut opnsense); + + let (updated_healthcheck, updated_servers, updated_backend, updated_frontend) = + given_service(SERVICE_BIND_ADDRESS, OTHER_SERVER_ADDRESS); + + load_balancer.configure_service( +
updated_frontend.clone(), + updated_backend.clone(), + updated_servers.clone(), + Some(updated_healthcheck.clone()), + ); + + assert_haproxy_configured_with( + opnsense, + vec![updated_frontend], + vec![updated_backend], + updated_servers, + vec![updated_healthcheck], + ); + } + + #[test] + fn configure_service_should_keep_existing_service_on_different_bind_addresses() { + let (healthcheck, servers, backend, frontend) = + given_service(SERVICE_BIND_ADDRESS, SERVER_ADDRESS); + let (other_healthcheck, other_servers, other_backend, other_frontend) = + given_service(OTHER_SERVICE_BIND_ADDRESS, OTHER_SERVER_ADDRESS); + let mut opnsense = given_opnsense_with(given_haproxy( + vec![frontend.clone()], + vec![backend.clone()], + servers.clone(), + vec![healthcheck.clone()], + )); + let mut load_balancer = given_load_balancer(&mut opnsense); + + load_balancer.configure_service( + other_frontend.clone(), + other_backend.clone(), + other_servers.clone(), + Some(other_healthcheck.clone()), + ); + + assert_haproxy_configured_with( + opnsense, + vec![frontend, other_frontend], + vec![backend, other_backend], + [servers, other_servers].concat(), + vec![healthcheck, other_healthcheck], + ); + } + + fn assert_haproxy_configured_with( + opnsense: OPNsense, + frontends: Vec<Frontend>, + backends: Vec<HAProxyBackend>, + servers: Vec<HAProxyServer>, + healthchecks: Vec<HAProxyHealthCheck>, + ) { + let haproxy = opnsense.opnsense.haproxy.as_ref().unwrap(); + assert_that!(haproxy.frontends.frontend).contains_exactly(frontends); + assert_that!(haproxy.backends.backends).contains_exactly(backends); + assert_that!(haproxy.servers.servers).is_equal_to(servers); + assert_that!(haproxy.healthchecks.healthchecks).contains_exactly(healthchecks); + } + + fn given_opnsense() -> OPNsense { + OPNsense::default() + } + + fn given_opnsense_with(haproxy: HAProxy) -> OPNsense { + let mut opnsense = OPNsense::default(); + opnsense.opnsense.haproxy = Some(haproxy); + + opnsense + } + + fn given_load_balancer<'a>(opnsense: &'a mut OPNsense) -> LoadBalancerConfig<'a> { + let opnsense_shell = Arc::new(DummyOPNSenseShell {}); + if opnsense.opnsense.haproxy.is_none() { + opnsense.opnsense.haproxy = Some(HAProxy::default()); + } + LoadBalancerConfig::new(opnsense, opnsense_shell) + } + + fn given_service( + bind_address: &str, + server_address: &str, + ) -> ( + HAProxyHealthCheck, + Vec<HAProxyServer>, + HAProxyBackend, + Frontend, + ) { + let healthcheck = given_healthcheck(); + let servers = vec![given_server(server_address)]; + let backend = given_backend(); + let frontend = given_frontend(bind_address); + (healthcheck, servers, backend, frontend) + } + + fn given_haproxy( + frontends: Vec<Frontend>, + backends: Vec<HAProxyBackend>, + servers: Vec<HAProxyServer>, + healthchecks: Vec<HAProxyHealthCheck>, + ) -> HAProxy { + HAProxy { + frontends: HAProxyFrontends { + frontend: frontends, + }, + backends: HAProxyBackends { backends }, + servers: HAProxyServers { servers }, + healthchecks: HAProxyHealthChecks { healthchecks }, + ..Default::default() + } + } + + fn given_frontend(bind_address: &str) -> Frontend { + Frontend { + uuid: "uuid".into(), + id: HAProxyId::default(), + enabled: 1, + name: format!("frontend_{bind_address}"), + bind: bind_address.into(), + default_backend: Some("backend-uuid".into()), + ..Default::default() + } + } + + fn given_backend() -> HAProxyBackend { + HAProxyBackend { + uuid: "backend-uuid".into(), + id: HAProxyId::default(), + enabled: 1, + name: "backend_192.168.1.1:80".into(), + linked_servers: MaybeString::from("server-uuid"), + health_check_enabled: 1, + health_check: MaybeString::from("healthcheck-uuid"), + ..Default::default() + } + }
+ + fn given_server(address: &str) -> HAProxyServer { + HAProxyServer { + uuid: "server-uuid".into(), + id: HAProxyId::default(), + name: address.into(), + address: Some(address.into()), + ..Default::default() + } + } + + fn given_healthcheck() -> HAProxyHealthCheck { + HAProxyHealthCheck { + uuid: "healthcheck-uuid".into(), + name: "healthcheck".into(), + ..Default::default() + } + } +}
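For readers reviewing the new `harmony_types::switch` parsing API introduced in this patch, here is a minimal usage sketch; it assumes only the `PortLocation` and `PortDeclaration` items shown in the diff and is an illustrative example rather than part of the change set.

```rust
use std::str::FromStr;

use harmony_types::switch::{PortDeclaration, PortLocation};

fn main() {
    // A single physical location is written <stack>/<module>/<port>.
    let port = PortLocation::from_str("1/1/48").unwrap();
    assert_eq!(port, PortLocation(1, 1, 48));

    // A hyphen separator parses as a sequential Range between two endpoints.
    match PortDeclaration::parse("1/1/1-1/1/4").unwrap() {
        PortDeclaration::Range(start, end) => println!("range {start} to {end}"),
        other => println!("unexpected declaration: {other}"),
    }

    // An asterisk separator parses as a Set defined only by its endpoints.
    let set = PortDeclaration::parse("1/1/48*2/1/48").unwrap();
    assert_eq!(set.to_string(), "1/1/48*2/1/48"); // Display round-trips the notation

    // Malformed input is rejected with a PortParseError.
    assert!(PortLocation::from_str("1/A/1").is_err());
}
```

As the enum documentation notes, the `Set` variant records only its two endpoint locations; callers are expected to resolve the actual membership from switch state such as MAC tables or explicit configuration lists.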