diff --git a/Cargo.lock b/Cargo.lock index 1a92012..429f09b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -674,6 +674,22 @@ dependencies = [ "serde_with", ] +[[package]] +name = "brocade" +version = "0.1.0" +dependencies = [ + "async-trait", + "env_logger", + "harmony_secret", + "harmony_types", + "log", + "regex", + "russh", + "russh-keys", + "serde", + "tokio", +] + [[package]] name = "brotli" version = "8.0.2" @@ -1804,6 +1820,18 @@ dependencies = [ "url", ] +[[package]] +name = "example-openbao" +version = "0.1.0" +dependencies = [ + "harmony", + "harmony_cli", + "harmony_macros", + "harmony_types", + "tokio", + "url", +] + [[package]] name = "example-opnsense" version = "0.1.0" @@ -2316,9 +2344,11 @@ name = "harmony" version = "0.1.0" dependencies = [ "askama", + "assertor", "async-trait", "base64 0.22.1", "bollard", + "brocade", "chrono", "cidr", "convert_case", @@ -2349,6 +2379,7 @@ dependencies = [ "once_cell", "opnsense-config", "opnsense-config-xml", + "option-ext", "pretty_assertions", "reqwest 0.11.27", "russh", @@ -4549,9 +4580,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.11.2" +version = "1.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23d7fd106d8c02486a8d64e778353d1cffe08ce79ac2e82f540c86d0facf6912" +checksum = "8b5288124840bee7b386bc413c487869b360b2b4ec421ea56425128692f2a82c" dependencies = [ "aho-corasick 1.1.3", "memchr", @@ -4561,9 +4592,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.10" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b9458fa0bfeeac22b5ca447c63aaf45f28439a709ccd244698632f9aa6394d6" +checksum = "833eb9ce86d40ef33cb1306d8accf7bc8ec2bfea4355cbdebb3df68b40925cad" dependencies = [ "aho-corasick 1.1.3", "memchr", diff --git a/Cargo.toml b/Cargo.toml index 32231d7..a256234 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,6 +16,7 @@ members = [ "harmony_secret_derive", "harmony_secret", "adr/agent_discovery/mdns", + "brocade", ] [workspace.package] diff --git a/README.md b/README.md index e77718e..4ccdae7 100644 --- a/README.md +++ b/README.md @@ -36,48 +36,59 @@ These principles surface as simple, ergonomic Rust APIs that let teams focus on ## 2 · Quick Start -The snippet below spins up a complete **production-grade LAMP stack** with monitoring. Swap it for your own scores to deploy anything from microservices to machine-learning pipelines. +The snippet below spins up a complete **production-grade Rust + Leptos Webapp** with monitoring. Swap it for your own scores to deploy anything from microservices to machine-learning pipelines. ```rust use harmony::{ - data::Version, inventory::Inventory, - maestro::Maestro, modules::{ - lamp::{LAMPConfig, LAMPScore}, - monitoring::monitoring_alerting::MonitoringAlertingStackScore, + application::{ + ApplicationScore, RustWebFramework, RustWebapp, + features::{PackagingDeployment, rhob_monitoring::Monitoring}, + }, + monitoring::alert_channel::discord_alert_channel::DiscordWebhook, }, - topology::{K8sAnywhereTopology, Url}, + topology::K8sAnywhereTopology, }; +use harmony_macros::hurl; +use std::{path::PathBuf, sync::Arc}; #[tokio::main] async fn main() { - // 1. 
Describe what you want - let lamp_stack = LAMPScore { - name: "harmony-lamp-demo".into(), - domain: Url::Url(url::Url::parse("https://lampdemo.example.com").unwrap()), - php_version: Version::from("8.3.0").unwrap(), - config: LAMPConfig { - project_root: "./php".into(), - database_size: "4Gi".into(), - ..Default::default() - }, + let application = Arc::new(RustWebapp { + name: "harmony-example-leptos".to_string(), + project_root: PathBuf::from(".."), // <== Your project root, usually .. if you use the standard `/harmony` folder + framework: Some(RustWebFramework::Leptos), + service_port: 8080, + }); + + // Define your Application deployment and the features you want + let app = ApplicationScore { + features: vec![ + Box::new(PackagingDeployment { + application: application.clone(), + }), + Box::new(Monitoring { + application: application.clone(), + alert_receiver: vec![ + Box::new(DiscordWebhook { + name: "test-discord".to_string(), + url: hurl!("https://discord.doesnt.exist.com"), // <== Get your discord webhook url + }), + ], + }), + ], + application, }; - // 2. Enhance with extra scores (monitoring, CI/CD, …) - let mut monitoring = MonitoringAlertingStackScore::new(); - monitoring.namespace = Some(lamp_stack.config.namespace.clone()); - - // 3. Run your scores on the desired topology & inventory harmony_cli::run( - Inventory::autoload(), // auto-detect hardware / kube-config - K8sAnywhereTopology::from_env(), // local k3d, CI, staging, prod… - vec![ - Box::new(lamp_stack), - Box::new(monitoring) - ], - None - ).await.unwrap(); + Inventory::autoload(), + K8sAnywhereTopology::from_env(), // <== Deploy to local automatically provisioned local k3d by default or connect to any kubernetes cluster + vec![Box::new(app)], + None, + ) + .await + .unwrap(); } ``` diff --git a/brocade/Cargo.toml b/brocade/Cargo.toml new file mode 100644 index 0000000..89c4fb8 --- /dev/null +++ b/brocade/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "brocade" +edition = "2024" +version.workspace = true +readme.workspace = true +license.workspace = true + +[dependencies] +async-trait.workspace = true +harmony_types = { path = "../harmony_types" } +russh.workspace = true +russh-keys.workspace = true +tokio.workspace = true +log.workspace = true +env_logger.workspace = true +regex = "1.11.3" +harmony_secret = { path = "../harmony_secret" } +serde.workspace = true diff --git a/brocade/examples/main.rs b/brocade/examples/main.rs new file mode 100644 index 0000000..34dec21 --- /dev/null +++ b/brocade/examples/main.rs @@ -0,0 +1,70 @@ +use std::net::{IpAddr, Ipv4Addr}; + +use brocade::BrocadeOptions; +use harmony_secret::{Secret, SecretManager}; +use harmony_types::switch::PortLocation; +use serde::{Deserialize, Serialize}; + +#[derive(Secret, Clone, Debug, Serialize, Deserialize)] +struct BrocadeSwitchAuth { + username: String, + password: String, +} + +#[tokio::main] +async fn main() { + env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init(); + + // let ip = IpAddr::V4(Ipv4Addr::new(10, 0, 0, 250)); // old brocade @ ianlet + let ip = IpAddr::V4(Ipv4Addr::new(192, 168, 55, 101)); // brocade @ sto1 + // let ip = IpAddr::V4(Ipv4Addr::new(192, 168, 4, 11)); // brocade @ st + let switch_addresses = vec![ip]; + + let config = SecretManager::get_or_prompt::() + .await + .unwrap(); + + let brocade = brocade::init( + &switch_addresses, + 22, + &config.username, + &config.password, + Some(BrocadeOptions { + dry_run: true, + ..Default::default() + }), + ) + .await + .expect("Brocade client 
failed to connect"); + + let entries = brocade.get_stack_topology().await.unwrap(); + println!("Stack topology: {entries:#?}"); + + let entries = brocade.get_interfaces().await.unwrap(); + println!("Interfaces: {entries:#?}"); + + let version = brocade.version().await.unwrap(); + println!("Version: {version:?}"); + + println!("--------------"); + let mac_adddresses = brocade.get_mac_address_table().await.unwrap(); + println!("VLAN\tMAC\t\t\tPORT"); + for mac in mac_adddresses { + println!("{}\t{}\t{}", mac.vlan, mac.mac_address, mac.port); + } + + println!("--------------"); + let channel_name = "1"; + brocade.clear_port_channel(channel_name).await.unwrap(); + + println!("--------------"); + let channel_id = brocade.find_available_channel_id().await.unwrap(); + + println!("--------------"); + let channel_name = "HARMONY_LAG"; + let ports = [PortLocation(2, 0, 35)]; + brocade + .create_port_channel(channel_id, channel_name, &ports) + .await + .unwrap(); +} diff --git a/brocade/src/fast_iron.rs b/brocade/src/fast_iron.rs new file mode 100644 index 0000000..a1a2478 --- /dev/null +++ b/brocade/src/fast_iron.rs @@ -0,0 +1,211 @@ +use super::BrocadeClient; +use crate::{ + BrocadeInfo, Error, ExecutionMode, InterSwitchLink, InterfaceInfo, MacAddressEntry, + PortChannelId, PortOperatingMode, parse_brocade_mac_address, shell::BrocadeShell, +}; + +use async_trait::async_trait; +use harmony_types::switch::{PortDeclaration, PortLocation}; +use log::{debug, info}; +use regex::Regex; +use std::{collections::HashSet, str::FromStr}; + +pub struct FastIronClient { + shell: BrocadeShell, + version: BrocadeInfo, +} + +impl FastIronClient { + pub fn init(mut shell: BrocadeShell, version_info: BrocadeInfo) -> Self { + shell.before_all(vec!["skip-page-display".into()]); + shell.after_all(vec!["page".into()]); + + Self { + shell, + version: version_info, + } + } + + fn parse_mac_entry(&self, line: &str) -> Option> { + debug!("[Brocade] Parsing mac address entry: {line}"); + let parts: Vec<&str> = line.split_whitespace().collect(); + if parts.len() < 3 { + return None; + } + + let (vlan, mac_address, port) = match parts.len() { + 3 => ( + u16::from_str(parts[0]).ok()?, + parse_brocade_mac_address(parts[1]).ok()?, + parts[2].to_string(), + ), + _ => ( + 1, + parse_brocade_mac_address(parts[0]).ok()?, + parts[1].to_string(), + ), + }; + + let port = + PortDeclaration::parse(&port).map_err(|e| Error::UnexpectedError(format!("{e}"))); + + match port { + Ok(p) => Some(Ok(MacAddressEntry { + vlan, + mac_address, + port: p, + })), + Err(e) => Some(Err(e)), + } + } + + fn parse_stack_port_entry(&self, line: &str) -> Option> { + debug!("[Brocade] Parsing stack port entry: {line}"); + let parts: Vec<&str> = line.split_whitespace().collect(); + if parts.len() < 10 { + return None; + } + + let local_port = PortLocation::from_str(parts[0]).ok()?; + + Some(Ok(InterSwitchLink { + local_port, + remote_port: None, + })) + } + + fn build_port_channel_commands( + &self, + channel_id: PortChannelId, + channel_name: &str, + ports: &[PortLocation], + ) -> Vec { + let mut commands = vec![ + "configure terminal".to_string(), + format!("lag {channel_name} static id {channel_id}"), + ]; + + for port in ports { + commands.push(format!("ports ethernet {port}")); + } + + commands.push(format!("primary-port {}", ports[0])); + commands.push("deploy".into()); + commands.push("exit".into()); + commands.push("write memory".into()); + commands.push("exit".into()); + + commands + } +} + +#[async_trait] +impl BrocadeClient for FastIronClient { + 
async fn version(&self) -> Result { + Ok(self.version.clone()) + } + + async fn get_mac_address_table(&self) -> Result, Error> { + info!("[Brocade] Showing MAC address table..."); + + let output = self + .shell + .run_command("show mac-address", ExecutionMode::Regular) + .await?; + + output + .lines() + .skip(2) + .filter_map(|line| self.parse_mac_entry(line)) + .collect() + } + + async fn get_stack_topology(&self) -> Result, Error> { + let output = self + .shell + .run_command("show interface stack-ports", crate::ExecutionMode::Regular) + .await?; + + output + .lines() + .skip(1) + .filter_map(|line| self.parse_stack_port_entry(line)) + .collect() + } + + async fn get_interfaces(&self) -> Result, Error> { + todo!() + } + + async fn configure_interfaces( + &self, + _interfaces: Vec<(String, PortOperatingMode)>, + ) -> Result<(), Error> { + todo!() + } + + async fn find_available_channel_id(&self) -> Result { + info!("[Brocade] Finding next available channel id..."); + + let output = self + .shell + .run_command("show lag", ExecutionMode::Regular) + .await?; + let re = Regex::new(r"=== LAG .* ID\s+(\d+)").expect("Invalid regex"); + + let used_ids: HashSet = output + .lines() + .filter_map(|line| { + re.captures(line) + .and_then(|c| c.get(1)) + .and_then(|id_match| id_match.as_str().parse().ok()) + }) + .collect(); + + let mut next_id: u8 = 1; + loop { + if !used_ids.contains(&next_id) { + break; + } + next_id += 1; + } + + info!("[Brocade] Found channel id: {next_id}"); + Ok(next_id) + } + + async fn create_port_channel( + &self, + channel_id: PortChannelId, + channel_name: &str, + ports: &[PortLocation], + ) -> Result<(), Error> { + info!( + "[Brocade] Configuring port-channel '{channel_name} {channel_id}' with ports: {ports:?}" + ); + + let commands = self.build_port_channel_commands(channel_id, channel_name, ports); + self.shell + .run_commands(commands, ExecutionMode::Privileged) + .await?; + + info!("[Brocade] Port-channel '{channel_name}' configured."); + Ok(()) + } + + async fn clear_port_channel(&self, channel_name: &str) -> Result<(), Error> { + info!("[Brocade] Clearing port-channel: {channel_name}"); + + let commands = vec![ + "configure terminal".to_string(), + format!("no lag {channel_name}"), + "write memory".to_string(), + ]; + self.shell + .run_commands(commands, ExecutionMode::Privileged) + .await?; + + info!("[Brocade] Port-channel '{channel_name}' cleared."); + Ok(()) + } +} diff --git a/brocade/src/lib.rs b/brocade/src/lib.rs new file mode 100644 index 0000000..3822abd --- /dev/null +++ b/brocade/src/lib.rs @@ -0,0 +1,336 @@ +use std::net::IpAddr; +use std::{ + fmt::{self, Display}, + time::Duration, +}; + +use crate::network_operating_system::NetworkOperatingSystemClient; +use crate::{ + fast_iron::FastIronClient, + shell::{BrocadeSession, BrocadeShell}, +}; + +use async_trait::async_trait; +use harmony_types::net::MacAddress; +use harmony_types::switch::{PortDeclaration, PortLocation}; +use regex::Regex; + +mod fast_iron; +mod network_operating_system; +mod shell; +mod ssh; + +#[derive(Default, Clone, Debug)] +pub struct BrocadeOptions { + pub dry_run: bool, + pub ssh: ssh::SshOptions, + pub timeouts: TimeoutConfig, +} + +#[derive(Clone, Debug)] +pub struct TimeoutConfig { + pub shell_ready: Duration, + pub command_execution: Duration, + pub cleanup: Duration, + pub message_wait: Duration, +} + +impl Default for TimeoutConfig { + fn default() -> Self { + Self { + shell_ready: Duration::from_secs(10), + command_execution: Duration::from_secs(60), // Commands like 
`deploy` (for a LAG) can take a while + cleanup: Duration::from_secs(10), + message_wait: Duration::from_millis(500), + } + } +} + +enum ExecutionMode { + Regular, + Privileged, +} + +#[derive(Clone, Debug)] +pub struct BrocadeInfo { + os: BrocadeOs, + version: String, +} + +#[derive(Clone, Debug)] +pub enum BrocadeOs { + NetworkOperatingSystem, + FastIron, + Unknown, +} + +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)] +pub struct MacAddressEntry { + pub vlan: u16, + pub mac_address: MacAddress, + pub port: PortDeclaration, +} + +pub type PortChannelId = u8; + +/// Represents a single physical or logical link connecting two switches within a stack or fabric. +/// +/// This structure provides a standardized view of the topology regardless of the +/// underlying Brocade OS configuration (stacking vs. fabric). +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct InterSwitchLink { + /// The local port on the switch where the topology command was run. + pub local_port: PortLocation, + /// The port on the directly connected neighboring switch. + pub remote_port: Option, +} + +/// Represents the key running configuration status of a single switch interface. +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct InterfaceInfo { + /// The full configuration name (e.g., "TenGigabitEthernet 1/0/1", "FortyGigabitEthernet 2/0/2"). + pub name: String, + /// The physical location of the interface. + pub port_location: PortLocation, + /// The parsed type and name prefix of the interface. + pub interface_type: InterfaceType, + /// The primary configuration mode defining the interface's behavior (L2, L3, Fabric). + pub operating_mode: Option, + /// Indicates the current state of the interface. + pub status: InterfaceStatus, +} + +/// Categorizes the functional type of a switch interface. +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum InterfaceType { + /// Physical or virtual Ethernet interface (e.g., TenGigabitEthernet, FortyGigabitEthernet). + Ethernet(String), +} + +impl fmt::Display for InterfaceType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + InterfaceType::Ethernet(name) => write!(f, "{name}"), + } + } +} + +/// Defines the primary configuration mode of a switch interface, representing mutually exclusive roles. +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum PortOperatingMode { + /// The interface is explicitly configured for Brocade fabric roles (ISL or Trunk enabled). + Fabric, + /// The interface is configured for standard Layer 2 switching as Trunk port (`switchport mode trunk`). + Trunk, + /// The interface is configured for standard Layer 2 switching as Access port (`switchport` without trunk mode). + Access, +} + +/// Defines the possible status of an interface. +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum InterfaceStatus { + /// The interface is connected. + Connected, + /// The interface is not connected and is not expected to be. + NotConnected, + /// The interface is not connected but is expected to be (configured with `no shutdown`). 
+ SfpAbsent, +} + +pub async fn init( + ip_addresses: &[IpAddr], + port: u16, + username: &str, + password: &str, + options: Option, +) -> Result, Error> { + let shell = BrocadeShell::init(ip_addresses, port, username, password, options).await?; + + let version_info = shell + .with_session(ExecutionMode::Regular, |session| { + Box::pin(get_brocade_info(session)) + }) + .await?; + + Ok(match version_info.os { + BrocadeOs::FastIron => Box::new(FastIronClient::init(shell, version_info)), + BrocadeOs::NetworkOperatingSystem => { + Box::new(NetworkOperatingSystemClient::init(shell, version_info)) + } + BrocadeOs::Unknown => todo!(), + }) +} + +#[async_trait] +pub trait BrocadeClient { + /// Retrieves the operating system and version details from the connected Brocade switch. + /// + /// This is typically the first call made after establishing a connection to determine + /// the switch OS family (e.g., FastIron, NOS) for feature compatibility. + /// + /// # Returns + /// + /// A `BrocadeInfo` structure containing parsed OS type and version string. + async fn version(&self) -> Result; + + /// Retrieves the dynamically learned MAC address table from the switch. + /// + /// This is crucial for discovering where specific network endpoints (MAC addresses) + /// are currently located on the physical ports. + /// + /// # Returns + /// + /// A vector of `MacAddressEntry`, where each entry typically contains VLAN, MAC address, + /// and the associated port name/index. + async fn get_mac_address_table(&self) -> Result, Error>; + + /// Derives the physical connections used to link multiple switches together + /// to form a single logical entity (stack, fabric, etc.). + /// + /// This abstracts the underlying configuration (e.g., stack ports, fabric ports) + /// to return a standardized view of the topology. + /// + /// # Returns + /// + /// A vector of `InterSwitchLink` structs detailing which ports are used for stacking/fabric. + /// If the switch is not stacked, returns an empty vector. + async fn get_stack_topology(&self) -> Result, Error>; + + /// Retrieves the status for all interfaces + /// + /// # Returns + /// + /// A vector of `InterfaceInfo` structures. + async fn get_interfaces(&self) -> Result, Error>; + + /// Configures a set of interfaces to be operated with a specified mode (access ports, ISL, etc.). + async fn configure_interfaces( + &self, + interfaces: Vec<(String, PortOperatingMode)>, + ) -> Result<(), Error>; + + /// Scans the existing configuration to find the next available (unused) + /// Port-Channel ID (`lag` or `trunk`) for assignment. + /// + /// # Returns + /// + /// The smallest, unassigned `PortChannelId` within the supported range. + async fn find_available_channel_id(&self) -> Result; + + /// Creates and configures a new Port-Channel (Link Aggregation Group or LAG) + /// using the specified channel ID and ports. + /// + /// The resulting configuration must be persistent (saved to startup-config). + /// Assumes a static LAG configuration mode unless specified otherwise by the implementation. + /// + /// # Parameters + /// + /// * `channel_id`: The ID (e.g., 1-128) for the logical port channel. + /// * `channel_name`: A descriptive name for the LAG (used in configuration context). + /// * `ports`: A slice of `PortLocation` structs defining the physical member ports. 
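+    ///
+    /// # Example
+    ///
+    /// A minimal usage sketch (mirrors `examples/main.rs`; assumes `brocade` is a client
+    /// returned by `brocade::init(...)` and the port values are illustrative):
+    ///
+    /// ```ignore
+    /// let channel_id = brocade.find_available_channel_id().await?;
+    /// brocade
+    ///     .create_port_channel(channel_id, "HARMONY_LAG", &[PortLocation(2, 0, 35)])
+    ///     .await?;
+    /// ```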
+ async fn create_port_channel( + &self, + channel_id: PortChannelId, + channel_name: &str, + ports: &[PortLocation], + ) -> Result<(), Error>; + + /// Removes all configuration associated with the specified Port-Channel name. + /// + /// This operation should be idempotent; attempting to clear a non-existent + /// channel should succeed (or return a benign error). + /// + /// # Parameters + /// + /// * `channel_name`: The name of the Port-Channel (LAG) to delete. + /// + async fn clear_port_channel(&self, channel_name: &str) -> Result<(), Error>; +} + +async fn get_brocade_info(session: &mut BrocadeSession) -> Result { + let output = session.run_command("show version").await?; + + if output.contains("Network Operating System") { + let re = Regex::new(r"Network Operating System Version:\s*(?P[a-zA-Z0-9.\-]+)") + .expect("Invalid regex"); + let version = re + .captures(&output) + .and_then(|cap| cap.name("version")) + .map(|m| m.as_str().to_string()) + .unwrap_or_default(); + + return Ok(BrocadeInfo { + os: BrocadeOs::NetworkOperatingSystem, + version, + }); + } else if output.contains("ICX") { + let re = Regex::new(r"(?m)^\s*SW: Version\s*(?P[a-zA-Z0-9.\-]+)") + .expect("Invalid regex"); + let version = re + .captures(&output) + .and_then(|cap| cap.name("version")) + .map(|m| m.as_str().to_string()) + .unwrap_or_default(); + + return Ok(BrocadeInfo { + os: BrocadeOs::FastIron, + version, + }); + } + + Err(Error::UnexpectedError("Unknown Brocade OS version".into())) +} + +fn parse_brocade_mac_address(value: &str) -> Result { + let cleaned_mac = value.replace('.', ""); + + if cleaned_mac.len() != 12 { + return Err(format!("Invalid MAC address: {value}")); + } + + let mut bytes = [0u8; 6]; + for (i, pair) in cleaned_mac.as_bytes().chunks(2).enumerate() { + let byte_str = std::str::from_utf8(pair).map_err(|_| "Invalid UTF-8")?; + bytes[i] = + u8::from_str_radix(byte_str, 16).map_err(|_| format!("Invalid hex in MAC: {value}"))?; + } + + Ok(MacAddress(bytes)) +} + +#[derive(Debug)] +pub enum Error { + NetworkError(String), + AuthenticationError(String), + ConfigurationError(String), + TimeoutError(String), + UnexpectedError(String), + CommandError(String), +} + +impl Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Error::NetworkError(msg) => write!(f, "Network error: {msg}"), + Error::AuthenticationError(msg) => write!(f, "Authentication error: {msg}"), + Error::ConfigurationError(msg) => write!(f, "Configuration error: {msg}"), + Error::TimeoutError(msg) => write!(f, "Timeout error: {msg}"), + Error::UnexpectedError(msg) => write!(f, "Unexpected error: {msg}"), + Error::CommandError(msg) => write!(f, "{msg}"), + } + } +} + +impl From for String { + fn from(val: Error) -> Self { + format!("{val}") + } +} + +impl std::error::Error for Error {} + +impl From for Error { + fn from(value: russh::Error) -> Self { + Error::NetworkError(format!("Russh client error: {value}")) + } +} diff --git a/brocade/src/network_operating_system.rs b/brocade/src/network_operating_system.rs new file mode 100644 index 0000000..b14bc08 --- /dev/null +++ b/brocade/src/network_operating_system.rs @@ -0,0 +1,306 @@ +use std::str::FromStr; + +use async_trait::async_trait; +use harmony_types::switch::{PortDeclaration, PortLocation}; +use log::{debug, info}; + +use crate::{ + BrocadeClient, BrocadeInfo, Error, ExecutionMode, InterSwitchLink, InterfaceInfo, + InterfaceStatus, InterfaceType, MacAddressEntry, PortChannelId, PortOperatingMode, + parse_brocade_mac_address, 
shell::BrocadeShell, +}; + +pub struct NetworkOperatingSystemClient { + shell: BrocadeShell, + version: BrocadeInfo, +} + +impl NetworkOperatingSystemClient { + pub fn init(mut shell: BrocadeShell, version_info: BrocadeInfo) -> Self { + shell.before_all(vec!["terminal length 0".into()]); + + Self { + shell, + version: version_info, + } + } + + fn parse_mac_entry(&self, line: &str) -> Option> { + debug!("[Brocade] Parsing mac address entry: {line}"); + let parts: Vec<&str> = line.split_whitespace().collect(); + if parts.len() < 5 { + return None; + } + + let (vlan, mac_address, port) = match parts.len() { + 5 => ( + u16::from_str(parts[0]).ok()?, + parse_brocade_mac_address(parts[1]).ok()?, + parts[4].to_string(), + ), + _ => ( + u16::from_str(parts[0]).ok()?, + parse_brocade_mac_address(parts[1]).ok()?, + parts[5].to_string(), + ), + }; + + let port = + PortDeclaration::parse(&port).map_err(|e| Error::UnexpectedError(format!("{e}"))); + + match port { + Ok(p) => Some(Ok(MacAddressEntry { + vlan, + mac_address, + port: p, + })), + Err(e) => Some(Err(e)), + } + } + + fn parse_inter_switch_link_entry(&self, line: &str) -> Option> { + debug!("[Brocade] Parsing inter switch link entry: {line}"); + let parts: Vec<&str> = line.split_whitespace().collect(); + if parts.len() < 10 { + return None; + } + + let local_port = PortLocation::from_str(parts[2]).ok()?; + let remote_port = PortLocation::from_str(parts[5]).ok()?; + + Some(Ok(InterSwitchLink { + local_port, + remote_port: Some(remote_port), + })) + } + + fn parse_interface_status_entry(&self, line: &str) -> Option> { + debug!("[Brocade] Parsing interface status entry: {line}"); + let parts: Vec<&str> = line.split_whitespace().collect(); + if parts.len() < 6 { + return None; + } + + let interface_type = match parts[0] { + "Fo" => InterfaceType::Ethernet("FortyGigabitEthernet".to_string()), + "Te" => InterfaceType::Ethernet("TenGigabitEthernet".to_string()), + _ => return None, + }; + let port_location = PortLocation::from_str(parts[1]).ok()?; + let status = match parts[2] { + "connected" => InterfaceStatus::Connected, + "notconnected" => InterfaceStatus::NotConnected, + "sfpAbsent" => InterfaceStatus::SfpAbsent, + _ => return None, + }; + let operating_mode = match parts[3] { + "ISL" => Some(PortOperatingMode::Fabric), + "Trunk" => Some(PortOperatingMode::Trunk), + "Access" => Some(PortOperatingMode::Access), + "--" => None, + _ => return None, + }; + + Some(Ok(InterfaceInfo { + name: format!("{} {}", interface_type, port_location), + port_location, + interface_type, + operating_mode, + status, + })) + } +} + +#[async_trait] +impl BrocadeClient for NetworkOperatingSystemClient { + async fn version(&self) -> Result { + Ok(self.version.clone()) + } + + async fn get_mac_address_table(&self) -> Result, Error> { + let output = self + .shell + .run_command("show mac-address-table", ExecutionMode::Regular) + .await?; + + output + .lines() + .skip(1) + .filter_map(|line| self.parse_mac_entry(line)) + .collect() + } + + async fn get_stack_topology(&self) -> Result, Error> { + let output = self + .shell + .run_command("show fabric isl", ExecutionMode::Regular) + .await?; + + output + .lines() + .skip(6) + .filter_map(|line| self.parse_inter_switch_link_entry(line)) + .collect() + } + + async fn get_interfaces(&self) -> Result, Error> { + let output = self + .shell + .run_command( + "show interface status rbridge-id all", + ExecutionMode::Regular, + ) + .await?; + + output + .lines() + .skip(2) + .filter_map(|line| self.parse_interface_status_entry(line)) 
+ .collect() + } + + async fn configure_interfaces( + &self, + interfaces: Vec<(String, PortOperatingMode)>, + ) -> Result<(), Error> { + info!("[Brocade] Configuring {} interface(s)...", interfaces.len()); + + let mut commands = vec!["configure terminal".to_string()]; + + for interface in interfaces { + commands.push(format!("interface {}", interface.0)); + + match interface.1 { + PortOperatingMode::Fabric => { + commands.push("fabric isl enable".into()); + commands.push("fabric trunk enable".into()); + } + PortOperatingMode::Trunk => { + commands.push("switchport".into()); + commands.push("switchport mode trunk".into()); + commands.push("no spanning-tree shutdown".into()); + commands.push("no fabric isl enable".into()); + commands.push("no fabric trunk enable".into()); + } + PortOperatingMode::Access => { + commands.push("switchport".into()); + commands.push("switchport mode access".into()); + commands.push("switchport access vlan 1".into()); + commands.push("no spanning-tree shutdown".into()); + commands.push("no fabric isl enable".into()); + commands.push("no fabric trunk enable".into()); + } + } + + commands.push("no shutdown".into()); + commands.push("exit".into()); + } + + commands.push("write memory".into()); + + self.shell + .run_commands(commands, ExecutionMode::Regular) + .await?; + + info!("[Brocade] Interfaces configured."); + + Ok(()) + } + + async fn find_available_channel_id(&self) -> Result { + info!("[Brocade] Finding next available channel id..."); + + let output = self + .shell + .run_command("show port-channel", ExecutionMode::Regular) + .await?; + + let used_ids: Vec = output + .lines() + .skip(6) + .filter_map(|line| { + let parts: Vec<&str> = line.split_whitespace().collect(); + if parts.len() < 8 { + return None; + } + + u8::from_str(parts[0]).ok() + }) + .collect(); + + let mut next_id: u8 = 1; + loop { + if !used_ids.contains(&next_id) { + break; + } + next_id += 1; + } + + info!("[Brocade] Found channel id: {next_id}"); + Ok(next_id) + } + + async fn create_port_channel( + &self, + channel_id: PortChannelId, + channel_name: &str, + ports: &[PortLocation], + ) -> Result<(), Error> { + info!( + "[Brocade] Configuring port-channel '{channel_name} {channel_id}' with ports: {ports:?}" + ); + + let interfaces = self.get_interfaces().await?; + + let mut commands = vec![ + "configure terminal".into(), + format!("interface port-channel {}", channel_id), + "no shutdown".into(), + "exit".into(), + ]; + + for port in ports { + let interface = interfaces.iter().find(|i| i.port_location == *port); + let Some(interface) = interface else { + continue; + }; + + commands.push(format!("interface {}", interface.name)); + commands.push("no switchport".into()); + commands.push("no ip address".into()); + commands.push("no fabric isl enable".into()); + commands.push("no fabric trunk enable".into()); + commands.push(format!("channel-group {channel_id} mode active")); + commands.push("no shutdown".into()); + commands.push("exit".into()); + } + + commands.push("write memory".into()); + + self.shell + .run_commands(commands, ExecutionMode::Regular) + .await?; + + info!("[Brocade] Port-channel '{channel_name}' configured."); + + Ok(()) + } + + async fn clear_port_channel(&self, channel_name: &str) -> Result<(), Error> { + info!("[Brocade] Clearing port-channel: {channel_name}"); + + let commands = vec![ + "configure terminal".into(), + format!("no interface port-channel {}", channel_name), + "exit".into(), + "write memory".into(), + ]; + + self.shell + .run_commands(commands, 
ExecutionMode::Regular) + .await?; + + info!("[Brocade] Port-channel '{channel_name}' cleared."); + Ok(()) + } +} diff --git a/brocade/src/shell.rs b/brocade/src/shell.rs new file mode 100644 index 0000000..cfa672d --- /dev/null +++ b/brocade/src/shell.rs @@ -0,0 +1,367 @@ +use std::net::IpAddr; +use std::time::Duration; +use std::time::Instant; + +use crate::BrocadeOptions; +use crate::Error; +use crate::ExecutionMode; +use crate::TimeoutConfig; +use crate::ssh; + +use log::debug; +use log::info; +use russh::ChannelMsg; +use tokio::time::timeout; + +pub struct BrocadeShell { + ip: IpAddr, + port: u16, + username: String, + password: String, + options: BrocadeOptions, + before_all_commands: Vec, + after_all_commands: Vec, +} + +impl BrocadeShell { + pub async fn init( + ip_addresses: &[IpAddr], + port: u16, + username: &str, + password: &str, + options: Option, + ) -> Result { + let ip = ip_addresses + .first() + .ok_or_else(|| Error::ConfigurationError("No IP addresses provided".to_string()))?; + + let base_options = options.unwrap_or_default(); + let options = ssh::try_init_client(username, password, ip, base_options).await?; + + Ok(Self { + ip: *ip, + port, + username: username.to_string(), + password: password.to_string(), + before_all_commands: vec![], + after_all_commands: vec![], + options, + }) + } + + pub async fn open_session(&self, mode: ExecutionMode) -> Result { + BrocadeSession::open( + self.ip, + self.port, + &self.username, + &self.password, + self.options.clone(), + mode, + ) + .await + } + + pub async fn with_session(&self, mode: ExecutionMode, callback: F) -> Result + where + F: FnOnce( + &mut BrocadeSession, + ) -> std::pin::Pin< + Box> + Send + '_>, + >, + { + let mut session = self.open_session(mode).await?; + + let _ = session.run_commands(self.before_all_commands.clone()).await; + let result = callback(&mut session).await; + let _ = session.run_commands(self.after_all_commands.clone()).await; + + session.close().await?; + result + } + + pub async fn run_command(&self, command: &str, mode: ExecutionMode) -> Result { + let mut session = self.open_session(mode).await?; + + let _ = session.run_commands(self.before_all_commands.clone()).await; + let result = session.run_command(command).await; + let _ = session.run_commands(self.after_all_commands.clone()).await; + + session.close().await?; + result + } + + pub async fn run_commands( + &self, + commands: Vec, + mode: ExecutionMode, + ) -> Result<(), Error> { + let mut session = self.open_session(mode).await?; + + let _ = session.run_commands(self.before_all_commands.clone()).await; + let result = session.run_commands(commands).await; + let _ = session.run_commands(self.after_all_commands.clone()).await; + + session.close().await?; + result + } + + pub fn before_all(&mut self, commands: Vec) { + self.before_all_commands = commands; + } + + pub fn after_all(&mut self, commands: Vec) { + self.after_all_commands = commands; + } +} + +pub struct BrocadeSession { + pub channel: russh::Channel, + pub mode: ExecutionMode, + pub options: BrocadeOptions, +} + +impl BrocadeSession { + pub async fn open( + ip: IpAddr, + port: u16, + username: &str, + password: &str, + options: BrocadeOptions, + mode: ExecutionMode, + ) -> Result { + let client = ssh::create_client(ip, port, username, password, &options).await?; + let mut channel = client.channel_open_session().await?; + + channel + .request_pty(false, "vt100", 80, 24, 0, 0, &[]) + .await?; + channel.request_shell(false).await?; + + wait_for_shell_ready(&mut channel, 
&options.timeouts).await?; + + if let ExecutionMode::Privileged = mode { + try_elevate_session(&mut channel, username, password, &options.timeouts).await?; + } + + Ok(Self { + channel, + mode, + options, + }) + } + + pub async fn close(&mut self) -> Result<(), Error> { + debug!("[Brocade] Closing session..."); + + self.channel.data(&b"exit\n"[..]).await?; + if let ExecutionMode::Privileged = self.mode { + self.channel.data(&b"exit\n"[..]).await?; + } + + let start = Instant::now(); + while start.elapsed() < self.options.timeouts.cleanup { + match timeout(self.options.timeouts.message_wait, self.channel.wait()).await { + Ok(Some(ChannelMsg::Close)) => break, + Ok(Some(_)) => continue, + Ok(None) | Err(_) => break, + } + } + + debug!("[Brocade] Session closed."); + Ok(()) + } + + pub async fn run_command(&mut self, command: &str) -> Result { + if self.should_skip_command(command) { + return Ok(String::new()); + } + + debug!("[Brocade] Running command: '{command}'..."); + + self.channel + .data(format!("{}\n", command).as_bytes()) + .await?; + tokio::time::sleep(Duration::from_millis(100)).await; + + let output = self.collect_command_output().await?; + let output = String::from_utf8(output) + .map_err(|_| Error::UnexpectedError("Invalid UTF-8 in command output".to_string()))?; + + self.check_for_command_errors(&output, command)?; + Ok(output) + } + + pub async fn run_commands(&mut self, commands: Vec) -> Result<(), Error> { + for command in commands { + self.run_command(&command).await?; + } + Ok(()) + } + + fn should_skip_command(&self, command: &str) -> bool { + if (command.starts_with("write") || command.starts_with("deploy")) && self.options.dry_run { + info!("[Brocade] Dry-run mode enabled, skipping command: {command}"); + return true; + } + false + } + + async fn collect_command_output(&mut self) -> Result, Error> { + let mut output = Vec::new(); + let start = Instant::now(); + let read_timeout = Duration::from_millis(500); + let log_interval = Duration::from_secs(3); + let mut last_log = Instant::now(); + + loop { + if start.elapsed() > self.options.timeouts.command_execution { + return Err(Error::TimeoutError( + "Timeout waiting for command completion.".into(), + )); + } + + if start.elapsed() > Duration::from_secs(5) && last_log.elapsed() > log_interval { + info!("[Brocade] Waiting for command output..."); + last_log = Instant::now(); + } + + match timeout(read_timeout, self.channel.wait()).await { + Ok(Some(ChannelMsg::Data { data } | ChannelMsg::ExtendedData { data, .. 
})) => { + output.extend_from_slice(&data); + let current_output = String::from_utf8_lossy(&output); + if current_output.contains('>') || current_output.contains('#') { + return Ok(output); + } + } + Ok(Some(ChannelMsg::Eof | ChannelMsg::Close)) => return Ok(output), + Ok(Some(ChannelMsg::ExitStatus { exit_status })) => { + debug!("[Brocade] Command exit status: {exit_status}"); + } + Ok(Some(_)) => continue, + Ok(None) | Err(_) => { + if output.is_empty() { + if let Ok(None) = timeout(read_timeout, self.channel.wait()).await { + break; + } + continue; + } + + tokio::time::sleep(Duration::from_millis(100)).await; + let current_output = String::from_utf8_lossy(&output); + if current_output.contains('>') || current_output.contains('#') { + return Ok(output); + } + } + } + } + + Ok(output) + } + + fn check_for_command_errors(&self, output: &str, command: &str) -> Result<(), Error> { + const ERROR_PATTERNS: &[&str] = &[ + "invalid input", + "syntax error", + "command not found", + "unknown command", + "permission denied", + "access denied", + "authentication failed", + "configuration error", + "failed to", + "error:", + ]; + + let output_lower = output.to_lowercase(); + if ERROR_PATTERNS.iter().any(|&p| output_lower.contains(p)) { + return Err(Error::CommandError(format!( + "Command '{command}' failed: {}", + output.trim() + ))); + } + + if !command.starts_with("show") && output.trim().is_empty() { + return Err(Error::CommandError(format!( + "Command '{command}' produced no output" + ))); + } + + Ok(()) + } +} + +async fn wait_for_shell_ready( + channel: &mut russh::Channel, + timeouts: &TimeoutConfig, +) -> Result<(), Error> { + let mut buffer = Vec::new(); + let start = Instant::now(); + + while start.elapsed() < timeouts.shell_ready { + match timeout(timeouts.message_wait, channel.wait()).await { + Ok(Some(ChannelMsg::Data { data })) => { + buffer.extend_from_slice(&data); + let output = String::from_utf8_lossy(&buffer); + let output = output.trim(); + if output.ends_with('>') || output.ends_with('#') { + debug!("[Brocade] Shell ready"); + return Ok(()); + } + } + Ok(Some(_)) => continue, + Ok(None) => break, + Err(_) => continue, + } + } + Ok(()) +} + +async fn try_elevate_session( + channel: &mut russh::Channel, + username: &str, + password: &str, + timeouts: &TimeoutConfig, +) -> Result<(), Error> { + channel.data(&b"enable\n"[..]).await?; + let start = Instant::now(); + let mut buffer = Vec::new(); + + while start.elapsed() < timeouts.shell_ready { + match timeout(timeouts.message_wait, channel.wait()).await { + Ok(Some(ChannelMsg::Data { data })) => { + buffer.extend_from_slice(&data); + let output = String::from_utf8_lossy(&buffer); + + if output.ends_with('#') { + debug!("[Brocade] Privileged mode established"); + return Ok(()); + } + + if output.contains("User Name:") { + channel.data(format!("{}\n", username).as_bytes()).await?; + buffer.clear(); + } else if output.contains("Password:") { + channel.data(format!("{}\n", password).as_bytes()).await?; + buffer.clear(); + } else if output.contains('>') { + return Err(Error::AuthenticationError( + "Enable authentication failed".into(), + )); + } + } + Ok(Some(_)) => continue, + Ok(None) => break, + Err(_) => continue, + } + } + + let output = String::from_utf8_lossy(&buffer); + if output.ends_with('#') { + debug!("[Brocade] Privileged mode established"); + Ok(()) + } else { + Err(Error::AuthenticationError(format!( + "Enable failed. 
Output:\n{output}" + ))) + } +} diff --git a/brocade/src/ssh.rs b/brocade/src/ssh.rs new file mode 100644 index 0000000..08ff96f --- /dev/null +++ b/brocade/src/ssh.rs @@ -0,0 +1,113 @@ +use std::borrow::Cow; +use std::sync::Arc; + +use async_trait::async_trait; +use russh::client::Handler; +use russh::kex::DH_G1_SHA1; +use russh::kex::ECDH_SHA2_NISTP256; +use russh_keys::key::SSH_RSA; + +use super::BrocadeOptions; +use super::Error; + +#[derive(Default, Clone, Debug)] +pub struct SshOptions { + pub preferred_algorithms: russh::Preferred, +} + +impl SshOptions { + fn ecdhsa_sha2_nistp256() -> Self { + Self { + preferred_algorithms: russh::Preferred { + kex: Cow::Borrowed(&[ECDH_SHA2_NISTP256]), + key: Cow::Borrowed(&[SSH_RSA]), + ..Default::default() + }, + } + } + + fn legacy() -> Self { + Self { + preferred_algorithms: russh::Preferred { + kex: Cow::Borrowed(&[DH_G1_SHA1]), + key: Cow::Borrowed(&[SSH_RSA]), + ..Default::default() + }, + } + } +} + +pub struct Client; + +#[async_trait] +impl Handler for Client { + type Error = Error; + + async fn check_server_key( + &mut self, + _server_public_key: &russh_keys::key::PublicKey, + ) -> Result { + Ok(true) + } +} + +pub async fn try_init_client( + username: &str, + password: &str, + ip: &std::net::IpAddr, + base_options: BrocadeOptions, +) -> Result { + let ssh_options = vec![ + SshOptions::default(), + SshOptions::ecdhsa_sha2_nistp256(), + SshOptions::legacy(), + ]; + + for ssh in ssh_options { + let opts = BrocadeOptions { + ssh, + ..base_options.clone() + }; + let client = create_client(*ip, 22, username, password, &opts).await; + + match client { + Ok(_) => { + return Ok(opts); + } + Err(e) => match e { + Error::NetworkError(e) => { + if e.contains("No common key exchange algorithm") { + continue; + } else { + return Err(Error::NetworkError(e)); + } + } + _ => return Err(e), + }, + } + } + + Err(Error::NetworkError( + "Could not establish ssh connection: wrong key exchange algorithm)".to_string(), + )) +} + +pub async fn create_client( + ip: std::net::IpAddr, + port: u16, + username: &str, + password: &str, + options: &BrocadeOptions, +) -> Result, Error> { + let config = russh::client::Config { + preferred: options.ssh.preferred_algorithms.clone(), + ..Default::default() + }; + let mut client = russh::client::connect(Arc::new(config), (ip, port), Client {}).await?; + if !client.authenticate_password(username, password).await? 
{ + return Err(Error::AuthenticationError( + "ssh authentication failed".to_string(), + )); + } + Ok(client) +} diff --git a/demos/cncf-k8s-quebec-meetup-september-2025/.gitignore b/demos/cncf-k8s-quebec-meetup-september-2025/.gitignore new file mode 100644 index 0000000..942a2db --- /dev/null +++ b/demos/cncf-k8s-quebec-meetup-september-2025/.gitignore @@ -0,0 +1,3 @@ +.terraform +*.tfstate +venv diff --git a/demos/cncf-k8s-quebec-meetup-september-2025/75_years_later.jpg b/demos/cncf-k8s-quebec-meetup-september-2025/75_years_later.jpg new file mode 100644 index 0000000..76a2cd7 Binary files /dev/null and b/demos/cncf-k8s-quebec-meetup-september-2025/75_years_later.jpg differ diff --git a/demos/cncf-k8s-quebec-meetup-september-2025/Happy_swimmer.jpg b/demos/cncf-k8s-quebec-meetup-september-2025/Happy_swimmer.jpg new file mode 100644 index 0000000..6755b3b Binary files /dev/null and b/demos/cncf-k8s-quebec-meetup-september-2025/Happy_swimmer.jpg differ diff --git a/demos/cncf-k8s-quebec-meetup-september-2025/Happy_swimmer_reversed.jpg b/demos/cncf-k8s-quebec-meetup-september-2025/Happy_swimmer_reversed.jpg new file mode 100644 index 0000000..d9942ae Binary files /dev/null and b/demos/cncf-k8s-quebec-meetup-september-2025/Happy_swimmer_reversed.jpg differ diff --git a/demos/cncf-k8s-quebec-meetup-september-2025/Happy_swimmer_reversed_1hit.jpg b/demos/cncf-k8s-quebec-meetup-september-2025/Happy_swimmer_reversed_1hit.jpg new file mode 100644 index 0000000..c14a7c6 Binary files /dev/null and b/demos/cncf-k8s-quebec-meetup-september-2025/Happy_swimmer_reversed_1hit.jpg differ diff --git a/demos/cncf-k8s-quebec-meetup-september-2025/Happy_swimmer_reversed_2hit.jpg b/demos/cncf-k8s-quebec-meetup-september-2025/Happy_swimmer_reversed_2hit.jpg new file mode 100644 index 0000000..183f796 Binary files /dev/null and b/demos/cncf-k8s-quebec-meetup-september-2025/Happy_swimmer_reversed_2hit.jpg differ diff --git a/demos/cncf-k8s-quebec-meetup-september-2025/Happy_swimmer_reversed_fullhit.jpg b/demos/cncf-k8s-quebec-meetup-september-2025/Happy_swimmer_reversed_fullhit.jpg new file mode 100644 index 0000000..f03adfe Binary files /dev/null and b/demos/cncf-k8s-quebec-meetup-september-2025/Happy_swimmer_reversed_fullhit.jpg differ diff --git a/demos/cncf-k8s-quebec-meetup-september-2025/Happy_swimmer_sunglasses.jpg b/demos/cncf-k8s-quebec-meetup-september-2025/Happy_swimmer_sunglasses.jpg new file mode 100644 index 0000000..a66fc51 Binary files /dev/null and b/demos/cncf-k8s-quebec-meetup-september-2025/Happy_swimmer_sunglasses.jpg differ diff --git a/demos/cncf-k8s-quebec-meetup-september-2025/README.md b/demos/cncf-k8s-quebec-meetup-september-2025/README.md new file mode 100644 index 0000000..c91d5bd --- /dev/null +++ b/demos/cncf-k8s-quebec-meetup-september-2025/README.md @@ -0,0 +1,5 @@ +To build : + +```bash +npx @marp-team/marp-cli@latest -w slides.md +``` diff --git a/demos/cncf-k8s-quebec-meetup-september-2025/ansible.jpg b/demos/cncf-k8s-quebec-meetup-september-2025/ansible.jpg new file mode 100644 index 0000000..216dbb1 Binary files /dev/null and b/demos/cncf-k8s-quebec-meetup-september-2025/ansible.jpg differ diff --git a/demos/cncf-k8s-quebec-meetup-september-2025/ansible/README.md b/demos/cncf-k8s-quebec-meetup-september-2025/ansible/README.md new file mode 100644 index 0000000..31adf5e --- /dev/null +++ b/demos/cncf-k8s-quebec-meetup-september-2025/ansible/README.md @@ -0,0 +1,9 @@ +To run this : + +```bash +virtualenv venv +source venv/bin/activate +pip install ansible ansible-dev-tools 
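+# as the slides show, ansible-lint reports no issues even though download.yml uses a malformed URL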
+ansible-lint download.yml +ansible-playbook -i localhost download.yml +``` diff --git a/demos/cncf-k8s-quebec-meetup-september-2025/ansible/download.yml b/demos/cncf-k8s-quebec-meetup-september-2025/ansible/download.yml new file mode 100644 index 0000000..29d7663 --- /dev/null +++ b/demos/cncf-k8s-quebec-meetup-september-2025/ansible/download.yml @@ -0,0 +1,8 @@ +- name: Test Ansible URL Validation + hosts: localhost + tasks: + - name: Download a file + ansible.builtin.get_url: + url: "http:/wikipedia.org/" + dest: "/tmp/ansible-test/wikipedia.html" + mode: '0900' diff --git a/demos/cncf-k8s-quebec-meetup-september-2025/ansible_crossed_out.jpg b/demos/cncf-k8s-quebec-meetup-september-2025/ansible_crossed_out.jpg new file mode 100644 index 0000000..985c2bc Binary files /dev/null and b/demos/cncf-k8s-quebec-meetup-september-2025/ansible_crossed_out.jpg differ diff --git a/demos/cncf-k8s-quebec-meetup-september-2025/ansible_fail.jpg b/demos/cncf-k8s-quebec-meetup-september-2025/ansible_fail.jpg new file mode 100644 index 0000000..cd54361 Binary files /dev/null and b/demos/cncf-k8s-quebec-meetup-september-2025/ansible_fail.jpg differ diff --git a/demos/cncf-k8s-quebec-meetup-september-2025/ansible_output_fail.jpg b/demos/cncf-k8s-quebec-meetup-september-2025/ansible_output_fail.jpg new file mode 100644 index 0000000..30bcdba Binary files /dev/null and b/demos/cncf-k8s-quebec-meetup-september-2025/ansible_output_fail.jpg differ diff --git a/demos/cncf-k8s-quebec-meetup-september-2025/happy_landscape_swimmer.jpg b/demos/cncf-k8s-quebec-meetup-september-2025/happy_landscape_swimmer.jpg new file mode 100644 index 0000000..f7b8107 Binary files /dev/null and b/demos/cncf-k8s-quebec-meetup-september-2025/happy_landscape_swimmer.jpg differ diff --git a/demos/cncf-k8s-quebec-meetup-september-2025/qrcode_gitea_nationtech.png b/demos/cncf-k8s-quebec-meetup-september-2025/qrcode_gitea_nationtech.png new file mode 100644 index 0000000..4ad8de5 Binary files /dev/null and b/demos/cncf-k8s-quebec-meetup-september-2025/qrcode_gitea_nationtech.png differ diff --git a/demos/cncf-k8s-quebec-meetup-september-2025/slides.html b/demos/cncf-k8s-quebec-meetup-september-2025/slides.html new file mode 100644 index 0000000..1636148 --- /dev/null +++ b/demos/cncf-k8s-quebec-meetup-september-2025/slides.html @@ -0,0 +1,195 @@ +Voici l'histoire de Petit Poisson
+[… generated Marp HTML export of slides.md (195 lines); slide text is identical to slides.md below …]
\ No newline at end of file diff --git a/demos/cncf-k8s-quebec-meetup-september-2025/slides.md b/demos/cncf-k8s-quebec-meetup-september-2025/slides.md new file mode 100644 index 0000000..2060883 --- /dev/null +++ b/demos/cncf-k8s-quebec-meetup-september-2025/slides.md @@ -0,0 +1,241 @@ +--- +theme: uncover +--- + +# Voici l'histoire de Petit Poisson + +--- + + + +--- + + + +--- + + + + + +[https://tryrust.org](https://tryrust.org) + +--- + + + +--- + + + +--- + + + +--- + + + +--- + +## Demo time + +--- + + + +--- + + + +--- + + + +## Ansible❓ + +--- + + + +```yaml +- name: Download wikipedia + hosts: localhost + tasks: + - name: Download a file + ansible.builtin.get_url: + url: "https:/wikipedia.org/" + dest: "/tmp/ansible-test/wikipedia.html" + mode: '0900' +``` + +--- + + + +``` +ansible-lint download.yml + +Passed: 0 failure(s), 0 warning(s) on 1 files. Last profile that met the validation criteria was 'production'. +``` + +--- + +``` +git push +``` + +--- + + + +--- + + + +--- + + + +--- + + + +--- + + + +--- + + + +--- + + + + +## Terraform❓❗ + +--- + + + + +```tf +provider "docker" {} + +resource "docker_network" "invalid_network" { + name = "my-invalid-network" + + ipam_config { + subnet = "172.17.0.0/33" + } +} +``` + +--- + + + + +``` +terraform plan + +Terraform used the selected providers to generate the following execution plan. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + # docker_network.invalid_network will be created + + resource "docker_network" "invalid_network" { + + driver = (known after apply) + + id = (known after apply) + + internal = (known after apply) + + ipam_driver = "default" + + name = "my-invalid-network" + + options = (known after apply) + + scope = (known after apply) + + + ipam_config { + + subnet = "172.17.0.0/33" + # (2 unchanged attributes hidden) + } + } + +Plan: 1 to add, 0 to change, 0 to destroy. +``` + +--- + +✅ + +--- + +``` +terraform apply +``` + +--- + +``` +Plan: 1 to add, 0 to change, 0 to destroy. + +Do you want to perform these actions? + Terraform will perform the actions described above. + Only 'yes' will be accepted to approve. + + Enter a value: yes +``` + +--- + +``` +docker_network.invalid_network: Creating... 
+╷ +│ Error: Unable to create network: Error response from daemon: invalid network config: +│ invalid subnet 172.17.0.0/33: invalid CIDR block notation +│ +│ with docker_network.invalid_network, +│ on main.tf line 11, in resource "docker_network" "invalid_network": +│ 11: resource "docker_network" "invalid_network" { +│ +╵ +``` + +--- + + + + +--- + + + + + +--- + +## Harmony❓❗ + +--- + +Demo time + +--- + + + +--- + +# 🎼 + +Harmony : [https://git.nationtech.io/nationtech/harmony](https://git.nationtech.io/nationtech/harmony) + + + + + +LinkedIn : [https://www.linkedin.com/in/jean-gabriel-gill-couture/](https://www.linkedin.com/in/jean-gabriel-gill-couture/) + +Courriel : [jg@nationtech.io](mailto:jg@nationtech.io) diff --git a/demos/cncf-k8s-quebec-meetup-september-2025/terraform.jpg b/demos/cncf-k8s-quebec-meetup-september-2025/terraform.jpg new file mode 100644 index 0000000..bdbcd03 Binary files /dev/null and b/demos/cncf-k8s-quebec-meetup-september-2025/terraform.jpg differ diff --git a/demos/cncf-k8s-quebec-meetup-september-2025/terraform/.terraform.lock.hcl b/demos/cncf-k8s-quebec-meetup-september-2025/terraform/.terraform.lock.hcl new file mode 100644 index 0000000..47aef22 --- /dev/null +++ b/demos/cncf-k8s-quebec-meetup-september-2025/terraform/.terraform.lock.hcl @@ -0,0 +1,40 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. + +provider "registry.terraform.io/hashicorp/http" { + version = "3.5.0" + hashes = [ + "h1:8bUoPwS4hahOvzCBj6b04ObLVFXCEmEN8T/5eOHmWOM=", + "zh:047c5b4920751b13425efe0d011b3a23a3be97d02d9c0e3c60985521c9c456b7", + "zh:157866f700470207561f6d032d344916b82268ecd0cf8174fb11c0674c8d0736", + "zh:1973eb9383b0d83dd4fd5e662f0f16de837d072b64a6b7cd703410d730499476", + "zh:212f833a4e6d020840672f6f88273d62a564f44acb0c857b5961cdb3bbc14c90", + "zh:2c8034bc039fffaa1d4965ca02a8c6d57301e5fa9fff4773e684b46e3f78e76a", + "zh:5df353fc5b2dd31577def9cc1a4ebf0c9a9c2699d223c6b02087a3089c74a1c6", + "zh:672083810d4185076c81b16ad13d1224b9e6ea7f4850951d2ab8d30fa6e41f08", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:7b4200f18abdbe39904b03537e1a78f21ebafe60f1c861a44387d314fda69da6", + "zh:843feacacd86baed820f81a6c9f7bd32cf302db3d7a0f39e87976ebc7a7cc2ee", + "zh:a9ea5096ab91aab260b22e4251c05f08dad2ed77e43e5e4fadcdfd87f2c78926", + "zh:d02b288922811739059e90184c7f76d45d07d3a77cc48d0b15fd3db14e928623", + ] +} + +provider "registry.terraform.io/hashicorp/local" { + version = "2.5.3" + hashes = [ + "h1:1Nkh16jQJMp0EuDmvP/96f5Unnir0z12WyDuoR6HjMo=", + "zh:284d4b5b572eacd456e605e94372f740f6de27b71b4e1fd49b63745d8ecd4927", + "zh:40d9dfc9c549e406b5aab73c023aa485633c1b6b730c933d7bcc2fa67fd1ae6e", + "zh:6243509bb208656eb9dc17d3c525c89acdd27f08def427a0dce22d5db90a4c8b", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:885d85869f927853b6fe330e235cd03c337ac3b933b0d9ae827ec32fa1fdcdbf", + "zh:bab66af51039bdfcccf85b25fe562cbba2f54f6b3812202f4873ade834ec201d", + "zh:c505ff1bf9442a889ac7dca3ac05a8ee6f852e0118dd9a61796a2f6ff4837f09", + "zh:d36c0b5770841ddb6eaf0499ba3de48e5d4fc99f4829b6ab66b0fab59b1aaf4f", + "zh:ddb6a407c7f3ec63efb4dad5f948b54f7f4434ee1a2607a49680d494b1776fe1", + "zh:e0dafdd4500bec23d3ff221e3a9b60621c5273e5df867bc59ef6b7e41f5c91f6", + "zh:ece8742fd2882a8fc9d6efd20e2590010d43db386b920b2a9c220cfecc18de47", + "zh:f4c6b3eb8f39105004cf720e202f04f57e3578441cfb76ca27611139bc116a82", + ] +} diff --git a/demos/cncf-k8s-quebec-meetup-september-2025/terraform/main.tf 
b/demos/cncf-k8s-quebec-meetup-september-2025/terraform/main.tf new file mode 100644 index 0000000..960302a --- /dev/null +++ b/demos/cncf-k8s-quebec-meetup-september-2025/terraform/main.tf @@ -0,0 +1,10 @@ +provider "http" {} + +data "http" "remote_file" { + url = "http:/example.com/file.txt" +} + +resource "local_file" "downloaded_file" { + content = data.http.remote_file.body + filename = "${path.module}/downloaded_file.txt" +} diff --git a/demos/cncf-k8s-quebec-meetup-september-2025/terraform_2/.terraform.lock.hcl b/demos/cncf-k8s-quebec-meetup-september-2025/terraform_2/.terraform.lock.hcl new file mode 100644 index 0000000..3a24d45 --- /dev/null +++ b/demos/cncf-k8s-quebec-meetup-september-2025/terraform_2/.terraform.lock.hcl @@ -0,0 +1,24 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. + +provider "registry.terraform.io/kreuzwerker/docker" { + version = "3.0.2" + constraints = "~> 3.0.1" + hashes = [ + "h1:cT2ccWOtlfKYBUE60/v2/4Q6Stk1KYTNnhxSck+VPlU=", + "zh:15b0a2b2b563d8d40f62f83057d91acb02cd0096f207488d8b4298a59203d64f", + "zh:23d919de139f7cd5ebfd2ff1b94e6d9913f0977fcfc2ca02e1573be53e269f95", + "zh:38081b3fe317c7e9555b2aaad325ad3fa516a886d2dfa8605ae6a809c1072138", + "zh:4a9c5065b178082f79ad8160243369c185214d874ff5048556d48d3edd03c4da", + "zh:5438ef6afe057945f28bce43d76c4401254073de01a774760169ac1058830ac2", + "zh:60b7fadc287166e5c9873dfe53a7976d98244979e0ab66428ea0dea1ebf33e06", + "zh:61c5ec1cb94e4c4a4fb1e4a24576d5f39a955f09afb17dab982de62b70a9bdd1", + "zh:a38fe9016ace5f911ab00c88e64b156ebbbbfb72a51a44da3c13d442cd214710", + "zh:c2c4d2b1fd9ebb291c57f524b3bf9d0994ff3e815c0cd9c9bcb87166dc687005", + "zh:d567bb8ce483ab2cf0602e07eae57027a1a53994aba470fa76095912a505533d", + "zh:e83bf05ab6a19dd8c43547ce9a8a511f8c331a124d11ac64687c764ab9d5a792", + "zh:e90c934b5cd65516fbcc454c89a150bfa726e7cf1fe749790c7480bbeb19d387", + "zh:f05f167d2eaf913045d8e7b88c13757e3cf595dd5cd333057fdafc7c4b7fed62", + "zh:fcc9c1cea5ce85e8bcb593862e699a881bd36dffd29e2e367f82d15368659c3d", + ] +} diff --git a/demos/cncf-k8s-quebec-meetup-september-2025/terraform_2/main.tf b/demos/cncf-k8s-quebec-meetup-september-2025/terraform_2/main.tf new file mode 100644 index 0000000..079fe2d --- /dev/null +++ b/demos/cncf-k8s-quebec-meetup-september-2025/terraform_2/main.tf @@ -0,0 +1,17 @@ +terraform { + required_providers { + docker = { + source = "kreuzwerker/docker" + version = "~> 3.0.1" # Adjust version as needed + } + } +} +provider "docker" {} + +resource "docker_network" "invalid_network" { + name = "my-invalid-network" + + ipam_config { + subnet = "172.17.0.0/33" + } +} diff --git a/demos/cncf-k8s-quebec-meetup-september-2025/terraform_crossed_out.jpg b/demos/cncf-k8s-quebec-meetup-september-2025/terraform_crossed_out.jpg new file mode 100644 index 0000000..1b239de Binary files /dev/null and b/demos/cncf-k8s-quebec-meetup-september-2025/terraform_crossed_out.jpg differ diff --git a/demos/cncf-k8s-quebec-meetup-september-2025/terraform_fail.jpg b/demos/cncf-k8s-quebec-meetup-september-2025/terraform_fail.jpg new file mode 100644 index 0000000..db9a782 Binary files /dev/null and b/demos/cncf-k8s-quebec-meetup-september-2025/terraform_fail.jpg differ diff --git a/demos/cncf-k8s-quebec-meetup-september-2025/terraform_fail_output.jpg b/demos/cncf-k8s-quebec-meetup-september-2025/terraform_fail_output.jpg new file mode 100644 index 0000000..8cb92e9 Binary files /dev/null and b/demos/cncf-k8s-quebec-meetup-september-2025/terraform_fail_output.jpg differ diff --git 
a/demos/cncf-k8s-quebec-meetup-september-2025/texto_deploy_prod_1.png b/demos/cncf-k8s-quebec-meetup-september-2025/texto_deploy_prod_1.png new file mode 100644 index 0000000..edd9fbc Binary files /dev/null and b/demos/cncf-k8s-quebec-meetup-september-2025/texto_deploy_prod_1.png differ diff --git a/demos/cncf-k8s-quebec-meetup-september-2025/texto_deploy_prod_2.png b/demos/cncf-k8s-quebec-meetup-september-2025/texto_deploy_prod_2.png new file mode 100644 index 0000000..96669b9 Binary files /dev/null and b/demos/cncf-k8s-quebec-meetup-september-2025/texto_deploy_prod_2.png differ diff --git a/demos/cncf-k8s-quebec-meetup-september-2025/texto_deploy_prod_3.png b/demos/cncf-k8s-quebec-meetup-september-2025/texto_deploy_prod_3.png new file mode 100644 index 0000000..5e05031 Binary files /dev/null and b/demos/cncf-k8s-quebec-meetup-september-2025/texto_deploy_prod_3.png differ diff --git a/demos/cncf-k8s-quebec-meetup-september-2025/texto_deploy_prod_4.png b/demos/cncf-k8s-quebec-meetup-september-2025/texto_deploy_prod_4.png new file mode 100644 index 0000000..cc462c3 Binary files /dev/null and b/demos/cncf-k8s-quebec-meetup-september-2025/texto_deploy_prod_4.png differ diff --git a/demos/cncf-k8s-quebec-meetup-september-2025/texto_download_wikipedia.png b/demos/cncf-k8s-quebec-meetup-september-2025/texto_download_wikipedia.png new file mode 100644 index 0000000..119985e Binary files /dev/null and b/demos/cncf-k8s-quebec-meetup-september-2025/texto_download_wikipedia.png differ diff --git a/demos/cncf-k8s-quebec-meetup-september-2025/texto_download_wikipedia_fail.png b/demos/cncf-k8s-quebec-meetup-september-2025/texto_download_wikipedia_fail.png new file mode 100644 index 0000000..e2e8176 Binary files /dev/null and b/demos/cncf-k8s-quebec-meetup-september-2025/texto_download_wikipedia_fail.png differ diff --git a/demos/cncf-k8s-quebec-meetup-september-2025/tryrust.org.png b/demos/cncf-k8s-quebec-meetup-september-2025/tryrust.org.png new file mode 100644 index 0000000..99f2e71 Binary files /dev/null and b/demos/cncf-k8s-quebec-meetup-september-2025/tryrust.org.png differ diff --git a/examples/application_monitoring_with_tenant/src/main.rs b/examples/application_monitoring_with_tenant/src/main.rs index f46a993..ad6e634 100644 --- a/examples/application_monitoring_with_tenant/src/main.rs +++ b/examples/application_monitoring_with_tenant/src/main.rs @@ -27,7 +27,6 @@ async fn main() { }; let application = Arc::new(RustWebapp { name: "example-monitoring".to_string(), - domain: Url::Url(url::Url::parse("https://rustapp.harmony.example.com").unwrap()), project_root: PathBuf::from("./examples/rust/webapp"), framework: Some(RustWebFramework::Leptos), service_port: 3000, diff --git a/examples/okd_installation/src/topology.rs b/examples/okd_installation/src/topology.rs index 02553a5..31062f5 100644 --- a/examples/okd_installation/src/topology.rs +++ b/examples/okd_installation/src/topology.rs @@ -1,6 +1,6 @@ use cidr::Ipv4Cidr; use harmony::{ - hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup}, + hardware::{Location, SwitchGroup}, infra::opnsense::OPNSenseManagementInterface, inventory::Inventory, topology::{HAClusterTopology, LogicalHost, UnmanagedRouter}, diff --git a/examples/openbao/Cargo.toml b/examples/openbao/Cargo.toml new file mode 100644 index 0000000..ae0a793 --- /dev/null +++ b/examples/openbao/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "example-openbao" +edition = "2024" +version.workspace = true +readme.workspace = true +license.workspace = true + 
+[dependencies] +harmony = { path = "../../harmony" } +harmony_cli = { path = "../../harmony_cli" } +harmony_macros = { path = "../../harmony_macros" } +harmony_types = { path = "../../harmony_types" } +tokio.workspace = true +url.workspace = true diff --git a/examples/openbao/README.md b/examples/openbao/README.md new file mode 100644 index 0000000..d78556c --- /dev/null +++ b/examples/openbao/README.md @@ -0,0 +1,7 @@ +To install an OpenBao instance with Harmony, simply run `cargo run -p example-openbao`. + +Depending on your environment configuration, it will either install a k3d cluster locally and deploy on it, or deploy to a remote cluster. + +Then follow the OpenBao documentation to initialize and unseal the instance; this makes OpenBao usable. + +https://openbao.org/docs/platform/k8s/helm/run/ diff --git a/examples/openbao/src/main.rs b/examples/openbao/src/main.rs new file mode 100644 index 0000000..52c5119 --- /dev/null +++ b/examples/openbao/src/main.rs @@ -0,0 +1,67 @@ +use std::{collections::HashMap, str::FromStr}; + +use harmony::{ + inventory::Inventory, + modules::helm::chart::{HelmChartScore, HelmRepository, NonBlankString}, + topology::K8sAnywhereTopology, }; +use harmony_macros::hurl; + +#[tokio::main] +async fn main() { + let values_yaml = Some( + r#"server: + standalone: + enabled: true + config: | + listener "tcp" { + tls_disable = true + address = "[::]:8200" + cluster_address = "[::]:8201" + } + + storage "file" { + path = "/openbao/data" + } + + service: + enabled: true + + dataStorage: + enabled: true + size: 10Gi + storageClass: null + accessMode: ReadWriteOnce + + auditStorage: + enabled: true + size: 10Gi + storageClass: null + accessMode: ReadWriteOnce"# + .to_string(), + ); + let openbao = HelmChartScore { + namespace: Some(NonBlankString::from_str("openbao").unwrap()), + release_name: NonBlankString::from_str("openbao").unwrap(), + chart_name: NonBlankString::from_str("openbao/openbao").unwrap(), + chart_version: None, + values_overrides: None, + values_yaml, + create_namespace: true, + install_only: true, + repository: Some(HelmRepository::new( + "openbao".to_string(), + hurl!("https://openbao.github.io/openbao-helm"), + true, + )), + }; + + harmony_cli::run( + Inventory::autoload(), + K8sAnywhereTopology::from_env(), + vec![Box::new(openbao)], + None, + ) + .await + .unwrap(); +} diff --git a/examples/rhob_application_monitoring/src/main.rs b/examples/rhob_application_monitoring/src/main.rs index dd6a05c..14ef2bd 100644 --- a/examples/rhob_application_monitoring/src/main.rs +++ b/examples/rhob_application_monitoring/src/main.rs @@ -4,8 +4,7 @@ use harmony::{ inventory::Inventory, modules::{ application::{ - ApplicationScore, RustWebFramework, RustWebapp, - features::rhob_monitoring::RHOBMonitoring, + ApplicationScore, RustWebFramework, RustWebapp, features::rhob_monitoring::Monitoring, }, monitoring::alert_channel::discord_alert_channel::DiscordWebhook, }, @@ -17,7 +16,6 @@ use harmony_types::net::Url; async fn main() { let application = Arc::new(RustWebapp { name: "test-rhob-monitoring".to_string(), - domain: Url::Url(url::Url::parse("htps://some-fake-url").unwrap()), project_root: PathBuf::from("./webapp"), // Relative from 'harmony-path' param framework: Some(RustWebFramework::Leptos), service_port: 3000, @@ -30,7 +28,7 @@ async fn main() { let app = ApplicationScore { features: vec![ - Box::new(RHOBMonitoring { + Box::new(Monitoring { application: application.clone(), alert_receiver: vec![Box::new(discord_receiver)], }),
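The OpenBao README above defers to the upstream documentation for the initialize/unseal step. As a rough sketch only, assuming the Helm score above installed the standalone chart with its default names (an `openbao` namespace and an `openbao-0` pod, which this patch does not itself guarantee), that flow looks like:

```
# Assumed names: namespace "openbao", pod "openbao-0" (standalone chart defaults)
kubectl exec -ti openbao-0 -n openbao -- bao operator init
# Prints the unseal key shares and the initial root token; store them securely
kubectl exec -ti openbao-0 -n openbao -- bao operator unseal
# Repeat until the required number of key shares has been entered (3 of 5 by default)
```

Until unsealed, the server responds but will not serve secrets, which is why the README calls this step out.

diff --git a/examples/rust/src/main.rs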
b/examples/rust/src/main.rs index 063fdb6..624cc88 100644 --- a/examples/rust/src/main.rs +++ b/examples/rust/src/main.rs @@ -5,7 +5,7 @@ use harmony::{ modules::{ application::{ ApplicationScore, RustWebFramework, RustWebapp, - features::{ContinuousDelivery, Monitoring}, + features::{Monitoring, PackagingDeployment}, }, monitoring::alert_channel::{ discord_alert_channel::DiscordWebhook, webhook_receiver::WebhookReceiver, @@ -19,7 +19,6 @@ use harmony_macros::hurl; async fn main() { let application = Arc::new(RustWebapp { name: "harmony-example-rust-webapp".to_string(), - domain: hurl!("https://rustapp.harmony.example.com"), project_root: PathBuf::from("./webapp"), framework: Some(RustWebFramework::Leptos), service_port: 3000, @@ -37,7 +36,7 @@ async fn main() { let app = ApplicationScore { features: vec![ - Box::new(ContinuousDelivery { + Box::new(PackagingDeployment { application: application.clone(), }), Box::new(Monitoring { diff --git a/examples/try_rust_webapp/files_to_add/.dockerignore b/examples/try_rust_webapp/files_to_add/.dockerignore new file mode 100644 index 0000000..856a07e --- /dev/null +++ b/examples/try_rust_webapp/files_to_add/.dockerignore @@ -0,0 +1 @@ +harmony diff --git a/examples/try_rust_webapp/files_to_add/Cargo.toml.to_add b/examples/try_rust_webapp/files_to_add/Cargo.toml.to_add new file mode 100644 index 0000000..487671c --- /dev/null +++ b/examples/try_rust_webapp/files_to_add/Cargo.toml.to_add @@ -0,0 +1,20 @@ +[package] +name = "harmony-tryrust" +edition = "2024" +version = "0.1.0" + +[dependencies] +harmony = { path = "../../../nationtech/harmony/harmony" } +harmony_cli = { path = "../../../nationtech/harmony/harmony_cli" } +harmony_types = { path = "../../../nationtech/harmony/harmony_types" } +harmony_macros = { path = "../../../nationtech/harmony/harmony_macros" } +tokio = { version = "1.40", features = [ + "io-std", + "fs", + "macros", + "rt-multi-thread", +] } +log = { version = "0.4", features = ["kv"] } +env_logger = "0.11" +url = "2.5" +base64 = "0.22.1" diff --git a/examples/try_rust_webapp/files_to_add/main.rs b/examples/try_rust_webapp/files_to_add/main.rs new file mode 100644 index 0000000..a4ab320 --- /dev/null +++ b/examples/try_rust_webapp/files_to_add/main.rs @@ -0,0 +1,50 @@ +use harmony::{ + inventory::Inventory, + modules::{ + application::{ + ApplicationScore, RustWebFramework, RustWebapp, + features::{PackagingDeployment, rhob_monitoring::Monitoring}, + }, + monitoring::alert_channel::discord_alert_channel::DiscordWebhook, + }, + topology::K8sAnywhereTopology, +}; +use harmony_macros::hurl; +use std::{path::PathBuf, sync::Arc}; + +#[tokio::main] +async fn main() { + let application = Arc::new(RustWebapp { + name: "tryrust".to_string(), + project_root: PathBuf::from(".."), + framework: Some(RustWebFramework::Leptos), + service_port: 8080, + }); + + let discord_webhook = DiscordWebhook { + name: "harmony_demo".to_string(), + url: hurl!("http://not_a_url.com"), + }; + + let app = ApplicationScore { + features: vec![ + Box::new(PackagingDeployment { + application: application.clone(), + }), + Box::new(Monitoring { + application: application.clone(), + alert_receiver: vec![Box::new(discord_webhook)], + }), + ], + application, + }; + + harmony_cli::run( + Inventory::autoload(), + K8sAnywhereTopology::from_env(), + vec![Box::new(app)], + None, + ) + .await + .unwrap(); +} diff --git a/examples/try_rust_webapp/src/main.rs b/examples/try_rust_webapp/src/main.rs index 6e1ab63..56a058d 100644 --- a/examples/try_rust_webapp/src/main.rs +++ 
b/examples/try_rust_webapp/src/main.rs @@ -1,41 +1,39 @@ -use std::{path::PathBuf, sync::Arc}; - use harmony::{ inventory::Inventory, modules::{ application::{ ApplicationScore, RustWebFramework, RustWebapp, - features::{ContinuousDelivery, Monitoring}, + features::{PackagingDeployment, rhob_monitoring::Monitoring}, }, monitoring::alert_channel::discord_alert_channel::DiscordWebhook, }, topology::K8sAnywhereTopology, }; -use harmony_types::net::Url; +use harmony_macros::hurl; +use std::{path::PathBuf, sync::Arc}; #[tokio::main] async fn main() { let application = Arc::new(RustWebapp { name: "harmony-example-tryrust".to_string(), - domain: Url::Url(url::Url::parse("https://tryrust.harmony.example.com").unwrap()), - project_root: PathBuf::from("./tryrust.org"), + project_root: PathBuf::from("./tryrust.org"), // <== Project root, in this case it is a + // submodule framework: Some(RustWebFramework::Leptos), service_port: 8080, }); - let discord_receiver = DiscordWebhook { - name: "test-discord".to_string(), - url: Url::Url(url::Url::parse("https://discord.doesnt.exist.com").unwrap()), - }; - + // Define your Application deployment and the features you want let app = ApplicationScore { features: vec![ - Box::new(ContinuousDelivery { + Box::new(PackagingDeployment { application: application.clone(), }), Box::new(Monitoring { application: application.clone(), - alert_receiver: vec![Box::new(discord_receiver)], + alert_receiver: vec![Box::new(DiscordWebhook { + name: "test-discord".to_string(), + url: hurl!("https://discord.doesnt.exist.com"), + })], }), ], application, @@ -43,7 +41,7 @@ async fn main() { harmony_cli::run( Inventory::autoload(), - K8sAnywhereTopology::from_env(), + K8sAnywhereTopology::from_env(), // <== Deploy to local automatically provisioned k3d by default or connect to any kubernetes cluster vec![Box::new(app)], None, ) diff --git a/harmony/Cargo.toml b/harmony/Cargo.toml index 391628b..634cbe9 100644 --- a/harmony/Cargo.toml +++ b/harmony/Cargo.toml @@ -10,7 +10,11 @@ testing = [] [dependencies] hex = "0.4" -reqwest = { version = "0.11", features = ["blocking", "json", "rustls-tls"], default-features = false } +reqwest = { version = "0.11", features = [ + "blocking", + "json", + "rustls-tls", +], default-features = false } russh = "0.45.0" rust-ipmi = "0.1.1" semver = "1.0.23" @@ -73,6 +77,9 @@ harmony_secret = { path = "../harmony_secret" } askama.workspace = true sqlx.workspace = true inquire.workspace = true +brocade = { path = "../brocade" } +option-ext = "0.2.0" [dev-dependencies] pretty_assertions.workspace = true +assertor.workspace = true diff --git a/harmony/src/domain/interpret/mod.rs b/harmony/src/domain/interpret/mod.rs index f1abcda..d555d9e 100644 --- a/harmony/src/domain/interpret/mod.rs +++ b/harmony/src/domain/interpret/mod.rs @@ -34,6 +34,7 @@ pub enum InterpretName { CephClusterHealth, Custom(&'static str), RHOBAlerting, + K8sIngress, } impl std::fmt::Display for InterpretName { @@ -64,6 +65,7 @@ impl std::fmt::Display for InterpretName { InterpretName::CephClusterHealth => f.write_str("CephClusterHealth"), InterpretName::Custom(name) => f.write_str(name), InterpretName::RHOBAlerting => f.write_str("RHOBAlerting"), + InterpretName::K8sIngress => f.write_str("K8sIngress"), } } } @@ -82,13 +84,15 @@ pub trait Interpret: std::fmt::Debug + Send { pub struct Outcome { pub status: InterpretStatus, pub message: String, + pub details: Vec, } impl Outcome { - pub fn noop() -> Self { + pub fn noop(message: String) -> Self { Self { status: InterpretStatus::NOOP, - 
message: String::new(), + message, + details: vec![], } } @@ -96,6 +100,23 @@ impl Outcome { Self { status: InterpretStatus::SUCCESS, message, + details: vec![], + } + } + + pub fn success_with_details(message: String, details: Vec) -> Self { + Self { + status: InterpretStatus::SUCCESS, + message, + details, + } + } + + pub fn running(message: String) -> Self { + Self { + status: InterpretStatus::RUNNING, + message, + details: vec![], } } } diff --git a/harmony/src/domain/topology/ha_cluster.rs b/harmony/src/domain/topology/ha_cluster.rs index c9f565e..7be2725 100644 --- a/harmony/src/domain/topology/ha_cluster.rs +++ b/harmony/src/domain/topology/ha_cluster.rs @@ -1,33 +1,36 @@ use async_trait::async_trait; +use brocade::BrocadeOptions; use harmony_macros::ip; -use harmony_types::net::MacAddress; -use harmony_types::net::Url; +use harmony_secret::SecretManager; +use harmony_types::{ + net::{MacAddress, Url}, + switch::PortLocation, +}; +use k8s_openapi::api::core::v1::Namespace; +use kube::api::ObjectMeta; use log::debug; use log::info; use crate::data::FileContent; use crate::executors::ExecutorError; +use crate::hardware::PhysicalHost; +use crate::infra::brocade::BrocadeSwitchAuth; +use crate::infra::brocade::BrocadeSwitchClient; +use crate::modules::okd::crd::{ + InstallPlanApproval, OperatorGroup, OperatorGroupSpec, Subscription, SubscriptionSpec, + nmstate::{self, NMState, NodeNetworkConfigurationPolicy, NodeNetworkConfigurationPolicySpec}, +}; use crate::topology::PxeOptions; -use super::DHCPStaticEntry; -use super::DhcpServer; -use super::DnsRecord; -use super::DnsRecordType; -use super::DnsServer; -use super::Firewall; -use super::HttpServer; -use super::IpAddress; -use super::K8sclient; -use super::LoadBalancer; -use super::LoadBalancerService; -use super::LogicalHost; -use super::PreparationError; -use super::PreparationOutcome; -use super::Router; -use super::TftpServer; +use super::{ + DHCPStaticEntry, DhcpServer, DnsRecord, DnsRecordType, DnsServer, Firewall, HostNetworkConfig, + HttpServer, IpAddress, K8sclient, LoadBalancer, LoadBalancerService, LogicalHost, + PreparationError, PreparationOutcome, Router, Switch, SwitchClient, SwitchError, TftpServer, + Topology, k8s::K8sClient, +}; -use super::Topology; -use super::k8s::K8sClient; +use std::collections::BTreeMap; +use std::net::IpAddr; use std::sync::Arc; #[derive(Debug, Clone)] @@ -89,6 +92,231 @@ impl HAClusterTopology { .to_string() } + async fn ensure_nmstate_operator_installed(&self) -> Result<(), String> { + // FIXME: Find a way to check nmstate is already available (get pod -n openshift-nmstate) + debug!("Installing NMState operator..."); + let k8s_client = self.k8s_client().await?; + + let nmstate_namespace = Namespace { + metadata: ObjectMeta { + name: Some("openshift-nmstate".to_string()), + finalizers: Some(vec!["kubernetes".to_string()]), + ..Default::default() + }, + ..Default::default() + }; + debug!("Creating NMState namespace: {nmstate_namespace:#?}"); + k8s_client + .apply(&nmstate_namespace, None) + .await + .map_err(|e| e.to_string())?; + + let nmstate_operator_group = OperatorGroup { + metadata: ObjectMeta { + name: Some("openshift-nmstate".to_string()), + namespace: Some("openshift-nmstate".to_string()), + ..Default::default() + }, + spec: OperatorGroupSpec { + target_namespaces: vec!["openshift-nmstate".to_string()], + }, + }; + debug!("Creating NMState operator group: {nmstate_operator_group:#?}"); + k8s_client + .apply(&nmstate_operator_group, None) + .await + .map_err(|e| e.to_string())?; + + let 
nmstate_subscription = Subscription { + metadata: ObjectMeta { + name: Some("kubernetes-nmstate-operator".to_string()), + namespace: Some("openshift-nmstate".to_string()), + ..Default::default() + }, + spec: SubscriptionSpec { + channel: Some("stable".to_string()), + install_plan_approval: Some(InstallPlanApproval::Automatic), + name: "kubernetes-nmstate-operator".to_string(), + source: "redhat-operators".to_string(), + source_namespace: "openshift-marketplace".to_string(), + }, + }; + debug!("Subscribing to NMState Operator: {nmstate_subscription:#?}"); + k8s_client + .apply(&nmstate_subscription, None) + .await + .map_err(|e| e.to_string())?; + + let nmstate = NMState { + metadata: ObjectMeta { + name: Some("nmstate".to_string()), + ..Default::default() + }, + ..Default::default() + }; + debug!("Creating NMState: {nmstate:#?}"); + k8s_client + .apply(&nmstate, None) + .await + .map_err(|e| e.to_string())?; + + Ok(()) + } + + fn get_next_bond_id(&self) -> u8 { + 42 // FIXME: Find a better way to declare the bond id + } + + async fn configure_bond( + &self, + host: &PhysicalHost, + config: &HostNetworkConfig, + ) -> Result<(), SwitchError> { + self.ensure_nmstate_operator_installed() + .await + .map_err(|e| { + SwitchError::new(format!( + "Can't configure bond, NMState operator not available: {e}" + )) + })?; + + let bond_config = self.create_bond_configuration(host, config); + debug!("Configuring bond for host {host:?}: {bond_config:#?}"); + self.k8s_client() + .await + .unwrap() + .apply(&bond_config, None) + .await + .unwrap(); + + todo!() + } + + fn create_bond_configuration( + &self, + host: &PhysicalHost, + config: &HostNetworkConfig, + ) -> NodeNetworkConfigurationPolicy { + let host_name = host.id.clone(); + + let bond_id = self.get_next_bond_id(); + let bond_name = format!("bond{bond_id}"); + let mut bond_mtu: Option = None; + let mut bond_mac_address: Option = None; + let mut bond_ports = Vec::new(); + let mut interfaces: Vec = Vec::new(); + + for switch_port in &config.switch_ports { + let interface_name = switch_port.interface.name.clone(); + + interfaces.push(nmstate::InterfaceSpec { + name: interface_name.clone(), + description: Some(format!("Member of bond {bond_name}")), + r#type: "ethernet".to_string(), + state: "up".to_string(), + mtu: Some(switch_port.interface.mtu), + mac_address: Some(switch_port.interface.mac_address.to_string()), + ipv4: Some(nmstate::IpStackSpec { + enabled: Some(false), + ..Default::default() + }), + ipv6: Some(nmstate::IpStackSpec { + enabled: Some(false), + ..Default::default() + }), + link_aggregation: None, + ..Default::default() + }); + + bond_ports.push(interface_name); + + // Use the first port's details for the bond mtu and mac address + if bond_mtu.is_none() { + bond_mtu = Some(switch_port.interface.mtu); + } + if bond_mac_address.is_none() { + bond_mac_address = Some(switch_port.interface.mac_address.to_string()); + } + } + + interfaces.push(nmstate::InterfaceSpec { + name: bond_name.clone(), + description: Some(format!("Network bond for host {host_name}")), + r#type: "bond".to_string(), + state: "up".to_string(), + mtu: bond_mtu, + mac_address: bond_mac_address, + ipv4: Some(nmstate::IpStackSpec { + dhcp: Some(true), + enabled: Some(true), + ..Default::default() + }), + ipv6: Some(nmstate::IpStackSpec { + dhcp: Some(true), + autoconf: Some(true), + enabled: Some(true), + ..Default::default() + }), + link_aggregation: Some(nmstate::BondSpec { + mode: "802.3ad".to_string(), + ports: bond_ports, + ..Default::default() + }), + 
..Default::default() + }); + + NodeNetworkConfigurationPolicy { + metadata: ObjectMeta { + name: Some(format!("{host_name}-bond-config")), + ..Default::default() + }, + spec: NodeNetworkConfigurationPolicySpec { + node_selector: Some(BTreeMap::from([( + "kubernetes.io/hostname".to_string(), + host_name.to_string(), + )])), + desired_state: nmstate::DesiredStateSpec { interfaces }, + }, + } + } + + async fn get_switch_client(&self) -> Result, SwitchError> { + let auth = SecretManager::get_or_prompt::() + .await + .map_err(|e| SwitchError::new(format!("Failed to get credentials: {e}")))?; + + // FIXME: We assume Brocade switches + let switches: Vec = self.switch.iter().map(|s| s.ip).collect(); + let brocade_options = Some(BrocadeOptions { + dry_run: *crate::config::DRY_RUN, + ..Default::default() + }); + let client = + BrocadeSwitchClient::init(&switches, &auth.username, &auth.password, brocade_options) + .await + .map_err(|e| SwitchError::new(format!("Failed to connect to switch: {e}")))?; + + Ok(Box::new(client)) + } + + async fn configure_port_channel( + &self, + host: &PhysicalHost, + config: &HostNetworkConfig, + ) -> Result<(), SwitchError> { + debug!("Configuring port channel: {config:#?}"); + let client = self.get_switch_client().await?; + + let switch_ports = config.switch_ports.iter().map(|s| s.port.clone()).collect(); + + client + .configure_port_channel(&format!("Harmony_{}", host.id), switch_ports) + .await + .map_err(|e| SwitchError::new(format!("Failed to configure switch: {e}")))?; + + Ok(()) + } + pub fn autoload() -> Self { let dummy_infra = Arc::new(DummyInfra {}); let dummy_host = LogicalHost { @@ -263,6 +491,33 @@ impl HttpServer for HAClusterTopology { } } +#[async_trait] +impl Switch for HAClusterTopology { + async fn setup_switch(&self) -> Result<(), SwitchError> { + let client = self.get_switch_client().await?; + client.setup().await?; + Ok(()) + } + + async fn get_port_for_mac_address( + &self, + mac_address: &MacAddress, + ) -> Result, SwitchError> { + let client = self.get_switch_client().await?; + let port = client.find_port(mac_address).await?; + Ok(port) + } + + async fn configure_host_network( + &self, + host: &PhysicalHost, + config: HostNetworkConfig, + ) -> Result<(), SwitchError> { + self.configure_bond(host, &config).await?; + self.configure_port_channel(host, &config).await + } +} + #[derive(Debug)] pub struct DummyInfra; @@ -332,8 +587,8 @@ impl DhcpServer for DummyInfra { } async fn set_dhcp_range( &self, - start: &IpAddress, - end: &IpAddress, + _start: &IpAddress, + _end: &IpAddress, ) -> Result<(), ExecutorError> { unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) } diff --git a/harmony/src/domain/topology/ingress.rs b/harmony/src/domain/topology/ingress.rs new file mode 100644 index 0000000..69c9382 --- /dev/null +++ b/harmony/src/domain/topology/ingress.rs @@ -0,0 +1,7 @@ +use crate::topology::PreparationError; +use async_trait::async_trait; + +#[async_trait] +pub trait Ingress { + async fn get_domain(&self, service: &str) -> Result; +} diff --git a/harmony/src/domain/topology/k8s.rs b/harmony/src/domain/topology/k8s.rs index 88bd2e8..5a1e6ec 100644 --- a/harmony/src/domain/topology/k8s.rs +++ b/harmony/src/domain/topology/k8s.rs @@ -1,13 +1,19 @@ +use std::time::Duration; + use derive_new::new; use k8s_openapi::{ ClusterResourceScope, NamespaceResourceScope, - api::{apps::v1::Deployment, core::v1::Pod}, + api::{ + apps::v1::Deployment, + core::v1::{Pod, PodStatus}, + }, }; use kube::{ Client, Config, Error, Resource, api::{Api, AttachParams, 
DeleteParams, ListParams, Patch, PatchParams, ResourceExt}, config::{KubeConfigOptions, Kubeconfig}, core::ErrorResponse, + error::DiscoveryError, runtime::reflector::Lookup, }; use kube::{api::DynamicObject, runtime::conditions}; @@ -19,7 +25,7 @@ use log::{debug, error, trace}; use serde::{Serialize, de::DeserializeOwned}; use serde_json::{Value, json}; use similar::TextDiff; -use tokio::io::AsyncReadExt; +use tokio::{io::AsyncReadExt, time::sleep}; #[derive(new, Clone)] pub struct K8sClient { @@ -153,6 +159,41 @@ impl K8sClient { } } + pub async fn wait_for_pod_ready( + &self, + pod_name: &str, + namespace: Option<&str>, + ) -> Result<(), Error> { + let mut elapsed = 0; + let interval = 5; // seconds between checks + let timeout_secs = 120; + loop { + let pod = self.get_pod(pod_name, namespace).await?; + + if let Some(p) = pod { + if let Some(status) = p.status { + if let Some(phase) = status.phase { + if phase.to_lowercase() == "running" { + return Ok(()); + } + } + } + } + + if elapsed >= timeout_secs { + return Err(Error::Discovery(DiscoveryError::MissingResource(format!( + "'{}' in ns '{}' did not become ready within {}s", + pod_name, + namespace.unwrap(), + timeout_secs + )))); + } + + sleep(Duration::from_secs(interval)).await; + elapsed += interval; + } + } + /// Will execute a commond in the first pod found that matches the specified label /// '{label}={name}' pub async fn exec_app_capture_output( @@ -419,9 +460,12 @@ impl K8sClient { .as_str() .expect("couldn't get kind as str"); - let split: Vec<&str> = api_version.splitn(2, "/").collect(); - let g = split[0]; - let v = split[1]; + let mut it = api_version.splitn(2, '/'); + let first = it.next().unwrap(); + let (g, v) = match it.next() { + Some(second) => (first, second), + None => ("", first), + }; let gvk = GroupVersionKind::gvk(g, v, kind); let api_resource = ApiResource::from_gvk(&gvk); diff --git a/harmony/src/domain/topology/k8s_anywhere.rs b/harmony/src/domain/topology/k8s_anywhere.rs index 119ad13..53b6436 100644 --- a/harmony/src/domain/topology/k8s_anywhere.rs +++ b/harmony/src/domain/topology/k8s_anywhere.rs @@ -1,6 +1,7 @@ use std::{process::Command, sync::Arc}; use async_trait::async_trait; +use kube::api::GroupVersionKind; use log::{debug, info, warn}; use serde::Serialize; use tokio::sync::OnceCell; @@ -22,6 +23,7 @@ use crate::{ }, }, score::Score, + topology::ingress::Ingress, }; use super::{ @@ -198,6 +200,26 @@ impl K8sAnywhereTopology { } } + async fn openshift_ingress_operator_available(&self) -> Result<(), PreparationError> { + let client = self.k8s_client().await?; + let gvk = GroupVersionKind { + group: "operator.openshift.io".into(), + version: "v1".into(), + kind: "IngressController".into(), + }; + let ic = client + .get_resource_json_value("default", Some("openshift-ingress-operator"), &gvk) + .await?; + let ready_replicas = ic.data["status"]["availableReplicas"].as_i64().unwrap_or(0); + if ready_replicas >= 1 { + return Ok(()); + } else { + return Err(PreparationError::new( + "openshift-ingress-operator not available".to_string(), + )); + } + } + fn is_helm_available(&self) -> Result<(), String> { let version_result = Command::new("helm") .arg("version") @@ -350,6 +372,10 @@ impl K8sAnywhereTopology { if let Some(Some(k8s_state)) = self.k8s_state.get() { match k8s_state.source { K8sSource::LocalK3d => { + warn!( + "Installing observability operator is not supported on LocalK3d source" + ); + return Ok(PreparationOutcome::Noop); debug!("installing cluster observability operator"); todo!(); let 
op_score = @@ -528,7 +554,7 @@ impl MultiTargetTopology for K8sAnywhereTopology { match self.config.harmony_profile.to_lowercase().as_str() { "staging" => DeploymentTarget::Staging, "production" => DeploymentTarget::Production, - _ => todo!("HARMONY_PROFILE must be set when use_local_k3d is not set"), + _ => todo!("HARMONY_PROFILE must be set when use_local_k3d is false"), } } } @@ -550,3 +576,45 @@ impl TenantManager for K8sAnywhereTopology { .await } } + +#[async_trait] +impl Ingress for K8sAnywhereTopology { + //TODO this is specifically for openshift/okd which violates the k8sanywhere idea + async fn get_domain(&self, service: &str) -> Result { + let client = self.k8s_client().await?; + + if let Some(Some(k8s_state)) = self.k8s_state.get() { + match k8s_state.source { + K8sSource::LocalK3d => Ok(format!("{service}.local.k3d")), + K8sSource::Kubeconfig => { + self.openshift_ingress_operator_available().await?; + + let gvk = GroupVersionKind { + group: "operator.openshift.io".into(), + version: "v1".into(), + kind: "IngressController".into(), + }; + let ic = client + .get_resource_json_value( + "default", + Some("openshift-ingress-operator"), + &gvk, + ) + .await + .map_err(|_| { + PreparationError::new("Failed to fetch IngressController".to_string()) + })?; + + match ic.data["status"]["domain"].as_str() { + Some(domain) => Ok(format!("{service}.{domain}")), + None => Err(PreparationError::new("Could not find domain".to_string())), + } + } + } + } else { + Err(PreparationError::new( + "Cannot get domain: unable to detect K8s state".to_string(), + )) + } + } +} diff --git a/harmony/src/domain/topology/localhost.rs b/harmony/src/domain/topology/localhost.rs index 71a8b93..667b3f8 100644 --- a/harmony/src/domain/topology/localhost.rs +++ b/harmony/src/domain/topology/localhost.rs @@ -1,9 +1,10 @@ use async_trait::async_trait; use derive_new::new; +use serde::{Deserialize, Serialize}; use super::{HelmCommand, PreparationError, PreparationOutcome, Topology}; -#[derive(new)] +#[derive(new, Clone, Debug, Serialize, Deserialize)] pub struct LocalhostTopology; #[async_trait] diff --git a/harmony/src/domain/topology/mod.rs b/harmony/src/domain/topology/mod.rs index a1060a5..85e57d7 100644 --- a/harmony/src/domain/topology/mod.rs +++ b/harmony/src/domain/topology/mod.rs @@ -1,4 +1,5 @@ mod ha_cluster; +pub mod ingress; use harmony_types::net::IpAddress; mod host_binding; mod http; diff --git a/harmony/src/domain/topology/network.rs b/harmony/src/domain/topology/network.rs index c7ab5cc..99db03a 100644 --- a/harmony/src/domain/topology/network.rs +++ b/harmony/src/domain/topology/network.rs @@ -1,10 +1,14 @@ -use std::{net::Ipv4Addr, str::FromStr, sync::Arc}; +use std::{error::Error, net::Ipv4Addr, str::FromStr, sync::Arc}; use async_trait::async_trait; -use harmony_types::net::{IpAddress, MacAddress}; +use derive_new::new; +use harmony_types::{ + net::{IpAddress, MacAddress}, + switch::PortLocation, +}; use serde::Serialize; -use crate::executors::ExecutorError; +use crate::{executors::ExecutorError, hardware::PhysicalHost}; use super::{LogicalHost, k8s::K8sClient}; @@ -172,6 +176,80 @@ impl FromStr for DnsRecordType { } } +#[async_trait] +pub trait Switch: Send + Sync { + async fn setup_switch(&self) -> Result<(), SwitchError>; + + async fn get_port_for_mac_address( + &self, + mac_address: &MacAddress, + ) -> Result, SwitchError>; + + async fn configure_host_network( + &self, + host: &PhysicalHost, + config: HostNetworkConfig, + ) -> Result<(), SwitchError>; +} + +#[derive(Clone, Debug, 
PartialEq)] +pub struct HostNetworkConfig { + pub switch_ports: Vec, +} + +#[derive(Clone, Debug, PartialEq)] +pub struct SwitchPort { + pub interface: NetworkInterface, + pub port: PortLocation, +} + +#[derive(Clone, Debug, PartialEq)] +pub struct NetworkInterface { + pub name: String, + pub mac_address: MacAddress, + pub speed_mbps: Option, + pub mtu: u32, +} + +#[derive(Debug, Clone, new)] +pub struct SwitchError { + msg: String, +} + +impl std::fmt::Display for SwitchError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str(&self.msg) + } +} + +impl Error for SwitchError {} + +#[async_trait] +pub trait SwitchClient: Send + Sync { + /// Executes essential, idempotent, one-time initial configuration steps. + /// + /// This is an opiniated procedure that setups a switch to provide high availability + /// capabilities as decided by the NationTech team. + /// + /// This includes tasks like enabling switchport for all interfaces + /// except the ones intended for Fabric Networking, etc. + /// + /// The implementation must ensure the operation is **idempotent** (safe to run multiple times) + /// and that it doesn't break existing configurations. + async fn setup(&self) -> Result<(), SwitchError>; + + async fn find_port( + &self, + mac_address: &MacAddress, + ) -> Result, SwitchError>; + + async fn configure_port_channel( + &self, + channel_name: &str, + switch_ports: Vec, + ) -> Result; +} + #[cfg(test)] mod test { use std::sync::Arc; diff --git a/harmony/src/infra/brocade.rs b/harmony/src/infra/brocade.rs new file mode 100644 index 0000000..f721328 --- /dev/null +++ b/harmony/src/infra/brocade.rs @@ -0,0 +1,385 @@ +use async_trait::async_trait; +use brocade::{BrocadeClient, BrocadeOptions, InterSwitchLink, InterfaceStatus, PortOperatingMode}; +use harmony_secret::Secret; +use harmony_types::{ + net::{IpAddress, MacAddress}, + switch::{PortDeclaration, PortLocation}, +}; +use option_ext::OptionExt; +use serde::{Deserialize, Serialize}; + +use crate::topology::{SwitchClient, SwitchError}; + +pub struct BrocadeSwitchClient { + brocade: Box, +} + +impl BrocadeSwitchClient { + pub async fn init( + ip_addresses: &[IpAddress], + username: &str, + password: &str, + options: Option, + ) -> Result { + let brocade = brocade::init(ip_addresses, 22, username, password, options).await?; + Ok(Self { brocade }) + } +} + +#[async_trait] +impl SwitchClient for BrocadeSwitchClient { + async fn setup(&self) -> Result<(), SwitchError> { + let stack_topology = self + .brocade + .get_stack_topology() + .await + .map_err(|e| SwitchError::new(e.to_string()))?; + + let interfaces = self + .brocade + .get_interfaces() + .await + .map_err(|e| SwitchError::new(e.to_string()))?; + + let interfaces: Vec<(String, PortOperatingMode)> = interfaces + .into_iter() + .filter(|interface| { + interface.operating_mode.is_none() && interface.status == InterfaceStatus::Connected + }) + .filter(|interface| { + !stack_topology.iter().any(|link: &InterSwitchLink| { + link.local_port == interface.port_location + || link.remote_port.contains(&interface.port_location) + }) + }) + .map(|interface| (interface.name.clone(), PortOperatingMode::Access)) + .collect(); + + if interfaces.is_empty() { + return Ok(()); + } + + self.brocade + .configure_interfaces(interfaces) + .await + .map_err(|e| SwitchError::new(e.to_string()))?; + + Ok(()) + } + + async fn find_port( + &self, + mac_address: &MacAddress, + ) -> Result, SwitchError> { + let table = self + .brocade + .get_mac_address_table() + .await + 
.map_err(|e| SwitchError::new(format!("{e}")))?; + + let port = table + .iter() + .find(|entry| entry.mac_address == *mac_address) + .map(|entry| match &entry.port { + PortDeclaration::Single(port_location) => Ok(port_location.clone()), + _ => Err(SwitchError::new( + "Multiple ports found for MAC address".into(), + )), + }); + + match port { + Some(Ok(p)) => Ok(Some(p)), + Some(Err(e)) => Err(e), + None => Ok(None), + } + } + + async fn configure_port_channel( + &self, + channel_name: &str, + switch_ports: Vec, + ) -> Result { + let channel_id = self + .brocade + .find_available_channel_id() + .await + .map_err(|e| SwitchError::new(format!("{e}")))?; + + self.brocade + .create_port_channel(channel_id, channel_name, &switch_ports) + .await + .map_err(|e| SwitchError::new(format!("{e}")))?; + + Ok(channel_id) + } +} + +#[derive(Secret, Serialize, Deserialize, Debug)] +pub struct BrocadeSwitchAuth { + pub username: String, + pub password: String, +} + +#[cfg(test)] +mod tests { + use std::sync::{Arc, Mutex}; + + use assertor::*; + use async_trait::async_trait; + use brocade::{ + BrocadeClient, BrocadeInfo, Error, InterSwitchLink, InterfaceInfo, InterfaceStatus, + InterfaceType, MacAddressEntry, PortChannelId, PortOperatingMode, + }; + use harmony_types::switch::PortLocation; + + use crate::{infra::brocade::BrocadeSwitchClient, topology::SwitchClient}; + + #[tokio::test] + async fn setup_should_configure_ethernet_interfaces_as_access_ports() { + let first_interface = given_interface() + .with_port_location(PortLocation(1, 0, 1)) + .build(); + let second_interface = given_interface() + .with_port_location(PortLocation(1, 0, 4)) + .build(); + let brocade = Box::new(FakeBrocadeClient::new( + vec![], + vec![first_interface.clone(), second_interface.clone()], + )); + let client = BrocadeSwitchClient { + brocade: brocade.clone(), + }; + + client.setup().await.unwrap(); + + let configured_interfaces = brocade.configured_interfaces.lock().unwrap(); + assert_that!(*configured_interfaces).contains_exactly(vec![ + (first_interface.name.clone(), PortOperatingMode::Access), + (second_interface.name.clone(), PortOperatingMode::Access), + ]); + } + + #[tokio::test] + async fn setup_with_an_already_configured_interface_should_skip_configuration() { + let brocade = Box::new(FakeBrocadeClient::new( + vec![], + vec![ + given_interface() + .with_operating_mode(Some(PortOperatingMode::Access)) + .build(), + ], + )); + let client = BrocadeSwitchClient { + brocade: brocade.clone(), + }; + + client.setup().await.unwrap(); + + let configured_interfaces = brocade.configured_interfaces.lock().unwrap(); + assert_that!(*configured_interfaces).is_empty(); + } + + #[tokio::test] + async fn setup_with_a_disconnected_interface_should_skip_configuration() { + let brocade = Box::new(FakeBrocadeClient::new( + vec![], + vec![ + given_interface() + .with_status(InterfaceStatus::SfpAbsent) + .build(), + given_interface() + .with_status(InterfaceStatus::NotConnected) + .build(), + ], + )); + let client = BrocadeSwitchClient { + brocade: brocade.clone(), + }; + + client.setup().await.unwrap(); + + let configured_interfaces = brocade.configured_interfaces.lock().unwrap(); + assert_that!(*configured_interfaces).is_empty(); + } + + #[tokio::test] + async fn setup_with_inter_switch_links_should_not_configure_interfaces_used_to_form_stack() { + let brocade = Box::new(FakeBrocadeClient::new( + vec![ + given_inter_switch_link() + .between(PortLocation(1, 0, 1), PortLocation(2, 0, 1)) + .build(), + given_inter_switch_link() + 
.between(PortLocation(2, 0, 2), PortLocation(3, 0, 1)) + .build(), + ], + vec![ + given_interface() + .with_port_location(PortLocation(1, 0, 1)) + .build(), + given_interface() + .with_port_location(PortLocation(2, 0, 1)) + .build(), + given_interface() + .with_port_location(PortLocation(3, 0, 1)) + .build(), + ], + )); + let client = BrocadeSwitchClient { + brocade: brocade.clone(), + }; + + client.setup().await.unwrap(); + + let configured_interfaces = brocade.configured_interfaces.lock().unwrap(); + assert_that!(*configured_interfaces).is_empty(); + } + + #[derive(Clone)] + struct FakeBrocadeClient { + stack_topology: Vec, + interfaces: Vec, + configured_interfaces: Arc>>, + } + + #[async_trait] + impl BrocadeClient for FakeBrocadeClient { + async fn version(&self) -> Result { + todo!() + } + + async fn get_mac_address_table(&self) -> Result, Error> { + todo!() + } + + async fn get_stack_topology(&self) -> Result, Error> { + Ok(self.stack_topology.clone()) + } + + async fn get_interfaces(&self) -> Result, Error> { + Ok(self.interfaces.clone()) + } + + async fn configure_interfaces( + &self, + interfaces: Vec<(String, PortOperatingMode)>, + ) -> Result<(), Error> { + let mut configured_interfaces = self.configured_interfaces.lock().unwrap(); + *configured_interfaces = interfaces; + + Ok(()) + } + + async fn find_available_channel_id(&self) -> Result { + todo!() + } + + async fn create_port_channel( + &self, + _channel_id: PortChannelId, + _channel_name: &str, + _ports: &[PortLocation], + ) -> Result<(), Error> { + todo!() + } + + async fn clear_port_channel(&self, _channel_name: &str) -> Result<(), Error> { + todo!() + } + } + + impl FakeBrocadeClient { + fn new(stack_topology: Vec, interfaces: Vec) -> Self { + Self { + stack_topology, + interfaces, + configured_interfaces: Arc::new(Mutex::new(vec![])), + } + } + } + + struct InterfaceInfoBuilder { + port_location: Option, + interface_type: Option, + operating_mode: Option, + status: Option, + } + + impl InterfaceInfoBuilder { + fn build(&self) -> InterfaceInfo { + let interface_type = self + .interface_type + .clone() + .unwrap_or(InterfaceType::Ethernet("TenGigabitEthernet".into())); + let port_location = self.port_location.clone().unwrap_or(PortLocation(1, 0, 1)); + let name = format!("{interface_type} {port_location}"); + let status = self.status.clone().unwrap_or(InterfaceStatus::Connected); + + InterfaceInfo { + name, + port_location, + interface_type, + operating_mode: self.operating_mode.clone(), + status, + } + } + + fn with_port_location(self, port_location: PortLocation) -> Self { + Self { + port_location: Some(port_location), + ..self + } + } + + fn with_operating_mode(self, operating_mode: Option) -> Self { + Self { + operating_mode, + ..self + } + } + + fn with_status(self, status: InterfaceStatus) -> Self { + Self { + status: Some(status), + ..self + } + } + } + + struct InterSwitchLinkBuilder { + link: Option<(PortLocation, PortLocation)>, + } + + impl InterSwitchLinkBuilder { + fn build(&self) -> InterSwitchLink { + let link = self + .link + .clone() + .unwrap_or((PortLocation(1, 0, 1), PortLocation(2, 0, 1))); + + InterSwitchLink { + local_port: link.0, + remote_port: Some(link.1), + } + } + + fn between(self, local_port: PortLocation, remote_port: PortLocation) -> Self { + Self { + link: Some((local_port, remote_port)), + } + } + } + + fn given_interface() -> InterfaceInfoBuilder { + InterfaceInfoBuilder { + port_location: None, + interface_type: None, + operating_mode: None, + status: None, + } + } + + fn 
given_inter_switch_link() -> InterSwitchLinkBuilder { + InterSwitchLinkBuilder { link: None } + } +} diff --git a/harmony/src/infra/mod.rs b/harmony/src/infra/mod.rs index c05c7b6..203cf90 100644 --- a/harmony/src/infra/mod.rs +++ b/harmony/src/infra/mod.rs @@ -1,3 +1,4 @@ +pub mod brocade; pub mod executors; pub mod hp_ilo; pub mod intel_amt; diff --git a/harmony/src/modules/application/feature.rs b/harmony/src/modules/application/feature.rs index be4482f..9e1b1ae 100644 --- a/harmony/src/modules/application/feature.rs +++ b/harmony/src/modules/application/feature.rs @@ -1,7 +1,10 @@ +use std::error::Error; + use async_trait::async_trait; +use derive_new::new; use serde::Serialize; -use crate::topology::Topology; +use crate::{executors::ExecutorError, topology::Topology}; /// An ApplicationFeature provided by harmony, such as Backups, Monitoring, MultisiteAvailability, /// ContinuousIntegration, ContinuousDelivery @@ -9,7 +12,10 @@ use crate::topology::Topology; pub trait ApplicationFeature: std::fmt::Debug + Send + Sync + ApplicationFeatureClone { - async fn ensure_installed(&self, topology: &T) -> Result<(), String>; + async fn ensure_installed( + &self, + topology: &T, + ) -> Result; fn name(&self) -> String; } @@ -40,3 +46,60 @@ impl Clone for Box> { self.clone_box() } } + +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum InstallationOutcome { + Success { details: Vec }, + Noop, +} + +impl InstallationOutcome { + pub fn success() -> Self { + Self::Success { details: vec![] } + } + + pub fn success_with_details(details: Vec) -> Self { + Self::Success { details } + } + + pub fn noop() -> Self { + Self::Noop + } +} + +#[derive(Debug, Clone, new)] +pub struct InstallationError { + msg: String, +} + +impl std::fmt::Display for InstallationError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str(&self.msg) + } +} + +impl Error for InstallationError {} + +impl From for InstallationError { + fn from(value: ExecutorError) -> Self { + Self { + msg: format!("InstallationError : {value}"), + } + } +} + +impl From for InstallationError { + fn from(value: kube::Error) -> Self { + Self { + msg: format!("InstallationError : {value}"), + } + } +} + +impl From for InstallationError { + fn from(value: String) -> Self { + Self { + msg: format!("PreparationError : {value}"), + } + } +} diff --git a/harmony/src/modules/application/features/endpoint.rs b/harmony/src/modules/application/features/endpoint.rs index 042f0dd..d2b23db 100644 --- a/harmony/src/modules/application/features/endpoint.rs +++ b/harmony/src/modules/application/features/endpoint.rs @@ -2,7 +2,7 @@ use async_trait::async_trait; use log::info; use crate::{ - modules::application::ApplicationFeature, + modules::application::{ApplicationFeature, InstallationError, InstallationOutcome}, topology::{K8sclient, Topology}, }; @@ -29,7 +29,10 @@ impl Default for PublicEndpoint { /// For now we only suport K8s ingress, but we will support more stuff at some point #[async_trait] impl ApplicationFeature for PublicEndpoint { - async fn ensure_installed(&self, _topology: &T) -> Result<(), String> { + async fn ensure_installed( + &self, + _topology: &T, + ) -> Result { info!( "Making sure public endpoint is installed for port {}", self.application_port diff --git a/harmony/src/modules/application/features/helm_argocd_score.rs b/harmony/src/modules/application/features/helm_argocd_score.rs index bfa3d8b..2e51a9e 100644 --- a/harmony/src/modules/application/features/helm_argocd_score.rs +++ 
b/harmony/src/modules/application/features/helm_argocd_score.rs @@ -1,4 +1,5 @@ use async_trait::async_trait; +use harmony_macros::hurl; use kube::{Api, api::GroupVersionKind}; use log::{debug, warn}; use non_blank_string_rs::NonBlankString; @@ -13,7 +14,8 @@ use crate::{ modules::helm::chart::{HelmChartScore, HelmRepository}, score::Score, topology::{ - HelmCommand, K8sclient, PreparationError, PreparationOutcome, Topology, k8s::K8sClient, + HelmCommand, K8sclient, PreparationError, PreparationOutcome, Topology, ingress::Ingress, + k8s::K8sClient, }, }; use harmony_types::id::Id; @@ -27,7 +29,7 @@ pub struct ArgoHelmScore { pub argo_apps: Vec, } -impl Score for ArgoHelmScore { +impl Score for ArgoHelmScore { fn create_interpret(&self) -> Box> { Box::new(ArgoInterpret { score: self.clone(), @@ -47,17 +49,15 @@ pub struct ArgoInterpret { } #[async_trait] -impl Interpret for ArgoInterpret { +impl Interpret for ArgoInterpret { async fn execute( &self, inventory: &Inventory, topology: &T, ) -> Result { let k8s_client = topology.k8s_client().await?; - let domain = self - .get_host_domain(k8s_client.clone(), self.score.openshift) - .await?; - let domain = format!("argo.{domain}"); + let svc = format!("argo-{}", self.score.namespace.clone()); + let domain = topology.get_domain(&svc).await?; let helm_score = argo_helm_chart_score(&self.score.namespace, self.score.openshift, &domain); @@ -68,14 +68,17 @@ impl Interpret for ArgoInterpret { .await .unwrap(); - Ok(Outcome::success(format!( - "ArgoCD installed with {} {}", - self.argo_apps.len(), - match self.argo_apps.len() { - 1 => "application", - _ => "applications", - } - ))) + Ok(Outcome::success_with_details( + format!( + "ArgoCD {} {}", + self.argo_apps.len(), + match self.argo_apps.len() { + 1 => "application", + _ => "applications", + } + ), + vec![format!("argo application: http://{}", domain)], + )) } fn get_name(&self) -> InterpretName { @@ -158,6 +161,9 @@ global: ## Used for ingresses, certificates, SSO, notifications, etc. 
domain: {domain} + securityContext: + runAsUser: null + # -- Runtime class name for all components runtimeClassName: "" @@ -469,6 +475,13 @@ redis: # -- Redis name name: redis + serviceAccount: + create: true + + securityContext: + runAsUser: null + + ## Redis image image: # -- Redis repository @@ -1039,7 +1052,7 @@ commitServer: install_only: false, repository: Some(HelmRepository::new( "argo".to_string(), - url::Url::parse("https://argoproj.github.io/argo-helm").unwrap(), + hurl!("https://argoproj.github.io/argo-helm"), true, )), } diff --git a/harmony/src/modules/application/features/mod.rs b/harmony/src/modules/application/features/mod.rs index 93f6412..f2500db 100644 --- a/harmony/src/modules/application/features/mod.rs +++ b/harmony/src/modules/application/features/mod.rs @@ -5,8 +5,8 @@ pub use endpoint::*; mod monitoring; pub use monitoring::*; -mod continuous_delivery; -pub use continuous_delivery::*; +mod packaging_deployment; +pub use packaging_deployment::*; mod helm_argocd_score; pub use helm_argocd_score::*; diff --git a/harmony/src/modules/application/features/monitoring.rs b/harmony/src/modules/application/features/monitoring.rs index 1c1c00b..1a60d00 100644 --- a/harmony/src/modules/application/features/monitoring.rs +++ b/harmony/src/modules/application/features/monitoring.rs @@ -1,10 +1,10 @@ -use std::sync::Arc; - -use crate::modules::application::{Application, ApplicationFeature}; +use crate::modules::application::{ + Application, ApplicationFeature, InstallationError, InstallationOutcome, +}; use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore; use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus; - use crate::topology::MultiTargetTopology; +use crate::topology::ingress::Ingress; use crate::{ inventory::Inventory, modules::monitoring::{ @@ -19,8 +19,12 @@ use crate::{ }; use async_trait::async_trait; use base64::{Engine as _, engine::general_purpose}; +use harmony_secret::SecretManager; +use harmony_secret_derive::Secret; use harmony_types::net::Url; use log::{debug, info}; +use serde::{Deserialize, Serialize}; +use std::sync::Arc; #[derive(Debug, Clone)] pub struct Monitoring { @@ -36,17 +40,22 @@ impl< + TenantManager + K8sclient + MultiTargetTopology - + std::fmt::Debug - + PrometheusApplicationMonitoring, + + PrometheusApplicationMonitoring + + Ingress + + std::fmt::Debug, > ApplicationFeature for Monitoring { - async fn ensure_installed(&self, topology: &T) -> Result<(), String> { + async fn ensure_installed( + &self, + topology: &T, + ) -> Result { info!("Ensuring monitoring is available for application"); let namespace = topology .get_tenant_config() .await .map(|ns| ns.name.clone()) .unwrap_or_else(|| self.application.name()); + let domain = topology.get_domain("ntfy").await.unwrap(); let mut alerting_score = ApplicationMonitoringScore { sender: CRDPrometheus { @@ -58,19 +67,17 @@ impl< }; let ntfy = NtfyScore { namespace: namespace.clone(), - host: "ntfy.harmonydemo.apps.ncd0.harmony.mcd".to_string(), + host: domain, }; ntfy.interpret(&Inventory::empty(), topology) .await .map_err(|e| e.to_string())?; - let ntfy_default_auth_username = "harmony"; - let ntfy_default_auth_password = "harmony"; + let config = SecretManager::get_or_prompt::().await.unwrap(); + let ntfy_default_auth_header = format!( "Basic {}", - general_purpose::STANDARD.encode(format!( - "{ntfy_default_auth_username}:{ntfy_default_auth_password}" - )) + 
general_purpose::STANDARD.encode(format!("{}:{}", config.username, config.password)) ); debug!("ntfy_default_auth_header: {ntfy_default_auth_header}"); @@ -100,9 +107,17 @@ impl< .interpret(&Inventory::empty(), topology) .await .map_err(|e| e.to_string())?; - Ok(()) + + Ok(InstallationOutcome::success()) } + fn name(&self) -> String { "Monitoring".to_string() } } + +#[derive(Secret, Serialize, Deserialize, Clone, Debug)] +struct NtfyAuth { + username: String, + password: String, +} diff --git a/harmony/src/modules/application/features/continuous_delivery.rs b/harmony/src/modules/application/features/packaging_deployment.rs similarity index 89% rename from harmony/src/modules/application/features/continuous_delivery.rs rename to harmony/src/modules/application/features/packaging_deployment.rs index 1bc2d9d..4fafbf0 100644 --- a/harmony/src/modules/application/features/continuous_delivery.rs +++ b/harmony/src/modules/application/features/packaging_deployment.rs @@ -10,11 +10,13 @@ use crate::{ data::Version, inventory::Inventory, modules::application::{ - ApplicationFeature, HelmPackage, OCICompliant, + ApplicationFeature, HelmPackage, InstallationError, InstallationOutcome, OCICompliant, features::{ArgoApplication, ArgoHelmScore}, }, score::Score, - topology::{DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology}, + topology::{ + DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology, ingress::Ingress, + }, }; /// ContinuousDelivery in Harmony provides this functionality : @@ -45,11 +47,11 @@ use crate::{ /// - ArgoCD to install/upgrade/rollback/inspect k8s resources /// - Kubernetes for runtime orchestration #[derive(Debug, Default, Clone)] -pub struct ContinuousDelivery { +pub struct PackagingDeployment { pub application: Arc, } -impl ContinuousDelivery { +impl PackagingDeployment { async fn deploy_to_local_k3d( &self, app_name: String, @@ -136,18 +138,28 @@ impl ContinuousDelivery { #[async_trait] impl< A: OCICompliant + HelmPackage + Clone + 'static, - T: Topology + HelmCommand + MultiTargetTopology + K8sclient + 'static, -> ApplicationFeature for ContinuousDelivery + T: Topology + HelmCommand + MultiTargetTopology + K8sclient + Ingress + 'static, +> ApplicationFeature for PackagingDeployment { - async fn ensure_installed(&self, topology: &T) -> Result<(), String> { + async fn ensure_installed( + &self, + topology: &T, + ) -> Result { let image = self.application.image_name(); + let domain = topology + .get_domain(&self.application.name()) + .await + .map_err(|e| e.to_string())?; // TODO Write CI/CD workflow files // we can autotedect the CI type using the remote url (default to github action for github // url, etc..) 
// Or ask for it when unknown - let helm_chart = self.application.build_push_helm_package(&image).await?; + let helm_chart = self + .application + .build_push_helm_package(&image, &domain) + .await?; // TODO: Make building image configurable/skippable if image already exists (prompt)") // https://git.nationtech.io/NationTech/harmony/issues/104 @@ -196,7 +208,11 @@ impl< .unwrap(); } }; - Ok(()) + + Ok(InstallationOutcome::success_with_details(vec![format!( + "{}: http://{domain}", + self.application.name() + )])) } fn name(&self) -> String { "ContinuousDelivery".to_string() diff --git a/harmony/src/modules/application/features/rhob_monitoring.rs b/harmony/src/modules/application/features/rhob_monitoring.rs index 62a5323..d87ef61 100644 --- a/harmony/src/modules/application/features/rhob_monitoring.rs +++ b/harmony/src/modules/application/features/rhob_monitoring.rs @@ -1,11 +1,14 @@ use std::sync::Arc; -use crate::modules::application::{Application, ApplicationFeature}; +use crate::modules::application::{ + Application, ApplicationFeature, InstallationError, InstallationOutcome, +}; use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore; use crate::modules::monitoring::application_monitoring::rhobs_application_monitoring_score::ApplicationRHOBMonitoringScore; use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability; use crate::topology::MultiTargetTopology; +use crate::topology::ingress::Ingress; use crate::{ inventory::Inventory, modules::monitoring::{ @@ -24,7 +27,7 @@ use harmony_types::net::Url; use log::{debug, info}; #[derive(Debug, Clone)] -pub struct RHOBMonitoring { +pub struct Monitoring { pub application: Arc, pub alert_receiver: Vec>>, } @@ -37,11 +40,15 @@ impl< + TenantManager + K8sclient + MultiTargetTopology + + Ingress + std::fmt::Debug + PrometheusApplicationMonitoring, -> ApplicationFeature for RHOBMonitoring +> ApplicationFeature for Monitoring { - async fn ensure_installed(&self, topology: &T) -> Result<(), String> { + async fn ensure_installed( + &self, + topology: &T, + ) -> Result { info!("Ensuring monitoring is available for application"); let namespace = topology .get_tenant_config() @@ -57,9 +64,13 @@ impl< application: self.application.clone(), receivers: self.alert_receiver.clone(), }; + let domain = topology + .get_domain("ntfy") + .await + .map_err(|e| format!("could not get domain {e}"))?; let ntfy = NtfyScore { namespace: namespace.clone(), - host: "ntfy.harmonydemo.apps.ncd0.harmony.mcd".to_string(), + host: domain.clone(), }; ntfy.interpret(&Inventory::empty(), topology) .await @@ -81,27 +92,33 @@ impl< .replace("=", ""); debug!("ntfy_default_auth_param: {ntfy_default_auth_param}"); - let ntfy_receiver = WebhookReceiver { name: "ntfy-webhook".to_string(), url: Url::Url( url::Url::parse( format!( - "http://ntfy.{}.svc.cluster.local/rust-web-app?auth={ntfy_default_auth_param}", - namespace.clone() + "http://{domain}/{}?auth={ntfy_default_auth_param}", + self.application.name() ) .as_str(), ) .unwrap(), ), }; - + debug!( + "ntfy webhook receiver \n{:#?}\nntfy topic: {}", + ntfy_receiver.clone(), + self.application.name() + ); alerting_score.receivers.push(Box::new(ntfy_receiver)); alerting_score .interpret(&Inventory::empty(), topology) .await .map_err(|e| e.to_string())?; - Ok(()) + Ok(InstallationOutcome::success_with_details(vec![format!( + "ntfy topic: {}", + self.application.name() + )])) } fn name(&self) -> String { "Monitoring".to_string() diff --git 
a/harmony/src/modules/application/mod.rs b/harmony/src/modules/application/mod.rs index 8e60984..b7bb973 100644 --- a/harmony/src/modules/application/mod.rs +++ b/harmony/src/modules/application/mod.rs @@ -24,8 +24,8 @@ use harmony_types::id::Id; #[derive(Clone, Debug)] pub enum ApplicationFeatureStatus { Installing, - Installed, - Failed { details: String }, + Installed { details: Vec }, + Failed { message: String }, } pub trait Application: std::fmt::Debug + Send + Sync { @@ -65,27 +65,32 @@ impl Interpret for Application .unwrap(); let _ = match feature.ensure_installed(topology).await { - Ok(()) => { + Ok(outcome) => { instrumentation::instrument(HarmonyEvent::ApplicationFeatureStateChanged { topology: topology.name().into(), application: self.application.name(), feature: feature.name(), - status: ApplicationFeatureStatus::Installed, + status: ApplicationFeatureStatus::Installed { + details: match outcome { + InstallationOutcome::Success { details } => details, + InstallationOutcome::Noop => vec![], + }, + }, }) .unwrap(); } - Err(msg) => { + Err(error) => { instrumentation::instrument(HarmonyEvent::ApplicationFeatureStateChanged { topology: topology.name().into(), application: self.application.name(), feature: feature.name(), status: ApplicationFeatureStatus::Failed { - details: msg.clone(), + message: error.to_string(), }, }) .unwrap(); return Err(InterpretError::new(format!( - "Application Interpret failed to install feature : {msg}" + "Application Interpret failed to install feature : {error}" ))); } }; diff --git a/harmony/src/modules/application/oci.rs b/harmony/src/modules/application/oci.rs index bf9f393..8b1585c 100644 --- a/harmony/src/modules/application/oci.rs +++ b/harmony/src/modules/application/oci.rs @@ -1,6 +1,5 @@ -use async_trait::async_trait; - use super::Application; +use async_trait::async_trait; #[async_trait] pub trait OCICompliant: Application { @@ -17,5 +16,10 @@ pub trait HelmPackage: Application { /// /// # Arguments /// * `image_url` - The full URL of the OCI container image to be used in the Deployment. - async fn build_push_helm_package(&self, image_url: &str) -> Result; + /// * `domain` - The domain where the application is hosted. + async fn build_push_helm_package( + &self, + image_url: &str, + domain: &str, + ) -> Result; } diff --git a/harmony/src/modules/application/rust.rs b/harmony/src/modules/application/rust.rs index 0d204cc..4874798 100644 --- a/harmony/src/modules/application/rust.rs +++ b/harmony/src/modules/application/rust.rs @@ -1,5 +1,4 @@ -use std::fs::{self, File}; -use std::io::Read; +use std::fs::{self}; use std::path::{Path, PathBuf}; use std::process; use std::sync::Arc; @@ -11,14 +10,13 @@ use dockerfile_builder::Dockerfile; use dockerfile_builder::instruction::{CMD, COPY, ENV, EXPOSE, FROM, RUN, USER, WORKDIR}; use dockerfile_builder::instruction_builder::CopyBuilder; use futures_util::StreamExt; -use log::{debug, info, log_enabled}; +use log::{debug, error, info, log_enabled, trace, warn}; use serde::Serialize; -use tar::{Archive, Builder, Header}; +use tar::{Builder, Header}; use walkdir::WalkDir; use crate::config::{REGISTRY_PROJECT, REGISTRY_URL}; use crate::{score::Score, topology::Topology}; -use harmony_types::net::Url; use super::{Application, ApplicationFeature, ApplicationInterpret, HelmPackage, OCICompliant}; @@ -58,7 +56,6 @@ pub enum RustWebFramework { #[derive(Debug, Clone, Serialize)] pub struct RustWebapp { pub name: String, - pub domain: Url, /// The path to the root of the Rust project to be containerized. 
pub project_root: PathBuf, pub service_port: u32, @@ -73,12 +70,17 @@ impl Application for RustWebapp { #[async_trait] impl HelmPackage for RustWebapp { - async fn build_push_helm_package(&self, image_url: &str) -> Result { + async fn build_push_helm_package( + &self, + image_url: &str, + domain: &str, + ) -> Result { info!("Starting Helm chart build and push for '{}'", self.name); // 1. Create the Helm chart files on disk. let chart_dir = self - .create_helm_chart_files(image_url) + .create_helm_chart_files(image_url, domain) + .await .map_err(|e| format!("Failed to create Helm chart files: {}", e))?; info!("Successfully created Helm chart files in {:?}", chart_dir); @@ -160,7 +162,7 @@ impl RustWebapp { &self, image_name: &str, ) -> Result> { - debug!("Generating Dockerfile for '{}'", self.name); + info!("Generating Dockerfile for '{}'", self.name); let dockerfile = self.get_or_build_dockerfile(); let quiet = !log_enabled!(log::Level::Debug); match dockerfile @@ -192,8 +194,41 @@ impl RustWebapp { Some(body_full(tar_data.into())), ); - while let Some(msg) = image_build_stream.next().await { - debug!("Message: {msg:?}"); + while let Some(mut msg) = image_build_stream.next().await { + trace!("Got bollard msg {msg:?}"); + match msg { + Ok(mut msg) => { + if let Some(progress) = msg.progress_detail { + info!( + "Build progress {}/{}", + progress.current.unwrap_or(0), + progress.total.unwrap_or(0) + ); + } + + if let Some(mut log) = msg.stream { + if log.ends_with('\n') { + log.pop(); + if log.ends_with('\r') { + log.pop(); + } + } + info!("{log}"); + } + + if let Some(error) = msg.error { + warn!("Build error : {error:?}"); + } + + if let Some(error) = msg.error_detail { + warn!("Build error : {error:?}"); + } + } + Err(e) => { + error!("Build failed : {e}"); + return Err(format!("Build failed : {e}").into()); + } + } } Ok(image_name.to_string()) @@ -220,7 +255,9 @@ impl RustWebapp { ".git", ".github", ".harmony_generated", + "harmony", "node_modules", + "Dockerfile.harmony", ]; let mut entries: Vec<_> = WalkDir::new(project_root) .into_iter() @@ -265,8 +302,6 @@ impl RustWebapp { let docker = Docker::connect_with_socket_defaults().unwrap(); - // let push_options = PushImageOptionsBuilder::new().tag(tag); - let mut push_image_stream = docker.push_image( image_tag, Some(PushImageOptionsBuilder::new().build()), @@ -274,6 +309,8 @@ impl RustWebapp { ); while let Some(msg) = push_image_stream.next().await { + // let msg = msg?; + // TODO this fails silently, for some reason bollard cannot push to hub.nationtech.io debug!("Message: {msg:?}"); } @@ -408,9 +445,10 @@ impl RustWebapp { } /// Creates all necessary files for a basic Helm chart. - fn create_helm_chart_files( + async fn create_helm_chart_files( &self, image_url: &str, + domain: &str, ) -> Result> { let chart_name = format!("{}-chart", self.name); let chart_dir = self @@ -460,21 +498,15 @@ ingress: enabled: true # Annotations for cert-manager to handle SSL. 
annotations: - cert-manager.io/cluster-issuer: "letsencrypt-prod" # Add other annotations like nginx ingress class if needed # kubernetes.io/ingress.class: nginx hosts: - - host: chart-example.local + - host: {} paths: - path: / pathType: ImplementationSpecific - tls: - - secretName: {}-tls - hosts: - - chart-example.local - "#, - chart_name, image_repo, image_tag, self.service_port, self.name + chart_name, image_repo, image_tag, self.service_port, domain, ); fs::write(chart_dir.join("values.yaml"), values_yaml)?; diff --git a/harmony/src/modules/cert_manager/helm.rs b/harmony/src/modules/cert_manager/helm.rs index eae0ed6..b0770f9 100644 --- a/harmony/src/modules/cert_manager/helm.rs +++ b/harmony/src/modules/cert_manager/helm.rs @@ -1,5 +1,6 @@ use std::{collections::HashMap, str::FromStr}; +use harmony_macros::hurl; use non_blank_string_rs::NonBlankString; use serde::Serialize; use url::Url; @@ -33,7 +34,7 @@ impl Score for CertManagerHelmScore { install_only: true, repository: Some(HelmRepository::new( "jetstack".to_string(), - Url::parse("https://charts.jetstack.io").unwrap(), + hurl!("https://charts.jetstack.io"), true, )), } diff --git a/harmony/src/modules/dhcp.rs b/harmony/src/modules/dhcp.rs index eff2912..e261220 100644 --- a/harmony/src/modules/dhcp.rs +++ b/harmony/src/modules/dhcp.rs @@ -69,17 +69,14 @@ impl DhcpInterpret { dhcp_server.set_pxe_options(pxe_options).await?; - Ok(Outcome::new( - InterpretStatus::SUCCESS, - format!( - "Dhcp Interpret Set next boot to [{:?}], boot_filename to [{:?}], filename to [{:?}], filename64 to [{:?}], filenameipxe to [:{:?}]", - self.score.boot_filename, - self.score.boot_filename, - self.score.filename, - self.score.filename64, - self.score.filenameipxe - ), - )) + Ok(Outcome::success(format!( + "Dhcp Interpret Set next boot to [{:?}], boot_filename to [{:?}], filename to [{:?}], filename64 to [{:?}], filenameipxe to [:{:?}]", + self.score.boot_filename, + self.score.boot_filename, + self.score.filename, + self.score.filename64, + self.score.filenameipxe + ))) } } @@ -122,8 +119,7 @@ impl Interpret for DhcpInterpret { topology.commit_config().await?; - Ok(Outcome::new( - InterpretStatus::SUCCESS, + Ok(Outcome::success( "Dhcp Interpret execution successful".to_string(), )) } @@ -197,10 +193,10 @@ impl DhcpHostBindingInterpret { } } - Ok(Outcome::new( - InterpretStatus::SUCCESS, - format!("Dhcp Interpret registered {} entries", number_new_entries), - )) + Ok(Outcome::success(format!( + "Dhcp Interpret registered {} entries", + number_new_entries + ))) } } @@ -236,12 +232,9 @@ impl Interpret for DhcpHostBindingInterpret { topology.commit_config().await?; - Ok(Outcome::new( - InterpretStatus::SUCCESS, - format!( - "Dhcp Host Binding Interpret execution successful on {} hosts", - self.score.host_binding.len() - ), - )) + Ok(Outcome::success(format!( + "Dhcp Host Binding Interpret execution successful on {} hosts", + self.score.host_binding.len() + ))) } } diff --git a/harmony/src/modules/dns.rs b/harmony/src/modules/dns.rs index 9608fa1..b0d3a1d 100644 --- a/harmony/src/modules/dns.rs +++ b/harmony/src/modules/dns.rs @@ -55,8 +55,7 @@ impl DnsInterpret { dns.register_dhcp_leases(register).await?; } - Ok(Outcome::new( - InterpretStatus::SUCCESS, + Ok(Outcome::success( "DNS Interpret execution successfull".to_string(), )) } @@ -68,13 +67,10 @@ impl DnsInterpret { let entries = &self.score.dns_entries; dns_server.ensure_hosts_registered(entries.clone()).await?; - Ok(Outcome::new( - InterpretStatus::SUCCESS, - format!( - "DnsInterpret registered {} 
hosts successfully", - entries.len() - ), - )) + Ok(Outcome::success(format!( + "DnsInterpret registered {} hosts successfully", + entries.len() + ))) } } @@ -111,8 +107,7 @@ impl Interpret for DnsInterpret { topology.commit_config().await?; - Ok(Outcome::new( - InterpretStatus::SUCCESS, + Ok(Outcome::success( "Dns Interpret execution successful".to_string(), )) } diff --git a/harmony/src/modules/helm/chart.rs b/harmony/src/modules/helm/chart.rs index 9048ce6..4b678f1 100644 --- a/harmony/src/modules/helm/chart.rs +++ b/harmony/src/modules/helm/chart.rs @@ -5,6 +5,7 @@ use crate::score::Score; use crate::topology::{HelmCommand, Topology}; use async_trait::async_trait; use harmony_types::id::Id; +use harmony_types::net::Url; use helm_wrapper_rs; use helm_wrapper_rs::blocking::{DefaultHelmExecutor, HelmExecutor}; use log::{debug, info, warn}; @@ -15,7 +16,6 @@ use std::path::Path; use std::process::{Command, Output, Stdio}; use std::str::FromStr; use temp_file::TempFile; -use url::Url; #[derive(Debug, Clone, Serialize)] pub struct HelmRepository { @@ -78,7 +78,8 @@ impl HelmChartInterpret { repo.name, repo.url, repo.force_update ); - let mut add_args = vec!["repo", "add", &repo.name, repo.url.as_str()]; + let repo_url = repo.url.to_string(); + let mut add_args = vec!["repo", "add", &repo.name, &repo_url]; if repo.force_update { add_args.push("--force-update"); } @@ -153,6 +154,10 @@ impl Interpret for HelmChartInterpret { let yaml_path: Option<&Path> = match self.score.values_yaml.as_ref() { Some(yaml_str) => { tf = temp_file::with_contents(yaml_str.as_bytes()); + debug!( + "values yaml string for chart {} :\n {yaml_str}", + self.score.chart_name + ); Some(tf.path()) } None => None, @@ -193,13 +198,10 @@ impl Interpret for HelmChartInterpret { self.score.release_name, ns ); - return Ok(Outcome::new( - InterpretStatus::SUCCESS, - format!( - "Helm Chart '{}' already installed to namespace {ns} and install_only=true", - self.score.release_name - ), - )); + return Ok(Outcome::success(format!( + "Helm Chart '{}' already installed to namespace {ns} and install_only=true", + self.score.release_name + ))); } else { info!( "Release '{}' not found in namespace '{}'. 
Proceeding with installation.", @@ -224,18 +226,18 @@ impl Interpret for HelmChartInterpret { }; match status { - helm_wrapper_rs::HelmDeployStatus::Deployed => Ok(Outcome::new( - InterpretStatus::SUCCESS, - format!("Helm Chart {} deployed", self.score.release_name), - )), - helm_wrapper_rs::HelmDeployStatus::PendingInstall => Ok(Outcome::new( - InterpretStatus::RUNNING, - format!("Helm Chart {} pending install...", self.score.release_name), - )), - helm_wrapper_rs::HelmDeployStatus::PendingUpgrade => Ok(Outcome::new( - InterpretStatus::RUNNING, - format!("Helm Chart {} pending upgrade...", self.score.release_name), - )), + helm_wrapper_rs::HelmDeployStatus::Deployed => Ok(Outcome::success(format!( + "Helm Chart {} deployed", + self.score.release_name + ))), + helm_wrapper_rs::HelmDeployStatus::PendingInstall => Ok(Outcome::running(format!( + "Helm Chart {} pending install...", + self.score.release_name + ))), + helm_wrapper_rs::HelmDeployStatus::PendingUpgrade => Ok(Outcome::running(format!( + "Helm Chart {} pending upgrade...", + self.score.release_name + ))), helm_wrapper_rs::HelmDeployStatus::Failed => Err(InterpretError::new(format!( "Helm Chart {} installation failed", self.score.release_name diff --git a/harmony/src/modules/helm/command.rs b/harmony/src/modules/helm/command.rs deleted file mode 100644 index c4d92c1..0000000 --- a/harmony/src/modules/helm/command.rs +++ /dev/null @@ -1,364 +0,0 @@ -use async_trait::async_trait; -use log::debug; -use serde::Serialize; -use std::collections::HashMap; -use std::io::ErrorKind; -use std::path::PathBuf; -use std::process::{Command, Output}; -use temp_dir::{self, TempDir}; -use temp_file::TempFile; - -use crate::data::Version; -use crate::interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}; -use crate::inventory::Inventory; -use crate::score::Score; -use crate::topology::{HelmCommand, K8sclient, Topology}; -use harmony_types::id::Id; - -#[derive(Clone)] -pub struct HelmCommandExecutor { - pub env: HashMap, - pub path: Option, - pub args: Vec, - pub api_versions: Option>, - pub kube_version: String, - pub debug: Option, - pub globals: HelmGlobals, - pub chart: HelmChart, -} - -#[derive(Clone)] -pub struct HelmGlobals { - pub chart_home: Option, - pub config_home: Option, -} - -#[derive(Debug, Clone, Serialize)] -pub struct HelmChart { - pub name: String, - pub version: Option, - pub repo: Option, - pub release_name: Option, - pub namespace: Option, - pub additional_values_files: Vec, - pub values_file: Option, - pub values_inline: Option, - pub include_crds: Option, - pub skip_hooks: Option, - pub api_versions: Option>, - pub kube_version: Option, - pub name_template: String, - pub skip_tests: Option, - pub debug: Option, -} - -impl HelmCommandExecutor { - pub fn generate(mut self) -> Result { - if self.globals.chart_home.is_none() { - self.globals.chart_home = Some(PathBuf::from("charts")); - } - - if self - .clone() - .chart - .clone() - .chart_exists_locally(self.clone().globals.chart_home.unwrap()) - .is_none() - { - if self.chart.repo.is_none() { - return Err(std::io::Error::new( - ErrorKind::Other, - "Chart doesn't exist locally and no repo specified", - )); - } - self.clone().run_command( - self.chart - .clone() - .pull_command(self.globals.chart_home.clone().unwrap()), - )?; - } - - let out = self.clone().run_command( - self.chart - .clone() - .helm_args(self.globals.chart_home.clone().unwrap()), - )?; - - // TODO: don't use unwrap here - let s = String::from_utf8(out.stdout).unwrap(); - debug!("helm 
stderr: {}", String::from_utf8(out.stderr).unwrap()); - debug!("helm status: {}", out.status); - debug!("helm output: {s}"); - - let clean = s.split_once("---").unwrap().1; - - Ok(clean.to_string()) - } - - pub fn version(self) -> Result { - let out = self.run_command(vec![ - "version".to_string(), - "-c".to_string(), - "--short".to_string(), - ])?; - - // TODO: don't use unwrap - Ok(String::from_utf8(out.stdout).unwrap()) - } - - pub fn run_command(mut self, mut args: Vec) -> Result { - if let Some(d) = self.debug { - if d { - args.push("--debug".to_string()); - } - } - - let path = if let Some(p) = self.path { - p - } else { - PathBuf::from("helm") - }; - - let config_home = match self.globals.config_home { - Some(p) => p, - None => PathBuf::from(TempDir::new()?.path()), - }; - - if let Some(yaml_str) = self.chart.values_inline { - let tf: TempFile = temp_file::with_contents(yaml_str.as_bytes()); - self.chart - .additional_values_files - .push(PathBuf::from(tf.path())); - }; - - self.env.insert( - "HELM_CONFIG_HOME".to_string(), - config_home.to_str().unwrap().to_string(), - ); - self.env.insert( - "HELM_CACHE_HOME".to_string(), - config_home.to_str().unwrap().to_string(), - ); - self.env.insert( - "HELM_DATA_HOME".to_string(), - config_home.to_str().unwrap().to_string(), - ); - - Command::new(path).envs(self.env).args(args).output() - } -} - -impl HelmChart { - pub fn chart_exists_locally(self, chart_home: PathBuf) -> Option { - let chart_path = - PathBuf::from(chart_home.to_str().unwrap().to_string() + "/" + &self.name.to_string()); - - if chart_path.exists() { - Some(chart_path) - } else { - None - } - } - - pub fn pull_command(self, chart_home: PathBuf) -> Vec { - let mut args = vec![ - "pull".to_string(), - "--untar".to_string(), - "--untardir".to_string(), - chart_home.to_str().unwrap().to_string(), - ]; - - match self.repo { - Some(r) => { - if r.starts_with("oci://") { - args.push( - r.trim_end_matches("/").to_string() + "/" + self.name.clone().as_str(), - ); - } else { - args.push("--repo".to_string()); - args.push(r.to_string()); - - args.push(self.name); - } - } - None => args.push(self.name), - }; - - if let Some(v) = self.version { - args.push("--version".to_string()); - args.push(v.to_string()); - } - - args - } - - pub fn helm_args(self, chart_home: PathBuf) -> Vec { - let mut args: Vec = vec!["template".to_string()]; - - match self.release_name { - Some(rn) => args.push(rn.to_string()), - None => args.push("--generate-name".to_string()), - } - - args.push( - PathBuf::from(chart_home.to_str().unwrap().to_string() + "/" + self.name.as_str()) - .to_str() - .unwrap() - .to_string(), - ); - - if let Some(n) = self.namespace { - args.push("--namespace".to_string()); - args.push(n.to_string()); - } - - if let Some(f) = self.values_file { - args.push("-f".to_string()); - args.push(f.to_str().unwrap().to_string()); - } - - for f in self.additional_values_files { - args.push("-f".to_string()); - args.push(f.to_str().unwrap().to_string()); - } - - if let Some(vv) = self.api_versions { - for v in vv { - args.push("--api-versions".to_string()); - args.push(v); - } - } - - if let Some(kv) = self.kube_version { - args.push("--kube-version".to_string()); - args.push(kv); - } - - if let Some(crd) = self.include_crds { - if crd { - args.push("--include-crds".to_string()); - } - } - - if let Some(st) = self.skip_tests { - if st { - args.push("--skip-tests".to_string()); - } - } - - if let Some(sh) = self.skip_hooks { - if sh { - args.push("--no-hooks".to_string()); - } - } - - if let 
Some(d) = self.debug { - if d { - args.push("--debug".to_string()); - } - } - - args - } -} - -#[derive(Debug, Clone, Serialize)] -pub struct HelmChartScoreV2 { - pub chart: HelmChart, -} - -impl Score for HelmChartScoreV2 { - fn create_interpret(&self) -> Box> { - Box::new(HelmChartInterpretV2 { - score: self.clone(), - }) - } - - fn name(&self) -> String { - format!( - "{} {} HelmChartScoreV2", - self.chart - .release_name - .clone() - .unwrap_or("Unknown".to_string()), - self.chart.name - ) - } -} - -#[derive(Debug, Serialize)] -pub struct HelmChartInterpretV2 { - pub score: HelmChartScoreV2, -} -impl HelmChartInterpretV2 {} - -#[async_trait] -impl Interpret for HelmChartInterpretV2 { - async fn execute( - &self, - _inventory: &Inventory, - _topology: &T, - ) -> Result { - let _ns = self - .score - .chart - .namespace - .as_ref() - .unwrap_or_else(|| todo!("Get namespace from active kubernetes cluster")); - - let helm_executor = HelmCommandExecutor { - env: HashMap::new(), - path: None, - args: vec![], - api_versions: None, - kube_version: "v1.33.0".to_string(), - debug: Some(false), - globals: HelmGlobals { - chart_home: None, - config_home: None, - }, - chart: self.score.chart.clone(), - }; - - // let mut helm_options = Vec::new(); - // if self.score.create_namespace { - // helm_options.push(NonBlankString::from_str("--create-namespace").unwrap()); - // } - - let res = helm_executor.generate(); - - let _output = match res { - Ok(output) => output, - Err(err) => return Err(InterpretError::new(err.to_string())), - }; - - // TODO: implement actually applying the YAML from the templating in the generate function to a k8s cluster, having trouble passing in straight YAML into the k8s client - - // let k8s_resource = k8s_openapi::serde_json::from_str(output.as_str()).unwrap(); - - // let client = topology - // .k8s_client() - // .await - // .expect("Environment should provide enough information to instanciate a client") - // .apply_namespaced(&vec![output], Some(ns.to_string().as_str())); - // match client.apply_yaml(output) { - // Ok(_) => return Ok(Outcome::success("Helm chart deployed".to_string())), - // Err(e) => return Err(InterpretError::new(e)), - // } - - Ok(Outcome::success("Helm chart deployed".to_string())) - } - - fn get_name(&self) -> InterpretName { - InterpretName::HelmCommand - } - fn get_version(&self) -> Version { - todo!() - } - fn get_status(&self) -> InterpretStatus { - todo!() - } - fn get_children(&self) -> Vec { - todo!() - } -} diff --git a/harmony/src/modules/helm/mod.rs b/harmony/src/modules/helm/mod.rs index de69381..831fbe5 100644 --- a/harmony/src/modules/helm/mod.rs +++ b/harmony/src/modules/helm/mod.rs @@ -1,2 +1 @@ pub mod chart; -pub mod command; diff --git a/harmony/src/modules/inventory/mod.rs b/harmony/src/modules/inventory/mod.rs index 0274dc4..174231b 100644 --- a/harmony/src/modules/inventory/mod.rs +++ b/harmony/src/modules/inventory/mod.rs @@ -133,10 +133,9 @@ impl Interpret for DiscoverInventoryAgentInterpret { }, ) .await; - Ok(Outcome { - status: InterpretStatus::SUCCESS, - message: "Discovery process completed successfully".to_string(), - }) + Ok(Outcome::success( + "Discovery process completed successfully".to_string(), + )) } fn get_name(&self) -> InterpretName { diff --git a/harmony/src/modules/k8s/ingress.rs b/harmony/src/modules/k8s/ingress.rs index d07d82f..045f1f8 100644 --- a/harmony/src/modules/k8s/ingress.rs +++ b/harmony/src/modules/k8s/ingress.rs @@ -1,11 +1,15 @@ +use async_trait::async_trait; use harmony_macros::ingress_path; +use 
harmony_types::id::Id; use k8s_openapi::api::networking::v1::Ingress; use log::{debug, trace}; use serde::Serialize; use serde_json::json; use crate::{ - interpret::Interpret, + data::Version, + interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, + inventory::Inventory, score::Score, topology::{K8sclient, Topology}, }; @@ -40,6 +44,7 @@ pub struct K8sIngressScore { pub path: Option, pub path_type: Option, pub namespace: Option, + pub ingress_class_name: Option, } impl Score for K8sIngressScore { @@ -54,12 +59,18 @@ impl Score for K8sIngressScore { None => PathType::Prefix, }; + let ingress_class = match self.ingress_class_name.clone() { + Some(ingress_class_name) => ingress_class_name, + None => "\"default\"".to_string(), + }; + let ingress = json!( { "metadata": { "name": self.name.to_string(), }, "spec": { + "ingressClassName": ingress_class.as_str(), "rules": [ { "host": self.host.to_string(), "http": { @@ -90,11 +101,12 @@ impl Score for K8sIngressScore { "Successfully built Ingress for host {:?}", ingress.metadata.name ); - Box::new(K8sResourceInterpret { - score: K8sResourceScore::single( - ingress.clone(), - self.namespace.clone().map(|f| f.to_string()), - ), + + Box::new(K8sIngressInterpret { + ingress, + service: self.name.to_string(), + namespace: self.namespace.clone().map(|f| f.to_string()), + host: self.host.clone(), }) } @@ -102,3 +114,62 @@ impl Score for K8sIngressScore { format!("{} K8sIngressScore", self.name) } } + +#[derive(std::fmt::Debug)] +struct K8sIngressInterpret { + ingress: Ingress, + service: String, + namespace: Option, + host: fqdn::FQDN, +} + +#[async_trait] +impl Interpret for K8sIngressInterpret { + async fn execute( + &self, + inventory: &Inventory, + topology: &T, + ) -> Result { + let result = K8sResourceInterpret { + score: K8sResourceScore::single(self.ingress.clone(), self.namespace.clone()), + } + .execute(inventory, topology) + .await; + + match result { + Ok(outcome) => match outcome.status { + InterpretStatus::SUCCESS => { + let details = match &self.namespace { + Some(namespace) => { + vec![format!( + "{} ({namespace}): http://{}", + self.service, self.host + )] + } + None => vec![format!("{}: {}", self.service, self.host)], + }; + + Ok(Outcome::success_with_details(outcome.message, details)) + } + _ => Ok(outcome), + }, + Err(e) => Err(e), + } + } + + fn get_name(&self) -> InterpretName { + InterpretName::K8sIngress + } + + fn get_version(&self) -> Version { + Version::from("0.0.1").unwrap() + } + + fn get_status(&self) -> InterpretStatus { + todo!() + } + + fn get_children(&self) -> Vec { + vec![] + } +} diff --git a/harmony/src/modules/lamp.rs b/harmony/src/modules/lamp.rs index 66ca45e..a33fa1d 100644 --- a/harmony/src/modules/lamp.rs +++ b/harmony/src/modules/lamp.rs @@ -147,6 +147,7 @@ impl Interpret for LAMPInterpret { port: 8080, path: Some(ingress_path), path_type: None, + ingress_class_name: None, namespace: self .get_namespace() .map(|nbs| fqdn!(nbs.to_string().as_str())), diff --git a/harmony/src/modules/monitoring/alert_channel/discord_alert_channel.rs b/harmony/src/modules/monitoring/alert_channel/discord_alert_channel.rs index 748c677..8bef793 100644 --- a/harmony/src/modules/monitoring/alert_channel/discord_alert_channel.rs +++ b/harmony/src/modules/monitoring/alert_channel/discord_alert_channel.rs @@ -35,6 +35,24 @@ pub struct DiscordWebhook { #[async_trait] impl AlertReceiver for DiscordWebhook { async fn install(&self, sender: &RHOBObservability) -> Result { + let ns = sender.namespace.clone(); + 
let secret_name = format!("{}-secret", self.name.clone()); + let webhook_key = format!("{}", self.url.clone()); + + let mut string_data = BTreeMap::new(); + string_data.insert("webhook-url".to_string(), webhook_key.clone()); + + let secret = Secret { + metadata: kube::core::ObjectMeta { + name: Some(secret_name.clone()), + ..Default::default() + }, + string_data: Some(string_data), + type_: Some("Opaque".to_string()), + ..Default::default() + }; + + let _ = sender.client.apply(&secret, Some(&ns)).await; let spec = crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::AlertmanagerConfigSpec { data: json!({ "route": { @@ -43,9 +61,14 @@ impl AlertReceiver for DiscordWebhook { "receivers": [ { "name": self.name, - "webhookConfigs": [ + "discordConfigs": [ { - "url": self.url, + "apiURL": { + "name": secret_name, + "key": "webhook-url", + }, + "title": "{{ template \"discord.default.title\" . }}", + "message": "{{ template \"discord.default.message\" . }}" } ] } diff --git a/harmony/src/modules/monitoring/alert_channel/webhook_receiver.rs b/harmony/src/modules/monitoring/alert_channel/webhook_receiver.rs index 52124ff..1b20df3 100644 --- a/harmony/src/modules/monitoring/alert_channel/webhook_receiver.rs +++ b/harmony/src/modules/monitoring/alert_channel/webhook_receiver.rs @@ -43,6 +43,11 @@ impl AlertReceiver for WebhookReceiver { "webhookConfigs": [ { "url": self.url, + "httpConfig": { + "tlsConfig": { + "insecureSkipVerify": true + } + } } ] } diff --git a/harmony/src/modules/monitoring/application_monitoring/application_monitoring_score.rs b/harmony/src/modules/monitoring/application_monitoring/application_monitoring_score.rs index f4707a8..8246d15 100644 --- a/harmony/src/modules/monitoring/application_monitoring/application_monitoring_score.rs +++ b/harmony/src/modules/monitoring/application_monitoring/application_monitoring_score.rs @@ -68,7 +68,9 @@ impl> Interpret PreparationOutcome::Success { details: _ } => { Ok(Outcome::success("Prometheus installed".into())) } - PreparationOutcome::Noop => Ok(Outcome::noop()), + PreparationOutcome::Noop => { + Ok(Outcome::noop("Prometheus installation skipped".into())) + } }, Err(err) => Err(InterpretError::from(err)), } diff --git a/harmony/src/modules/monitoring/application_monitoring/rhobs_application_monitoring_score.rs b/harmony/src/modules/monitoring/application_monitoring/rhobs_application_monitoring_score.rs index 17e42c3..5f5127f 100644 --- a/harmony/src/modules/monitoring/application_monitoring/rhobs_application_monitoring_score.rs +++ b/harmony/src/modules/monitoring/application_monitoring/rhobs_application_monitoring_score.rs @@ -70,7 +70,9 @@ impl> Interpret PreparationOutcome::Success { details: _ } => { Ok(Outcome::success("Prometheus installed".into())) } - PreparationOutcome::Noop => Ok(Outcome::noop()), + PreparationOutcome::Noop => { + Ok(Outcome::noop("Prometheus installation skipped".into())) + } }, Err(err) => Err(InterpretError::from(err)), } diff --git a/harmony/src/modules/monitoring/kube_prometheus/crd/rhob_monitoring_stack.rs b/harmony/src/modules/monitoring/kube_prometheus/crd/rhob_monitoring_stack.rs index bd542e9..d500891 100644 --- a/harmony/src/modules/monitoring/kube_prometheus/crd/rhob_monitoring_stack.rs +++ b/harmony/src/modules/monitoring/kube_prometheus/crd/rhob_monitoring_stack.rs @@ -4,7 +4,9 @@ use kube::CustomResource; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; -use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::LabelSelector; +use 
crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::{ + LabelSelector, PrometheusSpec, +}; /// MonitoringStack CRD for monitoring.rhobs/v1alpha1 #[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)] diff --git a/harmony/src/modules/monitoring/mod.rs b/harmony/src/modules/monitoring/mod.rs index b93f0c6..edda516 100644 --- a/harmony/src/modules/monitoring/mod.rs +++ b/harmony/src/modules/monitoring/mod.rs @@ -4,4 +4,5 @@ pub mod application_monitoring; pub mod grafana; pub mod kube_prometheus; pub mod ntfy; +pub mod okd; pub mod prometheus; diff --git a/harmony/src/modules/monitoring/ntfy/helm/ntfy_helm_chart.rs b/harmony/src/modules/monitoring/ntfy/helm/ntfy_helm_chart.rs index ecd9e01..57fffab 100644 --- a/harmony/src/modules/monitoring/ntfy/helm/ntfy_helm_chart.rs +++ b/harmony/src/modules/monitoring/ntfy/helm/ntfy_helm_chart.rs @@ -45,6 +45,12 @@ service: ingress: enabled: {ingress_enabled} + hosts: + - host: {host} + paths: + - path: / + pathType: ImplementationSpecific + route: enabled: {route_enabled} diff --git a/harmony/src/modules/monitoring/ntfy/ntfy.rs b/harmony/src/modules/monitoring/ntfy/ntfy.rs index 87ed580..4ed342b 100644 --- a/harmony/src/modules/monitoring/ntfy/ntfy.rs +++ b/harmony/src/modules/monitoring/ntfy/ntfy.rs @@ -113,7 +113,13 @@ impl Interpret f .await?; info!("user added"); - Ok(Outcome::success("Ntfy installed".to_string())) + Ok(Outcome::success_with_details( + "Ntfy installed".to_string(), + vec![format!( + "Ntfy ({}): http://{}", + self.score.namespace, self.score.host + )], + )) } fn get_name(&self) -> InterpretName { diff --git a/harmony/src/modules/monitoring/okd/enable_user_workload.rs b/harmony/src/modules/monitoring/okd/enable_user_workload.rs new file mode 100644 index 0000000..b322b4d --- /dev/null +++ b/harmony/src/modules/monitoring/okd/enable_user_workload.rs @@ -0,0 +1,149 @@ +use std::{collections::BTreeMap, sync::Arc}; + +use crate::{ + data::Version, + interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, + inventory::Inventory, + score::Score, + topology::{K8sclient, Topology, k8s::K8sClient}, +}; +use async_trait::async_trait; +use harmony_types::id::Id; +use k8s_openapi::api::core::v1::ConfigMap; +use kube::api::ObjectMeta; +use serde::Serialize; + +#[derive(Clone, Debug, Serialize)] +pub struct OpenshiftUserWorkloadMonitoring {} + +impl Score for OpenshiftUserWorkloadMonitoring { + fn name(&self) -> String { + "OpenshiftUserWorkloadMonitoringScore".to_string() + } + + fn create_interpret(&self) -> Box> { + Box::new(OpenshiftUserWorkloadMonitoringInterpret {}) + } +} + +#[derive(Clone, Debug, Serialize)] +pub struct OpenshiftUserWorkloadMonitoringInterpret {} + +#[async_trait] +impl Interpret for OpenshiftUserWorkloadMonitoringInterpret { + async fn execute( + &self, + _inventory: &Inventory, + topology: &T, + ) -> Result { + let client = topology.k8s_client().await.unwrap(); + self.update_cluster_monitoring_config_cm(&client).await?; + self.update_user_workload_monitoring_config_cm(&client) + .await?; + self.verify_user_workload(&client).await?; + Ok(Outcome::success( + "successfully enabled user-workload-monitoring".to_string(), + )) + } + + fn get_name(&self) -> InterpretName { + InterpretName::Custom("OpenshiftUserWorkloadMonitoring") + } + + fn get_version(&self) -> Version { + todo!() + } + + fn get_status(&self) -> InterpretStatus { + todo!() + } + + fn get_children(&self) -> Vec { + todo!() + } +} + +impl OpenshiftUserWorkloadMonitoringInterpret { + pub async fn 
update_cluster_monitoring_config_cm( + &self, + client: &Arc, + ) -> Result { + let mut data = BTreeMap::new(); + data.insert( + "config.yaml".to_string(), + r#" +enableUserWorkload: true +alertmanagerMain: + enableUserAlertmanagerConfig: true +"# + .to_string(), + ); + + let cm = ConfigMap { + metadata: ObjectMeta { + name: Some("cluster-monitoring-config".to_string()), + namespace: Some("openshift-monitoring".to_string()), + ..Default::default() + }, + data: Some(data), + ..Default::default() + }; + client.apply(&cm, Some("openshift-monitoring")).await?; + + Ok(Outcome::success( + "updated cluster-monitoring-config-map".to_string(), + )) + } + + pub async fn update_user_workload_monitoring_config_cm( + &self, + client: &Arc, + ) -> Result { + let mut data = BTreeMap::new(); + data.insert( + "config.yaml".to_string(), + r#" +alertmanager: + enabled: true + enableAlertmanagerConfig: true +"# + .to_string(), + ); + let cm = ConfigMap { + metadata: ObjectMeta { + name: Some("user-workload-monitoring-config".to_string()), + namespace: Some("openshift-user-workload-monitoring".to_string()), + ..Default::default() + }, + data: Some(data), + ..Default::default() + }; + client + .apply(&cm, Some("openshift-user-workload-monitoring")) + .await?; + + Ok(Outcome::success( + "updated openshift-user-monitoring-config-map".to_string(), + )) + } + + pub async fn verify_user_workload( + &self, + client: &Arc, + ) -> Result { + let namespace = "openshift-user-workload-monitoring"; + let alertmanager_name = "alertmanager-user-workload-0"; + let prometheus_name = "prometheus-user-workload-0"; + client + .wait_for_pod_ready(alertmanager_name, Some(namespace)) + .await?; + client + .wait_for_pod_ready(prometheus_name, Some(namespace)) + .await?; + + Ok(Outcome::success(format!( + "pods: {}, {} ready in ns: {}", + alertmanager_name, prometheus_name, namespace + ))) + } +} diff --git a/harmony/src/modules/monitoring/okd/mod.rs b/harmony/src/modules/monitoring/okd/mod.rs new file mode 100644 index 0000000..50339ba --- /dev/null +++ b/harmony/src/modules/monitoring/okd/mod.rs @@ -0,0 +1 @@ +pub mod enable_user_workload; diff --git a/harmony/src/modules/okd/bootstrap_01_prepare.rs b/harmony/src/modules/okd/bootstrap_01_prepare.rs index d3409e2..57b71d9 100644 --- a/harmony/src/modules/okd/bootstrap_01_prepare.rs +++ b/harmony/src/modules/okd/bootstrap_01_prepare.rs @@ -1,19 +1,19 @@ -use async_trait::async_trait; -use derive_new::new; -use harmony_types::id::Id; -use log::{error, info, warn}; -use serde::Serialize; - use crate::{ data::Version, hardware::PhysicalHost, infra::inventory::InventoryRepositoryFactory, interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, inventory::{HostRole, Inventory}, - modules::inventory::{DiscoverHostForRoleScore, LaunchDiscoverInventoryAgentScore}, + modules::inventory::DiscoverHostForRoleScore, score::Score, topology::HAClusterTopology, }; +use async_trait::async_trait; +use derive_new::new; +use harmony_types::id::Id; +use log::info; +use serde::Serialize; + // ------------------------------------------------------------------------------------------------- // Step 01: Inventory (default PXE + Kickstart in RAM + Rust agent) // - This score exposes/ensures the default inventory assets and waits for discoveries. @@ -109,12 +109,9 @@ When you can dig them, confirm to continue. 
.await?; } - Ok(Outcome::new( - InterpretStatus::SUCCESS, - format!( - "Found and assigned bootstrap node: {}", - bootstrap_host.unwrap().summary() - ), - )) + Ok(Outcome::success(format!( + "Found and assigned bootstrap node: {}", + bootstrap_host.unwrap().summary() + ))) } } diff --git a/harmony/src/modules/okd/bootstrap_02_bootstrap.rs b/harmony/src/modules/okd/bootstrap_02_bootstrap.rs index 5b940fb..e9b3a6a 100644 --- a/harmony/src/modules/okd/bootstrap_02_bootstrap.rs +++ b/harmony/src/modules/okd/bootstrap_02_bootstrap.rs @@ -1,25 +1,13 @@ -use std::{fmt::Write, path::PathBuf}; - -use async_trait::async_trait; -use derive_new::new; -use harmony_secret::SecretManager; -use harmony_types::id::Id; -use log::{debug, error, info, warn}; -use serde::{Deserialize, Serialize}; -use tokio::{fs::File, io::AsyncWriteExt, process::Command}; - use crate::{ config::secret::{RedhatSecret, SshKeyPair}, data::{FileContent, FilePath, Version}, hardware::PhysicalHost, infra::inventory::InventoryRepositoryFactory, - instrumentation::{HarmonyEvent, instrument}, interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, inventory::{HostRole, Inventory}, modules::{ dhcp::DhcpHostBindingScore, http::{IPxeMacBootFileScore, StaticFilesHttpScore}, - inventory::LaunchDiscoverInventoryAgentScore, okd::{ bootstrap_load_balancer::OKDBootstrapLoadBalancerScore, templates::{BootstrapIpxeTpl, InstallConfigYaml}, @@ -28,6 +16,15 @@ use crate::{ score::Score, topology::{HAClusterTopology, HostBinding}, }; +use async_trait::async_trait; +use derive_new::new; +use harmony_secret::SecretManager; +use harmony_types::id::Id; +use log::{debug, info}; +use serde::Serialize; +use std::path::PathBuf; +use tokio::{fs::File, io::AsyncWriteExt, process::Command}; + // ------------------------------------------------------------------------------------------------- // Step 02: Bootstrap // - Select bootstrap node (from discovered set). @@ -313,7 +310,7 @@ impl OKDSetup02BootstrapInterpret { info!("[Bootstrap] Rebooting bootstrap node via SSH"); // TODO reboot programatically, there are some logical checks and refactoring to do such as // accessing the bootstrap node config (ip address) from the inventory - let confirmation = inquire::Confirm::new( + let _ = inquire::Confirm::new( "Now reboot the bootstrap node so it picks up its pxe boot file. 
Press enter when ready.", ) .prompt() @@ -379,9 +376,6 @@ impl Interpret for OKDSetup02BootstrapInterpret { self.reboot_target().await?; self.wait_for_bootstrap_complete().await?; - Ok(Outcome::new( - InterpretStatus::SUCCESS, - "Bootstrap phase complete".into(), - )) + Ok(Outcome::success("Bootstrap phase complete".into())) } } diff --git a/harmony/src/modules/okd/bootstrap_03_control_plane.rs b/harmony/src/modules/okd/bootstrap_03_control_plane.rs index a387e1e..af8e71f 100644 --- a/harmony/src/modules/okd/bootstrap_03_control_plane.rs +++ b/harmony/src/modules/okd/bootstrap_03_control_plane.rs @@ -1,11 +1,3 @@ -use std::{fmt::Write, path::PathBuf}; - -use async_trait::async_trait; -use derive_new::new; -use harmony_types::id::Id; -use log::{debug, info}; -use serde::Serialize; - use crate::{ data::Version, hardware::PhysicalHost, @@ -13,12 +5,20 @@ use crate::{ interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, inventory::{HostRole, Inventory}, modules::{ - dhcp::DhcpHostBindingScore, http::IPxeMacBootFileScore, - inventory::DiscoverHostForRoleScore, okd::templates::BootstrapIpxeTpl, + dhcp::DhcpHostBindingScore, + http::IPxeMacBootFileScore, + inventory::DiscoverHostForRoleScore, + okd::{host_network::HostNetworkConfigurationScore, templates::BootstrapIpxeTpl}, }, score::Score, topology::{HAClusterTopology, HostBinding}, }; +use async_trait::async_trait; +use derive_new::new; +use harmony_types::id::Id; +use log::{debug, info}; +use serde::Serialize; + // ------------------------------------------------------------------------------------------------- // Step 03: Control Plane // - Render per-MAC PXE & ignition for cp0/cp1/cp2. @@ -30,7 +30,7 @@ pub struct OKDSetup03ControlPlaneScore {} impl Score for OKDSetup03ControlPlaneScore { fn create_interpret(&self) -> Box> { - Box::new(OKDSetup03ControlPlaneInterpret::new(self.clone())) + Box::new(OKDSetup03ControlPlaneInterpret::new()) } fn name(&self) -> String { @@ -40,17 +40,15 @@ impl Score for OKDSetup03ControlPlaneScore { #[derive(Debug, Clone)] pub struct OKDSetup03ControlPlaneInterpret { - score: OKDSetup03ControlPlaneScore, version: Version, status: InterpretStatus, } impl OKDSetup03ControlPlaneInterpret { - pub fn new(score: OKDSetup03ControlPlaneScore) -> Self { + pub fn new() -> Self { let version = Version::from("1.0.0").unwrap(); Self { version, - score, status: InterpretStatus::QUEUED, } } @@ -161,7 +159,7 @@ impl OKDSetup03ControlPlaneInterpret { } .to_string(); - debug!("[ControlPlane] iPXE content template:\n{}", content); + debug!("[ControlPlane] iPXE content template:\n{content}"); // Create and apply an iPXE boot file for each node. for node in nodes { @@ -191,16 +189,13 @@ impl OKDSetup03ControlPlaneInterpret { /// Prompts the user to reboot the target control plane nodes. async fn reboot_targets(&self, nodes: &Vec) -> Result<(), InterpretError> { let node_ids: Vec = nodes.iter().map(|n| n.id.to_string()).collect(); - info!( - "[ControlPlane] Requesting reboot for control plane nodes: {:?}", - node_ids - ); + info!("[ControlPlane] Requesting reboot for control plane nodes: {node_ids:?}",); let confirmation = inquire::Confirm::new( &format!("Please reboot the {} control plane nodes ({}) to apply their PXE configuration. 
Press enter when ready.", nodes.len(), node_ids.join(", ")), ) .prompt() - .map_err(|e| InterpretError::new(format!("User prompt failed: {}", e)))?; + .map_err(|e| InterpretError::new(format!("User prompt failed: {e}")))?; if !confirmation { return Err(InterpretError::new( @@ -212,14 +207,23 @@ impl OKDSetup03ControlPlaneInterpret { } /// Placeholder for automating network bonding configuration. - async fn persist_network_bond(&self) -> Result<(), InterpretError> { - // Generate MC or NNCP from inventory NIC data; apply via ignition or post-join. - info!("[ControlPlane] Ensuring persistent bonding via MachineConfig/NNCP"); + async fn persist_network_bond( + &self, + inventory: &Inventory, + topology: &HAClusterTopology, + hosts: &Vec, + ) -> Result<(), InterpretError> { + info!("[ControlPlane] Ensuring persistent bonding"); + let score = HostNetworkConfigurationScore { + hosts: hosts.clone(), + }; + score.interpret(inventory, topology).await?; + inquire::Confirm::new( "Network configuration for control plane nodes is not automated yet. Configure it manually if needed.", ) .prompt() - .map_err(|e| InterpretError::new(format!("User prompt failed: {}", e)))?; + .map_err(|e| InterpretError::new(format!("User prompt failed: {e}")))?; Ok(()) } @@ -262,15 +266,15 @@ impl Interpret for OKDSetup03ControlPlaneInterpret { self.reboot_targets(&nodes).await?; // 5. Placeholder for post-boot network configuration (e.g., bonding). - self.persist_network_bond().await?; + self.persist_network_bond(inventory, topology, &nodes) + .await?; // TODO: Implement a step to wait for the control plane nodes to join the cluster // and for the cluster operators to become available. This would be similar to // the `wait-for bootstrap-complete` command. info!("[ControlPlane] Provisioning initiated. Monitor the cluster convergence manually."); - Ok(Outcome::new( - InterpretStatus::SUCCESS, + Ok(Outcome::success( "Control plane provisioning has been successfully initiated.".into(), )) } diff --git a/harmony/src/modules/okd/bootstrap_04_workers.rs b/harmony/src/modules/okd/bootstrap_04_workers.rs index d5ed87c..461cab9 100644 --- a/harmony/src/modules/okd/bootstrap_04_workers.rs +++ b/harmony/src/modules/okd/bootstrap_04_workers.rs @@ -1,33 +1,17 @@ -use std::{fmt::Write, path::PathBuf}; - use async_trait::async_trait; use derive_new::new; -use harmony_secret::SecretManager; use harmony_types::id::Id; -use log::{debug, error, info, warn}; -use serde::{Deserialize, Serialize}; -use tokio::{fs::File, io::AsyncWriteExt, process::Command}; +use log::info; +use serde::Serialize; use crate::{ - config::secret::{RedhatSecret, SshKeyPair}, - data::{FileContent, FilePath, Version}, - hardware::PhysicalHost, - infra::inventory::InventoryRepositoryFactory, - instrumentation::{HarmonyEvent, instrument}, + data::Version, interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, - inventory::{HostRole, Inventory}, - modules::{ - dhcp::DhcpHostBindingScore, - http::{IPxeMacBootFileScore, StaticFilesHttpScore}, - inventory::LaunchDiscoverInventoryAgentScore, - okd::{ - bootstrap_load_balancer::OKDBootstrapLoadBalancerScore, - templates::{BootstrapIpxeTpl, InstallConfigYaml}, - }, - }, + inventory::Inventory, score::Score, - topology::{HAClusterTopology, HostBinding}, + topology::HAClusterTopology, }; + // ------------------------------------------------------------------------------------------------- // Step 04: Workers // - Render per-MAC PXE & ignition for workers; join nodes. 
@@ -94,9 +78,6 @@ impl Interpret for OKDSetup04WorkersInterpret { _topology: &HAClusterTopology, ) -> Result { self.render_and_reboot().await?; - Ok(Outcome::new( - InterpretStatus::SUCCESS, - "Workers provisioned".into(), - )) + Ok(Outcome::success("Workers provisioned".into())) } } diff --git a/harmony/src/modules/okd/bootstrap_05_sanity_check.rs b/harmony/src/modules/okd/bootstrap_05_sanity_check.rs index f1a4c2a..23a24b5 100644 --- a/harmony/src/modules/okd/bootstrap_05_sanity_check.rs +++ b/harmony/src/modules/okd/bootstrap_05_sanity_check.rs @@ -1,33 +1,16 @@ -use std::{fmt::Write, path::PathBuf}; - +use crate::{ + data::Version, + interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, + inventory::Inventory, + score::Score, + topology::HAClusterTopology, +}; use async_trait::async_trait; use derive_new::new; -use harmony_secret::SecretManager; use harmony_types::id::Id; -use log::{debug, error, info, warn}; -use serde::{Deserialize, Serialize}; -use tokio::{fs::File, io::AsyncWriteExt, process::Command}; +use log::info; +use serde::Serialize; -use crate::{ - config::secret::{RedhatSecret, SshKeyPair}, - data::{FileContent, FilePath, Version}, - hardware::PhysicalHost, - infra::inventory::InventoryRepositoryFactory, - instrumentation::{HarmonyEvent, instrument}, - interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, - inventory::{HostRole, Inventory}, - modules::{ - dhcp::DhcpHostBindingScore, - http::{IPxeMacBootFileScore, StaticFilesHttpScore}, - inventory::LaunchDiscoverInventoryAgentScore, - okd::{ - bootstrap_load_balancer::OKDBootstrapLoadBalancerScore, - templates::{BootstrapIpxeTpl, InstallConfigYaml}, - }, - }, - score::Score, - topology::{HAClusterTopology, HostBinding}, -}; // ------------------------------------------------------------------------------------------------- // Step 05: Sanity Check // - Validate API reachability, ClusterOperators, ingress, and SDN status. 
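> Editor's note — the Step 05 comment above says the sanity check should validate API reachability, ClusterOperators, ingress, and SDN status, but `run_checks()` itself is not part of this diff. The following is a rough, hedged sketch of one way such a check could shell out to the `oc` CLI; it assumes an `oc` binary and a valid kubeconfig are available and is not the interpret's real implementation.

```rust
use std::process::Command;

/// Hypothetical sketch: returns true when the cluster API answers and
/// `oc get clusteroperators` exits cleanly. Not Harmony's actual run_checks().
fn cluster_looks_healthy() -> bool {
    // API reachability: ask oc which server the current context points at.
    let api_ok = Command::new("oc")
        .args(["whoami", "--show-server"])
        .output()
        .map(|o| o.status.success())
        .unwrap_or(false);

    // ClusterOperators: a failing exit status here usually means the API is
    // unreachable or the resource list could not be retrieved.
    let operators_ok = Command::new("oc")
        .args(["get", "clusteroperators", "-o", "name"])
        .output()
        .map(|o| o.status.success())
        .unwrap_or(false);

    api_ok && operators_ok
}
```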
@@ -93,9 +76,6 @@ impl Interpret for OKDSetup05SanityCheckInterpret { _topology: &HAClusterTopology, ) -> Result { self.run_checks().await?; - Ok(Outcome::new( - InterpretStatus::SUCCESS, - "Sanity checks passed".into(), - )) + Ok(Outcome::success("Sanity checks passed".into())) } } diff --git a/harmony/src/modules/okd/bootstrap_06_installation_report.rs b/harmony/src/modules/okd/bootstrap_06_installation_report.rs index 2713bd2..07d379c 100644 --- a/harmony/src/modules/okd/bootstrap_06_installation_report.rs +++ b/harmony/src/modules/okd/bootstrap_06_installation_report.rs @@ -1,32 +1,15 @@ -// ------------------------------------------------------------------------------------------------- use async_trait::async_trait; use derive_new::new; -use harmony_secret::SecretManager; use harmony_types::id::Id; -use log::{debug, error, info, warn}; -use serde::{Deserialize, Serialize}; -use std::{fmt::Write, path::PathBuf}; -use tokio::{fs::File, io::AsyncWriteExt, process::Command}; +use log::info; +use serde::Serialize; use crate::{ - config::secret::{RedhatSecret, SshKeyPair}, - data::{FileContent, FilePath, Version}, - hardware::PhysicalHost, - infra::inventory::InventoryRepositoryFactory, - instrumentation::{HarmonyEvent, instrument}, + data::Version, interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, - inventory::{HostRole, Inventory}, - modules::{ - dhcp::DhcpHostBindingScore, - http::{IPxeMacBootFileScore, StaticFilesHttpScore}, - inventory::LaunchDiscoverInventoryAgentScore, - okd::{ - bootstrap_load_balancer::OKDBootstrapLoadBalancerScore, - templates::{BootstrapIpxeTpl, InstallConfigYaml}, - }, - }, + inventory::Inventory, score::Score, - topology::{HAClusterTopology, HostBinding}, + topology::HAClusterTopology, }; // Step 06: Installation Report @@ -93,9 +76,6 @@ impl Interpret for OKDSetup06InstallationReportInterpret { _topology: &HAClusterTopology, ) -> Result { self.generate().await?; - Ok(Outcome::new( - InterpretStatus::SUCCESS, - "Installation report generated".into(), - )) + Ok(Outcome::success("Installation report generated".into())) } } diff --git a/harmony/src/modules/okd/crd/mod.rs b/harmony/src/modules/okd/crd/mod.rs new file mode 100644 index 0000000..c1a68ce --- /dev/null +++ b/harmony/src/modules/okd/crd/mod.rs @@ -0,0 +1,41 @@ +use kube::CustomResource; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +pub mod nmstate; + +#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)] +#[kube( + group = "operators.coreos.com", + version = "v1", + kind = "OperatorGroup", + namespaced +)] +#[serde(rename_all = "camelCase")] +pub struct OperatorGroupSpec { + pub target_namespaces: Vec, +} + +#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)] +#[kube( + group = "operators.coreos.com", + version = "v1alpha1", + kind = "Subscription", + namespaced +)] +#[serde(rename_all = "camelCase")] +pub struct SubscriptionSpec { + pub name: String, + pub source: String, + pub source_namespace: String, + pub channel: Option, + pub install_plan_approval: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)] +pub enum InstallPlanApproval { + #[serde(rename = "Automatic")] + Automatic, + #[serde(rename = "Manual")] + Manual, +} diff --git a/harmony/src/modules/okd/crd/nmstate.rs b/harmony/src/modules/okd/crd/nmstate.rs new file mode 100644 index 0000000..5f71e4e --- /dev/null +++ b/harmony/src/modules/okd/crd/nmstate.rs @@ -0,0 +1,251 @@ +use std::collections::BTreeMap; + +use 
kube::CustomResource; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +use serde_json::Value; + +#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)] +#[kube(group = "nmstate.io", version = "v1", kind = "NMState", namespaced)] +#[serde(rename_all = "camelCase")] +pub struct NMStateSpec { + pub probe_configuration: Option, +} + +impl Default for NMState { + fn default() -> Self { + Self { + metadata: Default::default(), + spec: NMStateSpec { + probe_configuration: None, + }, + } + } +} + +#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)] +#[serde(rename_all = "camelCase")] +pub struct ProbeConfig { + pub dns: ProbeDns, +} + +#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)] +#[serde(rename_all = "camelCase")] +pub struct ProbeDns { + pub host: String, +} + +#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)] +#[kube( + group = "nmstate.io", + version = "v1", + kind = "NodeNetworkConfigurationPolicy", + namespaced +)] +#[serde(rename_all = "camelCase")] +pub struct NodeNetworkConfigurationPolicySpec { + pub node_selector: Option>, + pub desired_state: DesiredStateSpec, +} + +#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct DesiredStateSpec { + pub interfaces: Vec, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct InterfaceSpec { + pub name: String, + pub description: Option, + pub r#type: String, + pub state: String, + pub mac_address: Option, + pub mtu: Option, + pub controller: Option, + pub ipv4: Option, + pub ipv6: Option, + pub ethernet: Option, + pub link_aggregation: Option, + pub vlan: Option, + pub vxlan: Option, + pub mac_vtap: Option, + pub mac_vlan: Option, + pub infiniband: Option, + pub linux_bridge: Option, + pub ovs_bridge: Option, + pub ethtool: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct IpStackSpec { + pub enabled: Option, + pub dhcp: Option, + pub autoconf: Option, + pub address: Option>, + pub auto_dns: Option, + pub auto_gateway: Option, + pub auto_routes: Option, + pub dhcp_client_id: Option, + pub dhcp_duid: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct IpAddressSpec { + pub ip: String, + pub prefix_length: u8, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct EthernetSpec { + pub speed: Option, + pub duplex: Option, + pub auto_negotiation: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct BondSpec { + pub mode: String, + pub ports: Vec, + pub options: Option>, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct VlanSpec { + pub base_iface: String, + pub id: u16, + pub protocol: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct VxlanSpec { + pub base_iface: String, + pub id: u32, + pub remote: String, + pub local: Option, + pub learning: Option, + pub destination_port: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct MacVtapSpec { + pub base_iface: String, + pub mode: String, + pub promiscuous: 
Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct MacVlanSpec { + pub base_iface: String, + pub mode: String, + pub promiscuous: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct InfinibandSpec { + pub base_iface: String, + pub pkey: String, + pub mode: String, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct LinuxBridgeSpec { + pub options: Option, + pub ports: Option>, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct LinuxBridgeOptions { + pub mac_ageing_time: Option, + pub multicast_snooping: Option, + pub stp: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct StpOptions { + pub enabled: Option, + pub forward_delay: Option, + pub hello_time: Option, + pub max_age: Option, + pub priority: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct LinuxBridgePort { + pub name: String, + pub vlan: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct LinuxBridgePortVlan { + pub mode: Option, + pub trunk_tags: Option>, + pub tag: Option, + pub enable_native: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct VlanTag { + pub id: u16, + pub id_range: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct VlanIdRange { + pub min: u16, + pub max: u16, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct OvsBridgeSpec { + pub options: Option, + pub ports: Option>, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct OvsBridgeOptions { + pub stp: Option, + pub rstp: Option, + pub mcast_snooping_enable: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct OvsPortSpec { + pub name: String, + pub link_aggregation: Option, + pub vlan: Option, + pub r#type: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct EthtoolSpec { + // TODO: Properly describe this spec (https://nmstate.io/devel/yaml_api.html#ethtool) +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub struct EthtoolFecSpec { + pub auto: Option, + pub mode: Option, +} diff --git a/harmony/src/modules/okd/host_network.rs b/harmony/src/modules/okd/host_network.rs new file mode 100644 index 0000000..3bc8c3c --- /dev/null +++ b/harmony/src/modules/okd/host_network.rs @@ -0,0 +1,394 @@ +use async_trait::async_trait; +use harmony_types::id::Id; +use log::{debug, info}; +use serde::Serialize; + +use crate::{ + data::Version, + hardware::PhysicalHost, + interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, + inventory::Inventory, + score::Score, + topology::{HostNetworkConfig, NetworkInterface, Switch, SwitchPort, Topology}, +}; + +#[derive(Debug, Clone, Serialize)] +pub 
struct HostNetworkConfigurationScore { + pub hosts: Vec<PhysicalHost>, +} + +impl<T: Topology + Switch> Score<T> for HostNetworkConfigurationScore { + fn name(&self) -> String { + "HostNetworkConfigurationScore".into() + } + + fn create_interpret(&self) -> Box<dyn Interpret<T>> { + Box::new(HostNetworkConfigurationInterpret { + score: self.clone(), + }) + } +} + +#[derive(Debug)] +pub struct HostNetworkConfigurationInterpret { + score: HostNetworkConfigurationScore, +} + +impl HostNetworkConfigurationInterpret { + async fn configure_network_for_host<T: Topology + Switch>( + &self, + topology: &T, + host: &PhysicalHost, + ) -> Result<(), InterpretError> { + let switch_ports = self.collect_switch_ports_for_host(topology, host).await?; + if !switch_ports.is_empty() { + topology + .configure_host_network(host, HostNetworkConfig { switch_ports }) + .await + .map_err(|e| InterpretError::new(format!("Failed to configure host: {e}")))?; + } + + Ok(()) + } + + async fn collect_switch_ports_for_host<T: Topology + Switch>( + &self, + topology: &T, + host: &PhysicalHost, + ) -> Result<Vec<SwitchPort>, InterpretError> { + let mut switch_ports = vec![]; + + for network_interface in &host.network { + let mac_address = network_interface.mac_address; + + match topology.get_port_for_mac_address(&mac_address).await { + Ok(Some(port)) => { + switch_ports.push(SwitchPort { + interface: NetworkInterface { + name: network_interface.name.clone(), + mac_address, + speed_mbps: network_interface.speed_mbps, + mtu: network_interface.mtu, + }, + port, + }); + } + Ok(None) => debug!("No port found for host '{}', skipping", host.id), + Err(e) => { + return Err(InterpretError::new(format!( + "Failed to get port for host '{}': {}", + host.id, e + ))); + } + } + } + + Ok(switch_ports) + } +} + +#[async_trait] +impl<T: Topology + Switch> Interpret<T> for HostNetworkConfigurationInterpret { + fn get_name(&self) -> InterpretName { + InterpretName::Custom("HostNetworkConfigurationInterpret") + } + + fn get_version(&self) -> Version { + todo!() + } + + fn get_status(&self) -> InterpretStatus { + todo!() + } + + fn get_children(&self) -> Vec<Id> { + vec![] + } + + async fn execute( + &self, + _inventory: &Inventory, + topology: &T, + ) -> Result<Outcome, InterpretError> { + if self.score.hosts.is_empty() { + return Ok(Outcome::noop("No hosts to configure".into())); + } + + info!( + "Started network configuration for {} host(s)...", + self.score.hosts.len() + ); + + topology + .setup_switch() + .await + .map_err(|e| InterpretError::new(format!("Switch setup failed: {e}")))?; + + let mut configured_host_count = 0; + for host in &self.score.hosts { + self.configure_network_for_host(topology, host).await?; + configured_host_count += 1; + } + + if configured_host_count > 0 { + Ok(Outcome::success(format!( + "Configured {configured_host_count}/{} host(s)", + self.score.hosts.len() + ))) + } else { + Ok(Outcome::noop("No hosts configured".into())) + } + } +} + +#[cfg(test)] +mod tests { + use assertor::*; + use harmony_types::{net::MacAddress, switch::PortLocation}; + use lazy_static::lazy_static; + + use crate::{ + hardware::HostCategory, + topology::{ + HostNetworkConfig, PreparationError, PreparationOutcome, SwitchError, SwitchPort, + }, + }; + use std::{ + str::FromStr, + sync::{Arc, Mutex}, + }; + + use super::*; + + lazy_static! 
{ + pub static ref HOST_ID: Id = Id::from_str("host-1").unwrap(); + pub static ref ANOTHER_HOST_ID: Id = Id::from_str("host-2").unwrap(); + pub static ref EXISTING_INTERFACE: NetworkInterface = NetworkInterface { + mac_address: MacAddress::try_from("AA:BB:CC:DD:EE:F1".to_string()).unwrap(), + name: "interface-1".into(), + speed_mbps: None, + mtu: 1, + }; + pub static ref ANOTHER_EXISTING_INTERFACE: NetworkInterface = NetworkInterface { + mac_address: MacAddress::try_from("AA:BB:CC:DD:EE:F2".to_string()).unwrap(), + name: "interface-2".into(), + speed_mbps: None, + mtu: 1, + }; + pub static ref UNKNOWN_INTERFACE: NetworkInterface = NetworkInterface { + mac_address: MacAddress::try_from("11:22:33:44:55:61".to_string()).unwrap(), + name: "unknown-interface".into(), + speed_mbps: None, + mtu: 1, + }; + pub static ref PORT: PortLocation = PortLocation(1, 0, 42); + pub static ref ANOTHER_PORT: PortLocation = PortLocation(2, 0, 42); + } + + #[tokio::test] + async fn should_setup_switch() { + let host = given_host(&HOST_ID, vec![EXISTING_INTERFACE.clone()]); + let score = given_score(vec![host]); + let topology = TopologyWithSwitch::new(); + + let _ = score.interpret(&Inventory::empty(), &topology).await; + + let switch_setup = topology.switch_setup.lock().unwrap(); + assert_that!(*switch_setup).is_true(); + } + + #[tokio::test] + async fn host_with_one_mac_address_should_create_bond_with_one_interface() { + let host = given_host(&HOST_ID, vec![EXISTING_INTERFACE.clone()]); + let score = given_score(vec![host]); + let topology = TopologyWithSwitch::new(); + + let _ = score.interpret(&Inventory::empty(), &topology).await; + + let configured_host_networks = topology.configured_host_networks.lock().unwrap(); + assert_that!(*configured_host_networks).contains_exactly(vec![( + HOST_ID.clone(), + HostNetworkConfig { + switch_ports: vec![SwitchPort { + interface: EXISTING_INTERFACE.clone(), + port: PORT.clone(), + }], + }, + )]); + } + + #[tokio::test] + async fn host_with_multiple_mac_addresses_should_create_one_bond_with_all_interfaces() { + let score = given_score(vec![given_host( + &HOST_ID, + vec![ + EXISTING_INTERFACE.clone(), + ANOTHER_EXISTING_INTERFACE.clone(), + ], + )]); + let topology = TopologyWithSwitch::new(); + + let _ = score.interpret(&Inventory::empty(), &topology).await; + + let configured_host_networks = topology.configured_host_networks.lock().unwrap(); + assert_that!(*configured_host_networks).contains_exactly(vec![( + HOST_ID.clone(), + HostNetworkConfig { + switch_ports: vec![ + SwitchPort { + interface: EXISTING_INTERFACE.clone(), + port: PORT.clone(), + }, + SwitchPort { + interface: ANOTHER_EXISTING_INTERFACE.clone(), + port: ANOTHER_PORT.clone(), + }, + ], + }, + )]); + } + + #[tokio::test] + async fn multiple_hosts_should_create_one_bond_per_host() { + let score = given_score(vec![ + given_host(&HOST_ID, vec![EXISTING_INTERFACE.clone()]), + given_host(&ANOTHER_HOST_ID, vec![ANOTHER_EXISTING_INTERFACE.clone()]), + ]); + let topology = TopologyWithSwitch::new(); + + let _ = score.interpret(&Inventory::empty(), &topology).await; + + let configured_host_networks = topology.configured_host_networks.lock().unwrap(); + assert_that!(*configured_host_networks).contains_exactly(vec![ + ( + HOST_ID.clone(), + HostNetworkConfig { + switch_ports: vec![SwitchPort { + interface: EXISTING_INTERFACE.clone(), + port: PORT.clone(), + }], + }, + ), + ( + ANOTHER_HOST_ID.clone(), + HostNetworkConfig { + switch_ports: vec![SwitchPort { + interface: ANOTHER_EXISTING_INTERFACE.clone(), + port: 
ANOTHER_PORT.clone(), + }], + }, + ), + ]); + } + + #[tokio::test] + async fn port_not_found_for_mac_address_should_not_configure_interface() { + let score = given_score(vec![given_host(&HOST_ID, vec![UNKNOWN_INTERFACE.clone()])]); + let topology = TopologyWithSwitch::new_port_not_found(); + + let _ = score.interpret(&Inventory::empty(), &topology).await; + + let configured_host_networks = topology.configured_host_networks.lock().unwrap(); + assert_that!(*configured_host_networks).is_empty(); + } + + fn given_score(hosts: Vec<PhysicalHost>) -> HostNetworkConfigurationScore { + HostNetworkConfigurationScore { hosts } + } + + fn given_host(id: &Id, network_interfaces: Vec<NetworkInterface>) -> PhysicalHost { + let network = network_interfaces.iter().map(given_interface).collect(); + + PhysicalHost { + id: id.clone(), + category: HostCategory::Server, + network, + storage: vec![], + labels: vec![], + memory_modules: vec![], + cpus: vec![], + } + } + + fn given_interface( + interface: &NetworkInterface, + ) -> harmony_inventory_agent::hwinfo::NetworkInterface { + harmony_inventory_agent::hwinfo::NetworkInterface { + name: interface.name.clone(), + mac_address: interface.mac_address, + speed_mbps: interface.speed_mbps, + is_up: true, + mtu: interface.mtu, + ipv4_addresses: vec![], + ipv6_addresses: vec![], + driver: "driver".into(), + firmware_version: None, + } + } + + struct TopologyWithSwitch { + available_ports: Arc<Mutex<Vec<PortLocation>>>, + configured_host_networks: Arc<Mutex<Vec<(Id, HostNetworkConfig)>>>, + switch_setup: Arc<Mutex<bool>>, + } + + impl TopologyWithSwitch { + fn new() -> Self { + Self { + available_ports: Arc::new(Mutex::new(vec![PORT.clone(), ANOTHER_PORT.clone()])), + configured_host_networks: Arc::new(Mutex::new(vec![])), + switch_setup: Arc::new(Mutex::new(false)), + } + } + + fn new_port_not_found() -> Self { + Self { + available_ports: Arc::new(Mutex::new(vec![])), + configured_host_networks: Arc::new(Mutex::new(vec![])), + switch_setup: Arc::new(Mutex::new(false)), + } + } + } + + #[async_trait] + impl Topology for TopologyWithSwitch { + fn name(&self) -> &str { + "SwitchWithPortTopology" + } + + async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> { + Ok(PreparationOutcome::Success { details: "".into() }) + } + } + + #[async_trait] + impl Switch for TopologyWithSwitch { + async fn setup_switch(&self) -> Result<(), SwitchError> { + let mut switch_configured = self.switch_setup.lock().unwrap(); + *switch_configured = true; + Ok(()) + } + + async fn get_port_for_mac_address( + &self, + _mac_address: &MacAddress, + ) -> Result<Option<PortLocation>, SwitchError> { + let mut ports = self.available_ports.lock().unwrap(); + if ports.is_empty() { + return Ok(None); + } + Ok(Some(ports.remove(0))) + } + + async fn configure_host_network( + &self, + host: &PhysicalHost, + config: HostNetworkConfig, + ) -> Result<(), SwitchError> { + let mut configured_host_networks = self.configured_host_networks.lock().unwrap(); + configured_host_networks.push((host.id.clone(), config.clone())); + + Ok(()) + } + } +} diff --git a/harmony/src/modules/okd/mod.rs b/harmony/src/modules/okd/mod.rs index 1bd4514..a12f132 100644 --- a/harmony/src/modules/okd/mod.rs +++ b/harmony/src/modules/okd/mod.rs @@ -19,3 +19,5 @@ pub use bootstrap_03_control_plane::*; pub use bootstrap_04_workers::*; pub use bootstrap_05_sanity_check::*; pub use bootstrap_06_installation_report::*; +pub mod crd; +pub mod host_network; diff --git a/harmony/src/modules/prometheus/alerts/k8s/pod.rs b/harmony/src/modules/prometheus/alerts/k8s/pod.rs index 152ec2f..e1dba28 100644 --- a/harmony/src/modules/prometheus/alerts/k8s/pod.rs +++ 
b/harmony/src/modules/prometheus/alerts/k8s/pod.rs @@ -21,8 +21,8 @@ pub fn pod_failed() -> PrometheusAlertRule { pub fn alert_container_restarting() -> PrometheusAlertRule { PrometheusAlertRule { alert: "ContainerRestarting".into(), - expr: "increase(kube_pod_container_status_restarts_total[5m]) > 3".into(), - r#for: Some("5m".into()), + expr: "increase(kube_pod_container_status_restarts_total[30s]) > 3".into(), + r#for: Some("30s".into()), labels: HashMap::from([("severity".into(), "warning".into())]), annotations: HashMap::from([ ( @@ -42,7 +42,7 @@ pub fn alert_pod_not_ready() -> PrometheusAlertRule { PrometheusAlertRule { alert: "PodNotReady".into(), expr: "kube_pod_status_ready{condition=\"true\"} == 0".into(), - r#for: Some("2m".into()), + r#for: Some("30s".into()), labels: HashMap::from([("severity".into(), "warning".into())]), annotations: HashMap::from([ ("summary".into(), "Pod is not ready".into()), diff --git a/harmony/src/modules/prometheus/rhob_alerting_score.rs b/harmony/src/modules/prometheus/rhob_alerting_score.rs index 97fa644..95908d5 100644 --- a/harmony/src/modules/prometheus/rhob_alerting_score.rs +++ b/harmony/src/modules/prometheus/rhob_alerting_score.rs @@ -1,3 +1,4 @@ +use fqdn::fqdn; use std::fs; use std::{collections::BTreeMap, sync::Arc}; use tempfile::tempdir; @@ -8,11 +9,9 @@ use log::{debug, info}; use serde::Serialize; use std::process::Command; +use crate::modules::k8s::ingress::{K8sIngressScore, PathType}; use crate::modules::monitoring::kube_prometheus::crd::grafana_default_dashboard::build_default_dashboard; use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability; -use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanagers::{ - Alertmanager, AlertmanagerSpec, -}; use crate::modules::monitoring::kube_prometheus::crd::rhob_grafana::{ Grafana, GrafanaDashboard, GrafanaDashboardSpec, GrafanaDatasource, GrafanaDatasourceConfig, GrafanaDatasourceSpec, GrafanaSpec, @@ -29,6 +28,7 @@ use crate::modules::monitoring::kube_prometheus::crd::rhob_service_monitor::{ ServiceMonitor, ServiceMonitorSpec, }; use crate::score::Score; +use crate::topology::ingress::Ingress; use crate::topology::oberservability::monitoring::AlertReceiver; use crate::topology::{K8sclient, Topology, k8s::K8sClient}; use crate::{ @@ -48,8 +48,8 @@ pub struct RHOBAlertingScore { pub prometheus_rules: Vec, } -impl> Score - for RHOBAlertingScore +impl> + Score for RHOBAlertingScore { fn create_interpret(&self) -> Box> { Box::new(RHOBAlertingInterpret { @@ -74,19 +74,20 @@ pub struct RHOBAlertingInterpret { } #[async_trait] -impl> Interpret - for RHOBAlertingInterpret +impl> + Interpret for RHOBAlertingInterpret { async fn execute( &self, - _inventory: &Inventory, + inventory: &Inventory, topology: &T, ) -> Result { let client = topology.k8s_client().await.unwrap(); self.ensure_grafana_operator().await?; - self.install_prometheus(&client).await?; + self.install_prometheus(inventory, topology, &client) + .await?; self.install_client_kube_metrics().await?; - self.install_grafana(&client).await?; + self.install_grafana(inventory, topology, &client).await?; self.install_receivers(&self.sender, &self.receivers) .await?; self.install_rules(&self.prometheus_rules, &client).await?; @@ -212,7 +213,8 @@ impl RHOBAlertingInterpret { let output = Command::new("helm") .args([ - "install", + "upgrade", + "--install", "grafana-operator", "grafana-operator/grafana-operator", "--namespace", @@ -226,7 +228,7 @@ impl RHOBAlertingInterpret { if 
!output.status.success() { return Err(InterpretError::new(format!( - "helm install failed:\nstdout: {}\nstderr: {}", + "helm upgrade --install failed:\nstdout: {}\nstderr: {}", String::from_utf8_lossy(&output.stdout), String::from_utf8_lossy(&output.stderr) ))); @@ -238,25 +240,31 @@ impl RHOBAlertingInterpret { ))) } - async fn install_prometheus(&self, client: &Arc) -> Result { + async fn install_prometheus( + &self, + inventory: &Inventory, + topology: &T, + client: &Arc, + ) -> Result { debug!( "installing crd-prometheuses in namespace {}", self.sender.namespace.clone() ); + debug!("building role/rolebinding/serviceaccount for crd-prometheus"); let stack = MonitoringStack { metadata: ObjectMeta { name: Some(format!("{}-monitoring", self.sender.namespace.clone()).into()), namespace: Some(self.sender.namespace.clone()), - labels: Some([("coo".into(), "example".into())].into()), + labels: Some([("monitoring-stack".into(), "true".into())].into()), ..Default::default() }, spec: MonitoringStackSpec { log_level: Some("debug".into()), retention: Some("1d".into()), resource_selector: Some(LabelSelector { - match_labels: [("app".into(), "demo".into())].into(), - ..Default::default() + match_labels: Default::default(), + match_expressions: vec![], }), }, }; @@ -265,6 +273,42 @@ impl RHOBAlertingInterpret { .apply(&stack, Some(&self.sender.namespace.clone())) .await .map_err(|e| InterpretError::new(e.to_string()))?; + + let alert_manager_domain = topology + .get_domain(&format!("alert-manager-{}", self.sender.namespace.clone())) + .await?; + let name = format!("{}-alert-manager", self.sender.namespace.clone()); + let backend_service = format!("alertmanager-operated"); + let namespace = self.sender.namespace.clone(); + let alert_manager_ingress = K8sIngressScore { + name: fqdn!(&name), + host: fqdn!(&alert_manager_domain), + backend_service: fqdn!(&backend_service), + port: 9093, + path: Some("/".to_string()), + path_type: Some(PathType::Prefix), + namespace: Some(fqdn!(&namespace)), + ingress_class_name: Some("openshift-default".to_string()), + }; + + let prometheus_domain = topology + .get_domain(&format!("prometheus-{}", self.sender.namespace.clone())) + .await?; + let name = format!("{}-prometheus", self.sender.namespace.clone()); + let backend_service = format!("prometheus-operated"); + let prometheus_ingress = K8sIngressScore { + name: fqdn!(&name), + host: fqdn!(&prometheus_domain), + backend_service: fqdn!(&backend_service), + port: 9090, + path: Some("/".to_string()), + path_type: Some(PathType::Prefix), + namespace: Some(fqdn!(&namespace)), + ingress_class_name: Some("openshift-default".to_string()), + }; + + alert_manager_ingress.interpret(inventory, topology).await?; + prometheus_ingress.interpret(inventory, topology).await?; info!("installed rhob monitoring stack",); Ok(Outcome::success(format!( "successfully deployed rhob-prometheus {:#?}", @@ -272,31 +316,6 @@ impl RHOBAlertingInterpret { ))) } - async fn install_alert_manager( - &self, - client: &Arc, - ) -> Result { - let am = Alertmanager { - metadata: ObjectMeta { - name: Some(self.sender.namespace.clone()), - labels: Some(std::collections::BTreeMap::from([( - "alertmanagerConfig".to_string(), - "enabled".to_string(), - )])), - namespace: Some(self.sender.namespace.clone()), - ..Default::default() - }, - spec: AlertmanagerSpec::default(), - }; - client - .apply(&am, Some(&self.sender.namespace.clone())) - .await - .map_err(|e| InterpretError::new(e.to_string()))?; - Ok(Outcome::success(format!( - "successfully deployed service 
monitor {:#?}", - am.metadata.name - ))) - } async fn install_monitors( &self, mut monitors: Vec, @@ -379,7 +398,12 @@ impl RHOBAlertingInterpret { ))) } - async fn install_grafana(&self, client: &Arc) -> Result { + async fn install_grafana( + &self, + inventory: &Inventory, + topology: &T, + client: &Arc, + ) -> Result { let mut label = BTreeMap::new(); label.insert("dashboards".to_string(), "grafana".to_string()); let labels = LabelSelector { @@ -465,6 +489,23 @@ impl RHOBAlertingInterpret { .apply(&grafana, Some(&self.sender.namespace.clone())) .await .map_err(|e| InterpretError::new(e.to_string()))?; + let domain = topology + .get_domain(&format!("grafana-{}", self.sender.namespace.clone())) + .await?; + let name = format!("{}-grafana", self.sender.namespace.clone()); + let backend_service = format!("grafana-{}-service", self.sender.namespace.clone()); + let grafana_ingress = K8sIngressScore { + name: fqdn!(&name), + host: fqdn!(&domain), + backend_service: fqdn!(&backend_service), + port: 3000, + path: Some("/".to_string()), + path_type: Some(PathType::Prefix), + namespace: Some(fqdn!(&namespace)), + ingress_class_name: Some("openshift-default".to_string()), + }; + + grafana_ingress.interpret(inventory, topology).await?; Ok(Outcome::success(format!( "successfully deployed grafana instance {:#?}", grafana.metadata.name diff --git a/harmony_cli/src/cli_logger.rs b/harmony_cli/src/cli_logger.rs index be61c2a..2cb2a93 100644 --- a/harmony_cli/src/cli_logger.rs +++ b/harmony_cli/src/cli_logger.rs @@ -178,10 +178,10 @@ fn handle_events() { ApplicationFeatureStatus::Installing => { info!("Installing feature '{feature}' for '{application}'..."); } - ApplicationFeatureStatus::Installed => { + ApplicationFeatureStatus::Installed { details: _ } => { info!(status = "finished"; "Feature '{feature}' installed"); } - ApplicationFeatureStatus::Failed { details } => { + ApplicationFeatureStatus::Failed { message: details } => { error!(status = "failed"; "Feature '{feature}' installation failed: {details}"); } }, diff --git a/harmony_cli/src/cli_reporter.rs b/harmony_cli/src/cli_reporter.rs new file mode 100644 index 0000000..f6095cc --- /dev/null +++ b/harmony_cli/src/cli_reporter.rs @@ -0,0 +1,56 @@ +use std::sync::Mutex; + +use harmony::{ + instrumentation::{self, HarmonyEvent}, + modules::application::ApplicationFeatureStatus, +}; + +use crate::theme; + +pub fn init() { + let details: Mutex> = Mutex::new(vec![]); + + instrumentation::subscribe("Harmony CLI Reporter", { + move |event| { + let mut details = details.lock().unwrap(); + + match event { + HarmonyEvent::InterpretExecutionFinished { + execution_id: _, + topology: _, + interpret: _, + score: _, + outcome: Ok(outcome), + } => { + if outcome.status == harmony::interpret::InterpretStatus::SUCCESS { + details.extend(outcome.details.clone()); + } + } + HarmonyEvent::ApplicationFeatureStateChanged { + topology: _, + application: _, + feature: _, + status: + ApplicationFeatureStatus::Installed { + details: feature_details, + }, + } => { + details.extend(feature_details.clone()); + } + HarmonyEvent::HarmonyFinished => { + if !details.is_empty() { + println!( + "\n{} All done! 
Here's what's next for you:", + theme::EMOJI_SUMMARY + ); + for detail in details.iter() { + println!("- {detail}"); + } + println!(); + } + } + _ => {} + }; + } + }); +} diff --git a/harmony_cli/src/lib.rs b/harmony_cli/src/lib.rs index 0bfb1e7..4a0dbe7 100644 --- a/harmony_cli/src/lib.rs +++ b/harmony_cli/src/lib.rs @@ -8,6 +8,7 @@ use inquire::Confirm; use log::debug; pub mod cli_logger; // FIXME: Don't make me pub +mod cli_reporter; pub mod progress; pub mod theme; @@ -116,6 +117,7 @@ pub async fn run_cli( args: Args, ) -> Result<(), Box> { cli_logger::init(); + cli_reporter::init(); let mut maestro = Maestro::initialize(inventory, topology).await.unwrap(); maestro.register_all(scores); diff --git a/harmony_cli/src/theme.rs b/harmony_cli/src/theme.rs index 66eee45..f9368f5 100644 --- a/harmony_cli/src/theme.rs +++ b/harmony_cli/src/theme.rs @@ -9,6 +9,7 @@ pub static EMOJI_ERROR: Emoji<'_, '_> = Emoji("⚠️", ""); pub static EMOJI_DEPLOY: Emoji<'_, '_> = Emoji("🚀", ""); pub static EMOJI_TOPOLOGY: Emoji<'_, '_> = Emoji("📦", ""); pub static EMOJI_SCORE: Emoji<'_, '_> = Emoji("🎶", ""); +pub static EMOJI_SUMMARY: Emoji<'_, '_> = Emoji("🚀", ""); lazy_static! { pub static ref SECTION_STYLE: ProgressStyle = ProgressStyle::default_spinner() diff --git a/harmony_composer/src/harmony_composer_logger.rs b/harmony_composer/src/harmony_composer_logger.rs index 040a167..6351751 100644 --- a/harmony_composer/src/harmony_composer_logger.rs +++ b/harmony_composer/src/harmony_composer_logger.rs @@ -21,7 +21,6 @@ pub fn handle_events() { instrumentation::subscribe("Harmony Composer Logger", { move |event| match event { - HarmonyComposerEvent::HarmonyComposerStarted => {} HarmonyComposerEvent::ProjectInitializationStarted => { progress_tracker.add_section( SETUP_SECTION, diff --git a/harmony_composer/src/instrumentation.rs b/harmony_composer/src/instrumentation.rs index b9164b7..509d39c 100644 --- a/harmony_composer/src/instrumentation.rs +++ b/harmony_composer/src/instrumentation.rs @@ -5,7 +5,6 @@ use crate::{HarmonyProfile, HarmonyTarget}; #[derive(Debug, Clone)] pub enum HarmonyComposerEvent { - HarmonyComposerStarted, ProjectInitializationStarted, ProjectInitialized, ProjectCompilationStarted { diff --git a/harmony_composer/src/main.rs b/harmony_composer/src/main.rs index 4119460..817bb8b 100644 --- a/harmony_composer/src/main.rs +++ b/harmony_composer/src/main.rs @@ -54,6 +54,9 @@ struct DeployArgs { #[arg(long = "profile", short = 'p', default_value = "dev")] harmony_profile: HarmonyProfile, + + #[arg(long = "dry-run", short = 'd', default_value = "false")] + dry_run: bool, } #[derive(Args, Clone, Debug)] @@ -178,6 +181,7 @@ async fn main() { command .env("HARMONY_USE_LOCAL_K3D", format!("{use_local_k3d}")) .env("HARMONY_PROFILE", format!("{}", args.harmony_profile)) + .env("HARMONY_DRY_RUN", format!("{}", args.dry_run)) .arg("-y") .arg("-a"); diff --git a/harmony_types/src/id.rs b/harmony_types/src/id.rs index 2cb2674..0a82906 100644 --- a/harmony_types/src/id.rs +++ b/harmony_types/src/id.rs @@ -19,7 +19,7 @@ use serde::{Deserialize, Serialize}; /// /// **It is not meant to be very secure or unique**, it is suitable to generate up to 10 000 items per /// second with a reasonable collision rate of 0,000014 % as calculated by this calculator : https://kevingal.com/apps/collision.html -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord, Serialize, Deserialize)] pub struct Id { value: String, } diff --git a/harmony_types/src/lib.rs 
b/harmony_types/src/lib.rs index 7bb1abd..098379a 100644 --- a/harmony_types/src/lib.rs +++ b/harmony_types/src/lib.rs @@ -1,2 +1,3 @@ pub mod id; pub mod net; +pub mod switch; diff --git a/harmony_types/src/net.rs b/harmony_types/src/net.rs index 594a3e2..51de86e 100644 --- a/harmony_types/src/net.rs +++ b/harmony_types/src/net.rs @@ -1,6 +1,6 @@ use serde::{Deserialize, Serialize}; -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize, PartialOrd, Ord)] pub struct MacAddress(pub [u8; 6]); impl MacAddress { @@ -41,7 +41,7 @@ impl TryFrom<String> for MacAddress { bytes[i] = u8::from_str_radix(part, 16).map_err(|_| { std::io::Error::new( std::io::ErrorKind::InvalidInput, - format!("Invalid hex value in part {}: '{}'", i, part), + format!("Invalid hex value in part {i}: '{part}'"), ) })?; } @@ -106,8 +106,8 @@ impl Serialize for Url { impl std::fmt::Display for Url { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - Url::LocalFolder(path) => write!(f, "{}", path), - Url::Url(url) => write!(f, "{}", url), + Url::LocalFolder(path) => write!(f, "{path}"), + Url::Url(url) => write!(f, "{url}"), } } } diff --git a/harmony_types/src/switch.rs b/harmony_types/src/switch.rs new file mode 100644 index 0000000..2d32754 --- /dev/null +++ b/harmony_types/src/switch.rs @@ -0,0 +1,176 @@ +use std::{fmt, str::FromStr}; + +/// Simple error type for port parsing failures. +#[derive(Debug)] +pub enum PortParseError { + /// The port string did not conform to the expected S/M/P or range format. + InvalidFormat, + /// A stack, module, or port segment could not be parsed as a number. + InvalidSegment(String), +} + +impl fmt::Display for PortParseError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + PortParseError::InvalidFormat => write!(f, "Port string is in an unexpected format."), + PortParseError::InvalidSegment(s) => write!(f, "Invalid segment in port string: {}", s), + } + } +} + +/// Represents the atomic, physical location of a switch port: `<stack>/<module>/<port>`. +/// +/// Example: `1/1/1` +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)] +pub struct PortLocation(pub u8, pub u8, pub u8); + +impl fmt::Display for PortLocation { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}/{}/{}", self.0, self.1, self.2) + } +} + +impl FromStr for PortLocation { + type Err = PortParseError; + + /// Parses a string slice into a `PortLocation`. + /// + /// # Examples + /// + /// ```rust + /// use std::str::FromStr; + /// use harmony_types::switch::PortLocation; + /// + /// assert_eq!(PortLocation::from_str("1/1/1").unwrap(), PortLocation(1, 1, 1)); + /// assert_eq!(PortLocation::from_str("12/5/48").unwrap(), PortLocation(12, 5, 48)); + /// assert!(PortLocation::from_str("1/A/1").is_err()); + /// ``` + fn from_str(s: &str) -> Result<Self, Self::Err> { + let parts: Vec<&str> = s.split('/').collect(); + + if parts.len() != 3 { + return Err(PortParseError::InvalidFormat); + } + + let parse_segment = |part: &str| -> Result<u8, PortParseError> { + u8::from_str(part).map_err(|_| PortParseError::InvalidSegment(part.to_string())) + }; + + let stack = parse_segment(parts[0])?; + let module = parse_segment(parts[1])?; + let port = parse_segment(parts[2])?; + + Ok(PortLocation(stack, module, port)) + } +} + +/// Represents a Port configuration input, which can be a single port, a sequential range, +/// or an explicit set defined by endpoints.
+#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)] +pub enum PortDeclaration { + /// A single switch port defined by its location. Example: `PortDeclaration::Single(1/1/1)` + Single(PortLocation), + /// A strictly sequential range defined by two endpoints using the hyphen separator (`-`). + /// All ports between the endpoints (inclusive) are implicitly included. + /// Example: `PortDeclaration::Range(1/1/1, 1/1/4)` + Range(PortLocation, PortLocation), + /// A set of ports defined by two endpoints using the asterisk separator (`*`). + /// The actual member ports must be determined contextually (e.g., from MAC tables or + /// explicit configuration lists). + /// Example: `PortDeclaration::Set(1/1/1, 1/1/3)` where only ports 1 and 3 might be active. + Set(PortLocation, PortLocation), +} + +impl PortDeclaration { + /// Parses a port configuration string into a structured `PortDeclaration` enum. + /// + /// This function performs only basic format and numerical parsing, assuming the input + /// strings (e.g., from `show` commands) are semantically valid and logically ordered. + /// + /// # Supported Formats + /// + /// * **Single Port:** `"1/1/1"` + /// * **Range (Hyphen, `-`):** `"1/1/1-1/1/4"` + /// * **Set (Asterisk, `*`):** `"1/1/1*1/1/4"` + /// + /// # Errors + /// + /// Returns `PortParseError` if the string format is incorrect or numerical segments + /// cannot be parsed. + /// + /// # Examples + /// + /// ```rust + /// use harmony_types::switch::{PortDeclaration, PortLocation}; + /// + /// // Single Port + /// assert_eq!(PortDeclaration::parse("3/2/15").unwrap(), PortDeclaration::Single(PortLocation(3, 2, 15))); + /// + /// // Range (Hyphen) - implies sequential ports + /// let result_range = PortDeclaration::parse("1/1/1-1/1/4").unwrap(); + /// assert_eq!(result_range, PortDeclaration::Range(PortLocation(1, 1, 1), PortLocation(1, 1, 4))); + /// + /// // Set (Asterisk) - implies non-sequential set defined by endpoints + /// let result_set = PortDeclaration::parse("1/1/48*2/1/48").unwrap(); + /// assert_eq!(result_set, PortDeclaration::Set(PortLocation(1, 1, 48), PortLocation(2, 1, 48))); + /// + /// // Invalid Format (will still fail basic parsing) + /// assert!(PortDeclaration::parse("1/1/1/1").is_err()); + /// ``` + pub fn parse(port_str: &str) -> Result { + if let Some((start_str, end_str)) = port_str.split_once('-') { + let start_port = PortLocation::from_str(start_str.trim())?; + let end_port = PortLocation::from_str(end_str.trim())?; + return Ok(PortDeclaration::Range(start_port, end_port)); + } + + if let Some((start_str, end_str)) = port_str.split_once('*') { + let start_port = PortLocation::from_str(start_str.trim())?; + let end_port = PortLocation::from_str(end_str.trim())?; + return Ok(PortDeclaration::Set(start_port, end_port)); + } + + let location = PortLocation::from_str(port_str)?; + Ok(PortDeclaration::Single(location)) + } +} + +impl fmt::Display for PortDeclaration { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + PortDeclaration::Single(port) => write!(f, "{port}"), + PortDeclaration::Range(start, end) => write!(f, "{start}-{end}"), + PortDeclaration::Set(start, end) => write!(f, "{start}*{end}"), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_parse_port_location_invalid() { + assert!(PortLocation::from_str("1/1").is_err()); + assert!(PortLocation::from_str("1/A/1").is_err()); + assert!(PortLocation::from_str("1/1/256").is_err()); + } + + #[test] + fn test_parse_declaration_single() { + let 
single_result = PortDeclaration::parse("1/1/4").unwrap(); + assert!(matches!(single_result, PortDeclaration::Single(_))); + } + + #[test] + fn test_parse_declaration_range() { + let range_result = PortDeclaration::parse("1/1/1-1/1/4").unwrap(); + assert!(matches!(range_result, PortDeclaration::Range(_, _))); + } + + #[test] + fn test_parse_declaration_set() { + let set_result = PortDeclaration::parse("1/1/48*2/1/48").unwrap(); + assert!(matches!(set_result, PortDeclaration::Set(_, _))); + } +} diff --git a/k3d/src/lib.rs b/k3d/src/lib.rs index 7117d72..63611f4 100644 --- a/k3d/src/lib.rs +++ b/k3d/src/lib.rs @@ -2,8 +2,8 @@ mod downloadable_asset; use downloadable_asset::*; use kube::Client; -use log::debug; -use std::path::PathBuf; +use log::{debug, info}; +use std::{ffi::OsStr, path::PathBuf}; const K3D_BIN_FILE_NAME: &str = "k3d"; @@ -213,15 +213,19 @@ impl K3d { } } + let client; if !self.is_cluster_initialized() { debug!("Cluster is not initialized, initializing now"); - return self.initialize_cluster().await; + client = self.initialize_cluster().await?; + } else { + self.start_cluster().await?; + + debug!("K3d and cluster are already properly set up"); + client = self.create_kubernetes_client().await?; } - self.start_cluster().await?; - - debug!("K3d and cluster are already properly set up"); - self.create_kubernetes_client().await + self.ensure_k3d_config_is_default(self.get_cluster_name()?)?; + Ok(client) } // Private helper methods @@ -302,7 +306,16 @@ impl K3d { S: AsRef<OsStr>, { let binary_path = self.get_k3d_binary()?; - let output = std::process::Command::new(binary_path).args(args).output(); + self.run_command(binary_path, args) + } + + pub fn run_command<C, I, S>(&self, cmd: C, args: I) -> Result<std::process::Output, String> + where + I: IntoIterator<Item = S>, + S: AsRef<OsStr>, + C: AsRef<OsStr>, + { + let output = std::process::Command::new(cmd).args(args).output(); match output { Ok(output) => { let stderr = String::from_utf8_lossy(&output.stderr); @@ -311,7 +324,7 @@ impl K3d { debug!("stdout : {}", stdout); Ok(output) } - Err(e) => Err(format!("Failed to execute k3d command: {}", e)), + Err(e) => Err(format!("Failed to execute command: {}", e)), } } @@ -323,12 +336,38 @@ impl K3d { return Err(format!("Failed to create cluster: {}", stderr)); } - debug!("Successfully created k3d cluster '{}'", cluster_name); + info!("Successfully created k3d cluster '{}'", cluster_name); + Ok(()) + } + + fn ensure_k3d_config_is_default(&self, cluster_name: &str) -> Result<(), String> { + let output = self.run_k3d_command(["kubeconfig", "merge", "-d", cluster_name])?; + + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + return Err(format!("Failed to setup k3d kubeconfig : {}", stderr)); + } + + let output = self.run_command( + "kubectl", + ["config", "use-context", &format!("k3d-{cluster_name}")], + )?; + + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + return Err(format!( + "Failed to switch kubectl context to k3d : {}", + stderr + )); + } + info!( + "kubectl is now using 'k3d-{}' as default context", + cluster_name + ); Ok(()) } async fn create_kubernetes_client(&self) -> Result<Client, String> { - // TODO: Connect the client to the right k3d cluster (see https://git.nationtech.io/NationTech/harmony/issues/92) Client::try_default() .await .map_err(|e| format!("Failed to create Kubernetes client: {}", e))