forked from NationTech/harmony

Compare commits

44 Commits

| Author | SHA1 | Date |
|---|---|---|
| | c80ede706b | |
| | b2825ec1ef | |
| | 609d7acb5d | |
| | de761cf538 | |
| | c069207f12 | |
| | 7368184917 | |
| | 05205f4ac1 | |
| | 3174645c97 | |
| | 7536f4ec4b | |
| | 464347d3e5 | |
| | 7f415f5b98 | |
| | 2a520a1d7c | |
| | 987f195e2f | |
| | 14d1823d15 | |
| | 2a48d51479 | |
| | 20a227bb41 | |
| | ce91ee0168 | |
| | ed7f81aa1f | |
| | cb66b7592e | |
| | a815f6ac9c | |
| | 2d891e4463 | |
| | f66e58b9ca | |
| | ea39d93aa7 | |
| | 6989d208cf | |
| | c0d54a4466 | |
| | fc384599a1 | |
| | c0bd8007c7 | |
| | 7dff70edcf | |
| | 06a0c44c3c | |
| | 85bec66e58 | |
| | 1f3796f503 | |
| | cf576192a8 | |
| | 5f78300d78 | |
| | f7e9669009 | |
| | 2d3c32469c | |
| | f65e16df7b | |
| | 1cec398d4d | |
| | 58b6268989 | |
| | cbbaae2ac8 | |
| | 4a500e4eb7 | |
| | f073b7e5fb | |
| | c84b2413ed | |
| | f83fd09f11 | |
| | c15bd53331 | |

Cargo.lock (generated, 65 changed lines)

@@ -429,6 +429,15 @@ dependencies = [
 "wait-timeout",
]

[[package]]
name = "assertor"
version = "0.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4ff24d87260733dc86d38a11c60d9400ce4a74a05d0dafa2a6f5ab249cd857cb"
dependencies = [
 "num-traits",
]

[[package]]
name = "async-broadcast"
version = "0.7.2"
@@ -665,6 +674,22 @@ dependencies = [
 "serde_with",
]

[[package]]
name = "brocade"
version = "0.1.0"
dependencies = [
 "async-trait",
 "env_logger",
 "harmony_secret",
 "harmony_types",
 "log",
 "regex",
 "russh",
 "russh-keys",
 "serde",
 "tokio",
]

[[package]]
name = "brotli"
version = "8.0.2"
@@ -1755,6 +1780,7 @@ dependencies = [
name = "example-nanodc"
version = "0.1.0"
dependencies = [
 "brocade",
 "cidr",
 "env_logger",
 "harmony",
@@ -1763,6 +1789,7 @@ dependencies = [
 "harmony_tui",
 "harmony_types",
 "log",
 "serde",
 "tokio",
 "url",
]
@@ -1781,6 +1808,7 @@ dependencies = [
name = "example-okd-install"
version = "0.1.0"
dependencies = [
 "brocade",
 "cidr",
 "env_logger",
 "harmony",
@@ -1795,17 +1823,32 @@ dependencies = [
 "url",
]

[[package]]
name = "example-openbao"
version = "0.1.0"
dependencies = [
 "harmony",
 "harmony_cli",
 "harmony_macros",
 "harmony_types",
 "tokio",
 "url",
]

[[package]]
name = "example-opnsense"
version = "0.1.0"
dependencies = [
 "brocade",
 "cidr",
 "env_logger",
 "harmony",
 "harmony_macros",
 "harmony_secret",
 "harmony_tui",
 "harmony_types",
 "log",
 "serde",
 "tokio",
 "url",
]
@@ -1814,6 +1857,7 @@ dependencies = [
name = "example-pxe"
version = "0.1.0"
dependencies = [
 "brocade",
 "cidr",
 "env_logger",
 "harmony",
@@ -1828,6 +1872,15 @@ dependencies = [
 "url",
]

[[package]]
name = "example-remove-rook-osd"
version = "0.1.0"
dependencies = [
 "harmony",
 "harmony_cli",
 "tokio",
]

[[package]]
name = "example-rust"
version = "0.1.0"
@@ -2305,9 +2358,11 @@ name = "harmony"
version = "0.1.0"
dependencies = [
 "askama",
 "assertor",
 "async-trait",
 "base64 0.22.1",
 "bollard",
 "brocade",
 "chrono",
 "cidr",
 "convert_case",
@@ -2338,6 +2393,7 @@ dependencies = [
 "once_cell",
 "opnsense-config",
 "opnsense-config-xml",
 "option-ext",
 "pretty_assertions",
 "reqwest 0.11.27",
 "russh",
@@ -3878,6 +3934,7 @@ checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e"
name = "opnsense-config"
version = "0.1.0"
dependencies = [
 "assertor",
 "async-trait",
 "chrono",
 "env_logger",
@@ -4537,9 +4594,9 @@ dependencies = [

[[package]]
name = "regex"
version = "1.11.2"
version = "1.11.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "23d7fd106d8c02486a8d64e778353d1cffe08ce79ac2e82f540c86d0facf6912"
checksum = "8b5288124840bee7b386bc413c487869b360b2b4ec421ea56425128692f2a82c"
dependencies = [
 "aho-corasick 1.1.3",
 "memchr",
@@ -4549,9 +4606,9 @@ dependencies = [

[[package]]
name = "regex-automata"
version = "0.4.10"
version = "0.4.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6b9458fa0bfeeac22b5ca447c63aaf45f28439a709ccd244698632f9aa6394d6"
checksum = "833eb9ce86d40ef33cb1306d8accf7bc8ec2bfea4355cbdebb3df68b40925cad"
dependencies = [
 "aho-corasick 1.1.3",
 "memchr",

Cargo.toml (15 changed lines)

@@ -14,7 +14,9 @@ members = [
  "harmony_composer",
  "harmony_inventory_agent",
  "harmony_secret_derive",
  "harmony_secret", "adr/agent_discovery/mdns",
  "harmony_secret",
  "adr/agent_discovery/mdns",
  "brocade",
]

[workspace.package]
@@ -66,5 +68,12 @@ thiserror = "2.0.14"
serde = { version = "1.0.209", features = ["derive", "rc"] }
serde_json = "1.0.127"
askama = "0.14"
sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite" ] }
reqwest = { version = "0.12", features = ["blocking", "stream", "rustls-tls", "http2", "json"], default-features = false }
sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite"] }
reqwest = { version = "0.12", features = [
  "blocking",
  "stream",
  "rustls-tls",
  "http2",
  "json",
], default-features = false }
assertor = "0.0.4"

brocade/Cargo.toml (new file, 18 lines)

@@ -0,0 +1,18 @@
[package]
name = "brocade"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true

[dependencies]
async-trait.workspace = true
harmony_types = { path = "../harmony_types" }
russh.workspace = true
russh-keys.workspace = true
tokio.workspace = true
log.workspace = true
env_logger.workspace = true
regex = "1.11.3"
harmony_secret = { path = "../harmony_secret" }
serde.workspace = true

brocade/examples/main.rs (new file, 70 lines)

@@ -0,0 +1,70 @@
use std::net::{IpAddr, Ipv4Addr};

use brocade::BrocadeOptions;
use harmony_secret::{Secret, SecretManager};
use harmony_types::switch::PortLocation;
use serde::{Deserialize, Serialize};

#[derive(Secret, Clone, Debug, Serialize, Deserialize)]
struct BrocadeSwitchAuth {
    username: String,
    password: String,
}

#[tokio::main]
async fn main() {
    env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init();

    // let ip = IpAddr::V4(Ipv4Addr::new(10, 0, 0, 250)); // old brocade @ ianlet
    let ip = IpAddr::V4(Ipv4Addr::new(192, 168, 55, 101)); // brocade @ sto1
    // let ip = IpAddr::V4(Ipv4Addr::new(192, 168, 4, 11)); // brocade @ st
    let switch_addresses = vec![ip];

    let config = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
        .await
        .unwrap();

    let brocade = brocade::init(
        &switch_addresses,
        22,
        &config.username,
        &config.password,
        Some(BrocadeOptions {
            dry_run: true,
            ..Default::default()
        }),
    )
    .await
    .expect("Brocade client failed to connect");

    let entries = brocade.get_stack_topology().await.unwrap();
    println!("Stack topology: {entries:#?}");

    let entries = brocade.get_interfaces().await.unwrap();
    println!("Interfaces: {entries:#?}");

    let version = brocade.version().await.unwrap();
    println!("Version: {version:?}");

    println!("--------------");
    let mac_adddresses = brocade.get_mac_address_table().await.unwrap();
    println!("VLAN\tMAC\t\t\tPORT");
    for mac in mac_adddresses {
        println!("{}\t{}\t{}", mac.vlan, mac.mac_address, mac.port);
    }

    println!("--------------");
    let channel_name = "1";
    brocade.clear_port_channel(channel_name).await.unwrap();

    println!("--------------");
    let channel_id = brocade.find_available_channel_id().await.unwrap();

    println!("--------------");
    let channel_name = "HARMONY_LAG";
    let ports = [PortLocation(2, 0, 35)];
    brocade
        .create_port_channel(channel_id, channel_name, &ports)
        .await
        .unwrap();
}

brocade/src/fast_iron.rs (new file, 212 lines)

@@ -0,0 +1,212 @@
use super::BrocadeClient;
use crate::{
    BrocadeInfo, Error, ExecutionMode, InterSwitchLink, InterfaceInfo, MacAddressEntry,
    PortChannelId, PortOperatingMode, parse_brocade_mac_address, shell::BrocadeShell,
};

use async_trait::async_trait;
use harmony_types::switch::{PortDeclaration, PortLocation};
use log::{debug, info};
use regex::Regex;
use std::{collections::HashSet, str::FromStr};

#[derive(Debug)]
pub struct FastIronClient {
    shell: BrocadeShell,
    version: BrocadeInfo,
}

impl FastIronClient {
    pub fn init(mut shell: BrocadeShell, version_info: BrocadeInfo) -> Self {
        shell.before_all(vec!["skip-page-display".into()]);
        shell.after_all(vec!["page".into()]);

        Self {
            shell,
            version: version_info,
        }
    }

    fn parse_mac_entry(&self, line: &str) -> Option<Result<MacAddressEntry, Error>> {
        debug!("[Brocade] Parsing mac address entry: {line}");
        let parts: Vec<&str> = line.split_whitespace().collect();
        if parts.len() < 3 {
            return None;
        }

        let (vlan, mac_address, port) = match parts.len() {
            3 => (
                u16::from_str(parts[0]).ok()?,
                parse_brocade_mac_address(parts[1]).ok()?,
                parts[2].to_string(),
            ),
            _ => (
                1,
                parse_brocade_mac_address(parts[0]).ok()?,
                parts[1].to_string(),
            ),
        };

        let port =
            PortDeclaration::parse(&port).map_err(|e| Error::UnexpectedError(format!("{e}")));

        match port {
            Ok(p) => Some(Ok(MacAddressEntry {
                vlan,
                mac_address,
                port: p,
            })),
            Err(e) => Some(Err(e)),
        }
    }

    fn parse_stack_port_entry(&self, line: &str) -> Option<Result<InterSwitchLink, Error>> {
        debug!("[Brocade] Parsing stack port entry: {line}");
        let parts: Vec<&str> = line.split_whitespace().collect();
        if parts.len() < 10 {
            return None;
        }

        let local_port = PortLocation::from_str(parts[0]).ok()?;

        Some(Ok(InterSwitchLink {
            local_port,
            remote_port: None,
        }))
    }

    fn build_port_channel_commands(
        &self,
        channel_id: PortChannelId,
        channel_name: &str,
        ports: &[PortLocation],
    ) -> Vec<String> {
        let mut commands = vec![
            "configure terminal".to_string(),
            format!("lag {channel_name} static id {channel_id}"),
        ];

        for port in ports {
            commands.push(format!("ports ethernet {port}"));
        }

        commands.push(format!("primary-port {}", ports[0]));
        commands.push("deploy".into());
        commands.push("exit".into());
        commands.push("write memory".into());
        commands.push("exit".into());

        commands
    }
}

#[async_trait]
impl BrocadeClient for FastIronClient {
    async fn version(&self) -> Result<BrocadeInfo, Error> {
        Ok(self.version.clone())
    }

    async fn get_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error> {
        info!("[Brocade] Showing MAC address table...");

        let output = self
            .shell
            .run_command("show mac-address", ExecutionMode::Regular)
            .await?;

        output
            .lines()
            .skip(2)
            .filter_map(|line| self.parse_mac_entry(line))
            .collect()
    }

    async fn get_stack_topology(&self) -> Result<Vec<InterSwitchLink>, Error> {
        let output = self
            .shell
            .run_command("show interface stack-ports", crate::ExecutionMode::Regular)
            .await?;

        output
            .lines()
            .skip(1)
            .filter_map(|line| self.parse_stack_port_entry(line))
            .collect()
    }

    async fn get_interfaces(&self) -> Result<Vec<InterfaceInfo>, Error> {
        todo!()
    }

    async fn configure_interfaces(
        &self,
        _interfaces: Vec<(String, PortOperatingMode)>,
    ) -> Result<(), Error> {
        todo!()
    }

    async fn find_available_channel_id(&self) -> Result<PortChannelId, Error> {
        info!("[Brocade] Finding next available channel id...");

        let output = self
            .shell
            .run_command("show lag", ExecutionMode::Regular)
            .await?;
        let re = Regex::new(r"=== LAG .* ID\s+(\d+)").expect("Invalid regex");

        let used_ids: HashSet<u8> = output
            .lines()
            .filter_map(|line| {
                re.captures(line)
                    .and_then(|c| c.get(1))
                    .and_then(|id_match| id_match.as_str().parse().ok())
            })
            .collect();

        let mut next_id: u8 = 1;
        loop {
            if !used_ids.contains(&next_id) {
                break;
            }
            next_id += 1;
        }

        info!("[Brocade] Found channel id: {next_id}");
        Ok(next_id)
    }

    async fn create_port_channel(
        &self,
        channel_id: PortChannelId,
        channel_name: &str,
        ports: &[PortLocation],
    ) -> Result<(), Error> {
        info!(
            "[Brocade] Configuring port-channel '{channel_name} {channel_id}' with ports: {ports:?}"
        );

        let commands = self.build_port_channel_commands(channel_id, channel_name, ports);
        self.shell
            .run_commands(commands, ExecutionMode::Privileged)
            .await?;

        info!("[Brocade] Port-channel '{channel_name}' configured.");
        Ok(())
    }

    async fn clear_port_channel(&self, channel_name: &str) -> Result<(), Error> {
        info!("[Brocade] Clearing port-channel: {channel_name}");

        let commands = vec![
            "configure terminal".to_string(),
            format!("no lag {channel_name}"),
            "write memory".to_string(),
        ];
        self.shell
            .run_commands(commands, ExecutionMode::Privileged)
            .await?;

        info!("[Brocade] Port-channel '{channel_name}' cleared.");
        Ok(())
    }
}
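
The FastIron client drives the switch purely through ordered CLI command lists replayed over the privileged shell. As a standalone sketch of the sequence `build_port_channel_commands` assembles for a static LAG (the free function and the plain-string port names below are illustrative stand-ins, not part of the diff above):

// Standalone sketch: mirrors the command list `build_port_channel_commands`
// assembles for a static FastIron LAG. Plain strings stand in for the
// `PortLocation` values so the snippet runs on its own.
fn sketch_port_channel_commands(channel_id: u8, channel_name: &str, ports: &[&str]) -> Vec<String> {
    let mut commands = vec![
        "configure terminal".to_string(),
        format!("lag {channel_name} static id {channel_id}"),
    ];
    for port in ports {
        commands.push(format!("ports ethernet {port}"));
    }
    commands.push(format!("primary-port {}", ports[0]));
    commands.push("deploy".into());
    commands.push("exit".into());
    commands.push("write memory".into());
    commands.push("exit".into());
    commands
}

fn main() {
    // A two-port LAG named HARMONY_LAG with id 1 produces this CLI sequence.
    for line in sketch_port_channel_commands(1, "HARMONY_LAG", &["2/0/35", "2/0/36"]) {
        println!("{line}");
    }
}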

brocade/src/lib.rs (new file, 338 lines)

@@ -0,0 +1,338 @@
use std::net::IpAddr;
use std::{
    fmt::{self, Display},
    time::Duration,
};

use crate::network_operating_system::NetworkOperatingSystemClient;
use crate::{
    fast_iron::FastIronClient,
    shell::{BrocadeSession, BrocadeShell},
};

use async_trait::async_trait;
use harmony_types::net::MacAddress;
use harmony_types::switch::{PortDeclaration, PortLocation};
use regex::Regex;

mod fast_iron;
mod network_operating_system;
mod shell;
mod ssh;

#[derive(Default, Clone, Debug)]
pub struct BrocadeOptions {
    pub dry_run: bool,
    pub ssh: ssh::SshOptions,
    pub timeouts: TimeoutConfig,
}

#[derive(Clone, Debug)]
pub struct TimeoutConfig {
    pub shell_ready: Duration,
    pub command_execution: Duration,
    pub command_output: Duration,
    pub cleanup: Duration,
    pub message_wait: Duration,
}

impl Default for TimeoutConfig {
    fn default() -> Self {
        Self {
            shell_ready: Duration::from_secs(10),
            command_execution: Duration::from_secs(60), // Commands like `deploy` (for a LAG) can take a while
            command_output: Duration::from_secs(5), // Delay to start logging "waiting for command output"
            cleanup: Duration::from_secs(10),
            message_wait: Duration::from_millis(500),
        }
    }
}

enum ExecutionMode {
    Regular,
    Privileged,
}

#[derive(Clone, Debug)]
pub struct BrocadeInfo {
    os: BrocadeOs,
    version: String,
}

#[derive(Clone, Debug)]
pub enum BrocadeOs {
    NetworkOperatingSystem,
    FastIron,
    Unknown,
}

#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]
pub struct MacAddressEntry {
    pub vlan: u16,
    pub mac_address: MacAddress,
    pub port: PortDeclaration,
}

pub type PortChannelId = u8;

/// Represents a single physical or logical link connecting two switches within a stack or fabric.
///
/// This structure provides a standardized view of the topology regardless of the
/// underlying Brocade OS configuration (stacking vs. fabric).
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct InterSwitchLink {
    /// The local port on the switch where the topology command was run.
    pub local_port: PortLocation,
    /// The port on the directly connected neighboring switch.
    pub remote_port: Option<PortLocation>,
}

/// Represents the key running configuration status of a single switch interface.
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct InterfaceInfo {
    /// The full configuration name (e.g., "TenGigabitEthernet 1/0/1", "FortyGigabitEthernet 2/0/2").
    pub name: String,
    /// The physical location of the interface.
    pub port_location: PortLocation,
    /// The parsed type and name prefix of the interface.
    pub interface_type: InterfaceType,
    /// The primary configuration mode defining the interface's behavior (L2, L3, Fabric).
    pub operating_mode: Option<PortOperatingMode>,
    /// Indicates the current state of the interface.
    pub status: InterfaceStatus,
}

/// Categorizes the functional type of a switch interface.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum InterfaceType {
    /// Physical or virtual Ethernet interface (e.g., TenGigabitEthernet, FortyGigabitEthernet).
    Ethernet(String),
}

impl fmt::Display for InterfaceType {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            InterfaceType::Ethernet(name) => write!(f, "{name}"),
        }
    }
}

/// Defines the primary configuration mode of a switch interface, representing mutually exclusive roles.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum PortOperatingMode {
    /// The interface is explicitly configured for Brocade fabric roles (ISL or Trunk enabled).
    Fabric,
    /// The interface is configured for standard Layer 2 switching as Trunk port (`switchport mode trunk`).
    Trunk,
    /// The interface is configured for standard Layer 2 switching as Access port (`switchport` without trunk mode).
    Access,
}

/// Defines the possible status of an interface.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum InterfaceStatus {
    /// The interface is connected.
    Connected,
    /// The interface is not connected and is not expected to be.
    NotConnected,
    /// The interface is not connected but is expected to be (configured with `no shutdown`).
    SfpAbsent,
}

pub async fn init(
    ip_addresses: &[IpAddr],
    port: u16,
    username: &str,
    password: &str,
    options: Option<BrocadeOptions>,
) -> Result<Box<dyn BrocadeClient + Send + Sync>, Error> {
    let shell = BrocadeShell::init(ip_addresses, port, username, password, options).await?;

    let version_info = shell
        .with_session(ExecutionMode::Regular, |session| {
            Box::pin(get_brocade_info(session))
        })
        .await?;

    Ok(match version_info.os {
        BrocadeOs::FastIron => Box::new(FastIronClient::init(shell, version_info)),
        BrocadeOs::NetworkOperatingSystem => {
            Box::new(NetworkOperatingSystemClient::init(shell, version_info))
        }
        BrocadeOs::Unknown => todo!(),
    })
}

#[async_trait]
pub trait BrocadeClient: std::fmt::Debug {
    /// Retrieves the operating system and version details from the connected Brocade switch.
    ///
    /// This is typically the first call made after establishing a connection to determine
    /// the switch OS family (e.g., FastIron, NOS) for feature compatibility.
    ///
    /// # Returns
    ///
    /// A `BrocadeInfo` structure containing parsed OS type and version string.
    async fn version(&self) -> Result<BrocadeInfo, Error>;

    /// Retrieves the dynamically learned MAC address table from the switch.
    ///
    /// This is crucial for discovering where specific network endpoints (MAC addresses)
    /// are currently located on the physical ports.
    ///
    /// # Returns
    ///
    /// A vector of `MacAddressEntry`, where each entry typically contains VLAN, MAC address,
    /// and the associated port name/index.
    async fn get_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error>;

    /// Derives the physical connections used to link multiple switches together
    /// to form a single logical entity (stack, fabric, etc.).
    ///
    /// This abstracts the underlying configuration (e.g., stack ports, fabric ports)
    /// to return a standardized view of the topology.
    ///
    /// # Returns
    ///
    /// A vector of `InterSwitchLink` structs detailing which ports are used for stacking/fabric.
    /// If the switch is not stacked, returns an empty vector.
    async fn get_stack_topology(&self) -> Result<Vec<InterSwitchLink>, Error>;

    /// Retrieves the status for all interfaces
    ///
    /// # Returns
    ///
    /// A vector of `InterfaceInfo` structures.
    async fn get_interfaces(&self) -> Result<Vec<InterfaceInfo>, Error>;

    /// Configures a set of interfaces to be operated with a specified mode (access ports, ISL, etc.).
    async fn configure_interfaces(
        &self,
        interfaces: Vec<(String, PortOperatingMode)>,
    ) -> Result<(), Error>;

    /// Scans the existing configuration to find the next available (unused)
    /// Port-Channel ID (`lag` or `trunk`) for assignment.
    ///
    /// # Returns
    ///
    /// The smallest, unassigned `PortChannelId` within the supported range.
    async fn find_available_channel_id(&self) -> Result<PortChannelId, Error>;

    /// Creates and configures a new Port-Channel (Link Aggregation Group or LAG)
    /// using the specified channel ID and ports.
    ///
    /// The resulting configuration must be persistent (saved to startup-config).
    /// Assumes a static LAG configuration mode unless specified otherwise by the implementation.
    ///
    /// # Parameters
    ///
    /// * `channel_id`: The ID (e.g., 1-128) for the logical port channel.
    /// * `channel_name`: A descriptive name for the LAG (used in configuration context).
    /// * `ports`: A slice of `PortLocation` structs defining the physical member ports.
    async fn create_port_channel(
        &self,
        channel_id: PortChannelId,
        channel_name: &str,
        ports: &[PortLocation],
    ) -> Result<(), Error>;

    /// Removes all configuration associated with the specified Port-Channel name.
    ///
    /// This operation should be idempotent; attempting to clear a non-existent
    /// channel should succeed (or return a benign error).
    ///
    /// # Parameters
    ///
    /// * `channel_name`: The name of the Port-Channel (LAG) to delete.
    ///
    async fn clear_port_channel(&self, channel_name: &str) -> Result<(), Error>;
}

async fn get_brocade_info(session: &mut BrocadeSession) -> Result<BrocadeInfo, Error> {
    let output = session.run_command("show version").await?;

    if output.contains("Network Operating System") {
        let re = Regex::new(r"Network Operating System Version:\s*(?P<version>[a-zA-Z0-9.\-]+)")
            .expect("Invalid regex");
        let version = re
            .captures(&output)
            .and_then(|cap| cap.name("version"))
            .map(|m| m.as_str().to_string())
            .unwrap_or_default();

        return Ok(BrocadeInfo {
            os: BrocadeOs::NetworkOperatingSystem,
            version,
        });
    } else if output.contains("ICX") {
        let re = Regex::new(r"(?m)^\s*SW: Version\s*(?P<version>[a-zA-Z0-9.\-]+)")
            .expect("Invalid regex");
        let version = re
            .captures(&output)
            .and_then(|cap| cap.name("version"))
            .map(|m| m.as_str().to_string())
            .unwrap_or_default();

        return Ok(BrocadeInfo {
            os: BrocadeOs::FastIron,
            version,
        });
    }

    Err(Error::UnexpectedError("Unknown Brocade OS version".into()))
}

fn parse_brocade_mac_address(value: &str) -> Result<MacAddress, String> {
    let cleaned_mac = value.replace('.', "");

    if cleaned_mac.len() != 12 {
        return Err(format!("Invalid MAC address: {value}"));
    }

    let mut bytes = [0u8; 6];
    for (i, pair) in cleaned_mac.as_bytes().chunks(2).enumerate() {
        let byte_str = std::str::from_utf8(pair).map_err(|_| "Invalid UTF-8")?;
        bytes[i] =
            u8::from_str_radix(byte_str, 16).map_err(|_| format!("Invalid hex in MAC: {value}"))?;
    }

    Ok(MacAddress(bytes))
}

#[derive(Debug)]
pub enum Error {
    NetworkError(String),
    AuthenticationError(String),
    ConfigurationError(String),
    TimeoutError(String),
    UnexpectedError(String),
    CommandError(String),
}

impl Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Error::NetworkError(msg) => write!(f, "Network error: {msg}"),
            Error::AuthenticationError(msg) => write!(f, "Authentication error: {msg}"),
            Error::ConfigurationError(msg) => write!(f, "Configuration error: {msg}"),
            Error::TimeoutError(msg) => write!(f, "Timeout error: {msg}"),
            Error::UnexpectedError(msg) => write!(f, "Unexpected error: {msg}"),
            Error::CommandError(msg) => write!(f, "{msg}"),
        }
    }
}

impl From<Error> for String {
    fn from(val: Error) -> Self {
        format!("{val}")
    }
}

impl std::error::Error for Error {}

impl From<russh::Error> for Error {
    fn from(value: russh::Error) -> Self {
        Error::NetworkError(format!("Russh client error: {value}"))
    }
}
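
`parse_brocade_mac_address` strips the dots from the dotted hex form Brocade CLIs print and decodes the remaining 12 hex characters into 6 bytes. A minimal test sketch of that behaviour (hypothetical; it assumes placement as a `#[cfg(test)]` module inside this same `lib.rs`, since the function is private, and the sample MAC string is only an illustration):

#[cfg(test)]
mod mac_parsing_sketch {
    use super::*;

    #[test]
    fn parses_dotted_brocade_mac() {
        // Dots are stripped, then each hex pair becomes one byte.
        let mac = parse_brocade_mac_address("0000.0063.67ff").unwrap();
        assert_eq!(mac, MacAddress([0x00, 0x00, 0x00, 0x63, 0x67, 0xff]));
    }

    #[test]
    fn rejects_inputs_that_are_not_twelve_hex_digits() {
        assert!(parse_brocade_mac_address("0000.0063").is_err());
        assert!(parse_brocade_mac_address("zzzz.zzzz.zzzz").is_err());
    }
}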

brocade/src/network_operating_system.rs (new file, 333 lines)

@@ -0,0 +1,333 @@
use std::str::FromStr;

use async_trait::async_trait;
use harmony_types::switch::{PortDeclaration, PortLocation};
use log::{debug, info};
use regex::Regex;

use crate::{
    BrocadeClient, BrocadeInfo, Error, ExecutionMode, InterSwitchLink, InterfaceInfo,
    InterfaceStatus, InterfaceType, MacAddressEntry, PortChannelId, PortOperatingMode,
    parse_brocade_mac_address, shell::BrocadeShell,
};

#[derive(Debug)]
pub struct NetworkOperatingSystemClient {
    shell: BrocadeShell,
    version: BrocadeInfo,
}

impl NetworkOperatingSystemClient {
    pub fn init(mut shell: BrocadeShell, version_info: BrocadeInfo) -> Self {
        shell.before_all(vec!["terminal length 0".into()]);

        Self {
            shell,
            version: version_info,
        }
    }

    fn parse_mac_entry(&self, line: &str) -> Option<Result<MacAddressEntry, Error>> {
        debug!("[Brocade] Parsing mac address entry: {line}");
        let parts: Vec<&str> = line.split_whitespace().collect();
        if parts.len() < 5 {
            return None;
        }

        let (vlan, mac_address, port) = match parts.len() {
            5 => (
                u16::from_str(parts[0]).ok()?,
                parse_brocade_mac_address(parts[1]).ok()?,
                parts[4].to_string(),
            ),
            _ => (
                u16::from_str(parts[0]).ok()?,
                parse_brocade_mac_address(parts[1]).ok()?,
                parts[5].to_string(),
            ),
        };

        let port =
            PortDeclaration::parse(&port).map_err(|e| Error::UnexpectedError(format!("{e}")));

        match port {
            Ok(p) => Some(Ok(MacAddressEntry {
                vlan,
                mac_address,
                port: p,
            })),
            Err(e) => Some(Err(e)),
        }
    }

    fn parse_inter_switch_link_entry(&self, line: &str) -> Option<Result<InterSwitchLink, Error>> {
        debug!("[Brocade] Parsing inter switch link entry: {line}");
        let parts: Vec<&str> = line.split_whitespace().collect();
        if parts.len() < 10 {
            return None;
        }

        let local_port = PortLocation::from_str(parts[2]).ok()?;
        let remote_port = PortLocation::from_str(parts[5]).ok()?;

        Some(Ok(InterSwitchLink {
            local_port,
            remote_port: Some(remote_port),
        }))
    }

    fn parse_interface_status_entry(&self, line: &str) -> Option<Result<InterfaceInfo, Error>> {
        debug!("[Brocade] Parsing interface status entry: {line}");
        let parts: Vec<&str> = line.split_whitespace().collect();
        if parts.len() < 6 {
            return None;
        }

        let interface_type = match parts[0] {
            "Fo" => InterfaceType::Ethernet("FortyGigabitEthernet".to_string()),
            "Te" => InterfaceType::Ethernet("TenGigabitEthernet".to_string()),
            _ => return None,
        };
        let port_location = PortLocation::from_str(parts[1]).ok()?;
        let status = match parts[2] {
            "connected" => InterfaceStatus::Connected,
            "notconnected" => InterfaceStatus::NotConnected,
            "sfpAbsent" => InterfaceStatus::SfpAbsent,
            _ => return None,
        };
        let operating_mode = match parts[3] {
            "ISL" => Some(PortOperatingMode::Fabric),
            "Trunk" => Some(PortOperatingMode::Trunk),
            "Access" => Some(PortOperatingMode::Access),
            "--" => None,
            _ => return None,
        };

        Some(Ok(InterfaceInfo {
            name: format!("{interface_type} {port_location}"),
            port_location,
            interface_type,
            operating_mode,
            status,
        }))
    }

    fn map_configure_interfaces_error(&self, err: Error) -> Error {
        debug!("[Brocade] {err}");

        if let Error::CommandError(message) = &err {
            if message.contains("switchport")
                && message.contains("Cannot configure aggregator member")
            {
                let re = Regex::new(r"\(conf-if-([a-zA-Z]+)-([\d/]+)\)#").unwrap();

                if let Some(caps) = re.captures(message) {
                    let interface_type = &caps[1];
                    let port_location = &caps[2];
                    let interface = format!("{interface_type} {port_location}");

                    return Error::CommandError(format!(
                        "Cannot configure interface '{interface}', it is a member of a port-channel (LAG)"
                    ));
                }
            }
        }

        err
    }
}

#[async_trait]
impl BrocadeClient for NetworkOperatingSystemClient {
    async fn version(&self) -> Result<BrocadeInfo, Error> {
        Ok(self.version.clone())
    }

    async fn get_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error> {
        let output = self
            .shell
            .run_command("show mac-address-table", ExecutionMode::Regular)
            .await?;

        output
            .lines()
            .skip(1)
            .filter_map(|line| self.parse_mac_entry(line))
            .collect()
    }

    async fn get_stack_topology(&self) -> Result<Vec<InterSwitchLink>, Error> {
        let output = self
            .shell
            .run_command("show fabric isl", ExecutionMode::Regular)
            .await?;

        output
            .lines()
            .skip(6)
            .filter_map(|line| self.parse_inter_switch_link_entry(line))
            .collect()
    }

    async fn get_interfaces(&self) -> Result<Vec<InterfaceInfo>, Error> {
        let output = self
            .shell
            .run_command(
                "show interface status rbridge-id all",
                ExecutionMode::Regular,
            )
            .await?;

        output
            .lines()
            .skip(2)
            .filter_map(|line| self.parse_interface_status_entry(line))
            .collect()
    }

    async fn configure_interfaces(
        &self,
        interfaces: Vec<(String, PortOperatingMode)>,
    ) -> Result<(), Error> {
        info!("[Brocade] Configuring {} interface(s)...", interfaces.len());

        let mut commands = vec!["configure terminal".to_string()];

        for interface in interfaces {
            commands.push(format!("interface {}", interface.0));

            match interface.1 {
                PortOperatingMode::Fabric => {
                    commands.push("fabric isl enable".into());
                    commands.push("fabric trunk enable".into());
                }
                PortOperatingMode::Trunk => {
                    commands.push("switchport".into());
                    commands.push("switchport mode trunk".into());
                    commands.push("no spanning-tree shutdown".into());
                    commands.push("no fabric isl enable".into());
                    commands.push("no fabric trunk enable".into());
                }
                PortOperatingMode::Access => {
                    commands.push("switchport".into());
                    commands.push("switchport mode access".into());
                    commands.push("switchport access vlan 1".into());
                    commands.push("no spanning-tree shutdown".into());
                    commands.push("no fabric isl enable".into());
                    commands.push("no fabric trunk enable".into());
                }
            }

            commands.push("no shutdown".into());
            commands.push("exit".into());
        }

        self.shell
            .run_commands(commands, ExecutionMode::Regular)
            .await
            .map_err(|err| self.map_configure_interfaces_error(err))?;

        info!("[Brocade] Interfaces configured.");

        Ok(())
    }

    async fn find_available_channel_id(&self) -> Result<PortChannelId, Error> {
        info!("[Brocade] Finding next available channel id...");

        let output = self
            .shell
            .run_command("show port-channel summary", ExecutionMode::Regular)
            .await?;

        let used_ids: Vec<u8> = output
            .lines()
            .skip(6)
            .filter_map(|line| {
                let parts: Vec<&str> = line.split_whitespace().collect();
                if parts.len() < 8 {
                    return None;
                }

                u8::from_str(parts[0]).ok()
            })
            .collect();

        let mut next_id: u8 = 1;
        loop {
            if !used_ids.contains(&next_id) {
                break;
            }
            next_id += 1;
        }

        info!("[Brocade] Found channel id: {next_id}");
        Ok(next_id)
    }

    async fn create_port_channel(
        &self,
        channel_id: PortChannelId,
        channel_name: &str,
        ports: &[PortLocation],
    ) -> Result<(), Error> {
        info!(
            "[Brocade] Configuring port-channel '{channel_id} {channel_name}' with ports: {}",
            ports
                .iter()
                .map(|p| format!("{p}"))
                .collect::<Vec<String>>()
                .join(", ")
        );

        let interfaces = self.get_interfaces().await?;

        let mut commands = vec![
            "configure terminal".into(),
            format!("interface port-channel {}", channel_id),
            "no shutdown".into(),
            "exit".into(),
        ];

        for port in ports {
            let interface = interfaces.iter().find(|i| i.port_location == *port);
            let Some(interface) = interface else {
                continue;
            };

            commands.push(format!("interface {}", interface.name));
            commands.push("no switchport".into());
            commands.push("no ip address".into());
            commands.push("no fabric isl enable".into());
            commands.push("no fabric trunk enable".into());
            commands.push(format!("channel-group {channel_id} mode active"));
            commands.push("no shutdown".into());
            commands.push("exit".into());
        }

        self.shell
            .run_commands(commands, ExecutionMode::Regular)
            .await?;

        info!("[Brocade] Port-channel '{channel_name}' configured.");

        Ok(())
    }

    async fn clear_port_channel(&self, channel_name: &str) -> Result<(), Error> {
        info!("[Brocade] Clearing port-channel: {channel_name}");

        let commands = vec![
            "configure terminal".into(),
            format!("no interface port-channel {}", channel_name),
            "exit".into(),
        ];

        self.shell
            .run_commands(commands, ExecutionMode::Regular)
            .await?;

        info!("[Brocade] Port-channel '{channel_name}' cleared.");
        Ok(())
    }
}
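
Both OS backends pick a LAG/port-channel ID the same way: parse the IDs already present in the switch output, then walk upward from 1 until an unused one is found. A self-contained sketch of that selection step (the helper name below is illustrative, not part of the crate):

// Mirrors the scan in `find_available_channel_id`: take the smallest id,
// starting at 1, that is not already used by an existing port-channel/LAG.
fn smallest_free_id(used_ids: &[u8]) -> u8 {
    let mut next_id: u8 = 1;
    while used_ids.contains(&next_id) {
        next_id += 1;
    }
    next_id
}

fn main() {
    // With port-channels 1, 2 and 4 already configured, the next LAG gets id 3.
    assert_eq!(smallest_free_id(&[1, 2, 4]), 3);
    // With none configured, the scan stops immediately at 1.
    assert_eq!(smallest_free_id(&[]), 1);
}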
370 brocade/src/shell.rs Normal file
@ -0,0 +1,370 @@
use std::net::IpAddr;
use std::time::Duration;
use std::time::Instant;

use crate::BrocadeOptions;
use crate::Error;
use crate::ExecutionMode;
use crate::TimeoutConfig;
use crate::ssh;

use log::debug;
use log::info;
use russh::ChannelMsg;
use tokio::time::timeout;

#[derive(Debug)]
pub struct BrocadeShell {
    ip: IpAddr,
    port: u16,
    username: String,
    password: String,
    options: BrocadeOptions,
    before_all_commands: Vec<String>,
    after_all_commands: Vec<String>,
}

impl BrocadeShell {
    pub async fn init(
        ip_addresses: &[IpAddr],
        port: u16,
        username: &str,
        password: &str,
        options: Option<BrocadeOptions>,
    ) -> Result<Self, Error> {
        let ip = ip_addresses
            .first()
            .ok_or_else(|| Error::ConfigurationError("No IP addresses provided".to_string()))?;

        let base_options = options.unwrap_or_default();
        let options = ssh::try_init_client(username, password, ip, base_options).await?;

        Ok(Self {
            ip: *ip,
            port,
            username: username.to_string(),
            password: password.to_string(),
            before_all_commands: vec![],
            after_all_commands: vec![],
            options,
        })
    }

    pub async fn open_session(&self, mode: ExecutionMode) -> Result<BrocadeSession, Error> {
        BrocadeSession::open(
            self.ip,
            self.port,
            &self.username,
            &self.password,
            self.options.clone(),
            mode,
        )
        .await
    }

    pub async fn with_session<F, R>(&self, mode: ExecutionMode, callback: F) -> Result<R, Error>
    where
        F: FnOnce(
            &mut BrocadeSession,
        ) -> std::pin::Pin<
            Box<dyn std::future::Future<Output = Result<R, Error>> + Send + '_>,
        >,
    {
        let mut session = self.open_session(mode).await?;

        let _ = session.run_commands(self.before_all_commands.clone()).await;
        let result = callback(&mut session).await;
        let _ = session.run_commands(self.after_all_commands.clone()).await;

        session.close().await?;
        result
    }

    pub async fn run_command(&self, command: &str, mode: ExecutionMode) -> Result<String, Error> {
        let mut session = self.open_session(mode).await?;

        let _ = session.run_commands(self.before_all_commands.clone()).await;
        let result = session.run_command(command).await;
        let _ = session.run_commands(self.after_all_commands.clone()).await;

        session.close().await?;
        result
    }

    pub async fn run_commands(
        &self,
        commands: Vec<String>,
        mode: ExecutionMode,
    ) -> Result<(), Error> {
        let mut session = self.open_session(mode).await?;

        let _ = session.run_commands(self.before_all_commands.clone()).await;
        let result = session.run_commands(commands).await;
        let _ = session.run_commands(self.after_all_commands.clone()).await;

        session.close().await?;
        result
    }

    pub fn before_all(&mut self, commands: Vec<String>) {
        self.before_all_commands = commands;
    }

    pub fn after_all(&mut self, commands: Vec<String>) {
        self.after_all_commands = commands;
    }
}

pub struct BrocadeSession {
    pub channel: russh::Channel<russh::client::Msg>,
    pub mode: ExecutionMode,
    pub options: BrocadeOptions,
}

impl BrocadeSession {
    pub async fn open(
        ip: IpAddr,
        port: u16,
        username: &str,
        password: &str,
        options: BrocadeOptions,
        mode: ExecutionMode,
    ) -> Result<Self, Error> {
        let client = ssh::create_client(ip, port, username, password, &options).await?;
        let mut channel = client.channel_open_session().await?;

        channel
            .request_pty(false, "vt100", 80, 24, 0, 0, &[])
            .await?;
        channel.request_shell(false).await?;

        wait_for_shell_ready(&mut channel, &options.timeouts).await?;

        if let ExecutionMode::Privileged = mode {
            try_elevate_session(&mut channel, username, password, &options.timeouts).await?;
        }

        Ok(Self {
            channel,
            mode,
            options,
        })
    }

    pub async fn close(&mut self) -> Result<(), Error> {
        debug!("[Brocade] Closing session...");

        self.channel.data(&b"exit\n"[..]).await?;
        if let ExecutionMode::Privileged = self.mode {
            self.channel.data(&b"exit\n"[..]).await?;
        }

        let start = Instant::now();
        while start.elapsed() < self.options.timeouts.cleanup {
            match timeout(self.options.timeouts.message_wait, self.channel.wait()).await {
                Ok(Some(ChannelMsg::Close)) => break,
                Ok(Some(_)) => continue,
                Ok(None) | Err(_) => break,
            }
        }

        debug!("[Brocade] Session closed.");
        Ok(())
    }

    pub async fn run_command(&mut self, command: &str) -> Result<String, Error> {
        if self.should_skip_command(command) {
            return Ok(String::new());
        }

        debug!("[Brocade] Running command: '{command}'...");

        self.channel
            .data(format!("{}\n", command).as_bytes())
            .await?;
        tokio::time::sleep(Duration::from_millis(100)).await;

        let output = self.collect_command_output().await?;
        let output = String::from_utf8(output)
            .map_err(|_| Error::UnexpectedError("Invalid UTF-8 in command output".to_string()))?;

        self.check_for_command_errors(&output, command)?;
        Ok(output)
    }

    pub async fn run_commands(&mut self, commands: Vec<String>) -> Result<(), Error> {
        for command in commands {
            self.run_command(&command).await?;
        }
        Ok(())
    }

    fn should_skip_command(&self, command: &str) -> bool {
        if (command.starts_with("write") || command.starts_with("deploy")) && self.options.dry_run {
            info!("[Brocade] Dry-run mode enabled, skipping command: {command}");
            return true;
        }
        false
    }

    async fn collect_command_output(&mut self) -> Result<Vec<u8>, Error> {
        let mut output = Vec::new();
        let start = Instant::now();
        let read_timeout = Duration::from_millis(500);
        let log_interval = Duration::from_secs(5);
        let mut last_log = Instant::now();

        loop {
            if start.elapsed() > self.options.timeouts.command_execution {
                return Err(Error::TimeoutError(
                    "Timeout waiting for command completion.".into(),
                ));
            }

            if start.elapsed() > self.options.timeouts.command_output
                && last_log.elapsed() > log_interval
            {
                info!("[Brocade] Waiting for command output...");
                last_log = Instant::now();
            }

            match timeout(read_timeout, self.channel.wait()).await {
                Ok(Some(ChannelMsg::Data { data } | ChannelMsg::ExtendedData { data, .. })) => {
                    output.extend_from_slice(&data);
                    let current_output = String::from_utf8_lossy(&output);
                    if current_output.contains('>') || current_output.contains('#') {
                        return Ok(output);
                    }
                }
                Ok(Some(ChannelMsg::Eof | ChannelMsg::Close)) => return Ok(output),
                Ok(Some(ChannelMsg::ExitStatus { exit_status })) => {
                    debug!("[Brocade] Command exit status: {exit_status}");
                }
                Ok(Some(_)) => continue,
                Ok(None) | Err(_) => {
                    if output.is_empty() {
                        if let Ok(None) = timeout(read_timeout, self.channel.wait()).await {
                            break;
                        }
                        continue;
                    }

                    tokio::time::sleep(Duration::from_millis(100)).await;
                    let current_output = String::from_utf8_lossy(&output);
                    if current_output.contains('>') || current_output.contains('#') {
                        return Ok(output);
                    }
                }
            }
        }

        Ok(output)
    }

    fn check_for_command_errors(&self, output: &str, command: &str) -> Result<(), Error> {
        const ERROR_PATTERNS: &[&str] = &[
            "invalid input",
            "syntax error",
            "command not found",
            "unknown command",
            "permission denied",
            "access denied",
            "authentication failed",
            "configuration error",
            "failed to",
            "error:",
        ];

        let output_lower = output.to_lowercase();
        if ERROR_PATTERNS.iter().any(|&p| output_lower.contains(p)) {
            return Err(Error::CommandError(format!(
                "Command error: {}",
                output.trim()
            )));
        }

        if !command.starts_with("show") && output.trim().is_empty() {
            return Err(Error::CommandError(format!(
                "Command '{command}' produced no output"
            )));
        }

        Ok(())
    }
}

async fn wait_for_shell_ready(
    channel: &mut russh::Channel<russh::client::Msg>,
    timeouts: &TimeoutConfig,
) -> Result<(), Error> {
    let mut buffer = Vec::new();
    let start = Instant::now();

    while start.elapsed() < timeouts.shell_ready {
        match timeout(timeouts.message_wait, channel.wait()).await {
            Ok(Some(ChannelMsg::Data { data })) => {
                buffer.extend_from_slice(&data);
                let output = String::from_utf8_lossy(&buffer);
                let output = output.trim();
                if output.ends_with('>') || output.ends_with('#') {
                    debug!("[Brocade] Shell ready");
                    return Ok(());
                }
            }
            Ok(Some(_)) => continue,
            Ok(None) => break,
            Err(_) => continue,
        }
    }
    Ok(())
}

async fn try_elevate_session(
    channel: &mut russh::Channel<russh::client::Msg>,
    username: &str,
    password: &str,
    timeouts: &TimeoutConfig,
) -> Result<(), Error> {
    channel.data(&b"enable\n"[..]).await?;
    let start = Instant::now();
    let mut buffer = Vec::new();

    while start.elapsed() < timeouts.shell_ready {
        match timeout(timeouts.message_wait, channel.wait()).await {
            Ok(Some(ChannelMsg::Data { data })) => {
                buffer.extend_from_slice(&data);
                let output = String::from_utf8_lossy(&buffer);

                if output.ends_with('#') {
                    debug!("[Brocade] Privileged mode established");
                    return Ok(());
                }

                if output.contains("User Name:") {
                    channel.data(format!("{}\n", username).as_bytes()).await?;
                    buffer.clear();
                } else if output.contains("Password:") {
                    channel.data(format!("{}\n", password).as_bytes()).await?;
                    buffer.clear();
                } else if output.contains('>') {
                    return Err(Error::AuthenticationError(
                        "Enable authentication failed".into(),
                    ));
                }
            }
            Ok(Some(_)) => continue,
            Ok(None) => break,
            Err(_) => continue,
        }
    }

    let output = String::from_utf8_lossy(&buffer);
    if output.ends_with('#') {
        debug!("[Brocade] Privileged mode established");
        Ok(())
    } else {
        Err(Error::AuthenticationError(format!(
            "Enable failed. Output:\n{output}"
        )))
    }
}
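Since with_session expects a callback returning a pinned, boxed future, a minimal usage sketch looks like the following; the helper name and the "show version" command are illustrative, not part of the diff:

// Hypothetical helper showing how BrocadeShell::with_session is meant to be called.
async fn show_version(shell: &BrocadeShell) -> Result<String, Error> {
    shell
        .with_session(ExecutionMode::Regular, |session| {
            Box::pin(async move {
                // Any command available on the switch CLI can be run inside the session.
                session.run_command("show version").await
            })
        })
        .await
}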
							
								
								
									
113 brocade/src/ssh.rs Normal file
@ -0,0 +1,113 @@
use std::borrow::Cow;
use std::sync::Arc;

use async_trait::async_trait;
use russh::client::Handler;
use russh::kex::DH_G1_SHA1;
use russh::kex::ECDH_SHA2_NISTP256;
use russh_keys::key::SSH_RSA;

use super::BrocadeOptions;
use super::Error;

#[derive(Default, Clone, Debug)]
pub struct SshOptions {
    pub preferred_algorithms: russh::Preferred,
}

impl SshOptions {
    fn ecdhsa_sha2_nistp256() -> Self {
        Self {
            preferred_algorithms: russh::Preferred {
                kex: Cow::Borrowed(&[ECDH_SHA2_NISTP256]),
                key: Cow::Borrowed(&[SSH_RSA]),
                ..Default::default()
            },
        }
    }

    fn legacy() -> Self {
        Self {
            preferred_algorithms: russh::Preferred {
                kex: Cow::Borrowed(&[DH_G1_SHA1]),
                key: Cow::Borrowed(&[SSH_RSA]),
                ..Default::default()
            },
        }
    }
}

pub struct Client;

#[async_trait]
impl Handler for Client {
    type Error = Error;

    async fn check_server_key(
        &mut self,
        _server_public_key: &russh_keys::key::PublicKey,
    ) -> Result<bool, Self::Error> {
        Ok(true)
    }
}

pub async fn try_init_client(
    username: &str,
    password: &str,
    ip: &std::net::IpAddr,
    base_options: BrocadeOptions,
) -> Result<BrocadeOptions, Error> {
    let ssh_options = vec![
        SshOptions::default(),
        SshOptions::ecdhsa_sha2_nistp256(),
        SshOptions::legacy(),
    ];

    for ssh in ssh_options {
        let opts = BrocadeOptions {
            ssh,
            ..base_options.clone()
        };
        let client = create_client(*ip, 22, username, password, &opts).await;

        match client {
            Ok(_) => {
                return Ok(opts);
            }
            Err(e) => match e {
                Error::NetworkError(e) => {
                    if e.contains("No common key exchange algorithm") {
                        continue;
                    } else {
                        return Err(Error::NetworkError(e));
                    }
                }
                _ => return Err(e),
            },
        }
    }

    Err(Error::NetworkError(
        "Could not establish ssh connection: wrong key exchange algorithm".to_string(),
    ))
}

pub async fn create_client(
    ip: std::net::IpAddr,
    port: u16,
    username: &str,
    password: &str,
    options: &BrocadeOptions,
) -> Result<russh::client::Handle<Client>, Error> {
    let config = russh::client::Config {
        preferred: options.ssh.preferred_algorithms.clone(),
        ..Default::default()
    };
    let mut client = russh::client::connect(Arc::new(config), (ip, port), Client {}).await?;
    if !client.authenticate_password(username, password).await? {
        return Err(Error::AuthenticationError(
            "ssh authentication failed".to_string(),
        ));
    }
    Ok(client)
}
@ -17,3 +17,5 @@ harmony_secret = { path = "../../harmony_secret" }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
serde = { workspace = true }
brocade = { path = "../../brocade" }

@ -3,12 +3,13 @@ use std::{
    sync::Arc,
};

use brocade::BrocadeOptions;
use cidr::Ipv4Cidr;
use harmony::{
    config::secret::SshKeyPair,
    data::{FileContent, FilePath},
    hardware::{HostCategory, Location, PhysicalHost, SwitchGroup},
    infra::opnsense::OPNSenseManagementInterface,
    infra::{brocade::BrocadeSwitchClient, opnsense::OPNSenseManagementInterface},
    inventory::Inventory,
    modules::{
        http::StaticFilesHttpScore,
@ -22,8 +23,9 @@ use harmony::{
    topology::{LogicalHost, UnmanagedRouter},
};
use harmony_macros::{ip, mac_address};
use harmony_secret::SecretManager;
use harmony_secret::{Secret, SecretManager};
use harmony_types::net::Url;
use serde::{Deserialize, Serialize};

#[tokio::main]
async fn main() {
@ -32,6 +34,26 @@ async fn main() {
        name: String::from("fw0"),
    };

    let switch_auth = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
        .await
        .expect("Failed to get credentials");

    let switches: Vec<IpAddr> = vec![ip!("192.168.33.101")];
    let brocade_options = Some(BrocadeOptions {
        dry_run: *harmony::config::DRY_RUN,
        ..Default::default()
    });
    let switch_client = BrocadeSwitchClient::init(
        &switches,
        &switch_auth.username,
        &switch_auth.password,
        brocade_options,
    )
    .await
    .expect("Failed to connect to switch");

    let switch_client = Arc::new(switch_client);

    let opnsense = Arc::new(
        harmony::infra::opnsense::OPNSenseFirewall::new(firewall, None, "root", "opnsense").await,
    );
@ -39,6 +61,7 @@ async fn main() {
    let gateway_ipv4 = Ipv4Addr::new(192, 168, 33, 1);
    let gateway_ip = IpAddr::V4(gateway_ipv4);
    let topology = harmony::topology::HAClusterTopology {
        kubeconfig: None,
        domain_name: "ncd0.harmony.mcd".to_string(), // TODO this must be set manually correctly
        // when setting up the opnsense firewall
        router: Arc::new(UnmanagedRouter::new(
@ -83,7 +106,7 @@ async fn main() {
                name: "wk2".to_string(),
            },
        ],
        switch: vec![],
        switch_client: switch_client.clone(),
    };

    let inventory = Inventory {
@ -166,3 +189,9 @@ async fn main() {
    .await
    .unwrap();
}

#[derive(Secret, Serialize, Deserialize, Debug)]
pub struct BrocadeSwitchAuth {
    pub username: String,
    pub password: String,
}

@ -19,3 +19,4 @@ log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
serde.workspace = true
brocade = { path = "../../brocade" }

@ -1,7 +1,8 @@
use brocade::BrocadeOptions;
use cidr::Ipv4Cidr;
use harmony::{
    hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
    infra::opnsense::OPNSenseManagementInterface,
    hardware::{Location, SwitchGroup},
    infra::{brocade::BrocadeSwitchClient, opnsense::OPNSenseManagementInterface},
    inventory::Inventory,
    topology::{HAClusterTopology, LogicalHost, UnmanagedRouter},
};
@ -22,6 +23,26 @@ pub async fn get_topology() -> HAClusterTopology {
        name: String::from("opnsense-1"),
    };

    let switch_auth = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
        .await
        .expect("Failed to get credentials");

    let switches: Vec<IpAddr> = vec![ip!("192.168.1.101")]; // TODO: Adjust me
    let brocade_options = Some(BrocadeOptions {
        dry_run: *harmony::config::DRY_RUN,
        ..Default::default()
    });
    let switch_client = BrocadeSwitchClient::init(
        &switches,
        &switch_auth.username,
        &switch_auth.password,
        brocade_options,
    )
    .await
    .expect("Failed to connect to switch");

    let switch_client = Arc::new(switch_client);

    let config = SecretManager::get_or_prompt::<OPNSenseFirewallConfig>().await;
    let config = config.unwrap();

@ -38,6 +59,7 @@ pub async fn get_topology() -> HAClusterTopology {
    let gateway_ipv4 = ipv4!("192.168.1.1");
    let gateway_ip = IpAddr::V4(gateway_ipv4);
    harmony::topology::HAClusterTopology {
        kubeconfig: None,
        domain_name: "demo.harmony.mcd".to_string(),
        router: Arc::new(UnmanagedRouter::new(
            gateway_ip,
@ -58,7 +80,7 @@ pub async fn get_topology() -> HAClusterTopology {
            name: "bootstrap".to_string(),
        },
        workers: vec![],
        switch: vec![],
        switch_client: switch_client.clone(),
    }
}

@ -75,3 +97,9 @@ pub fn get_inventory() -> Inventory {
        control_plane_host: vec![],
    }
}

#[derive(Secret, Serialize, Deserialize, Debug)]
pub struct BrocadeSwitchAuth {
    pub username: String,
    pub password: String,
}

@ -19,3 +19,4 @@ log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
serde.workspace = true
brocade = { path = "../../brocade" }

@ -1,13 +1,15 @@
use brocade::BrocadeOptions;
use cidr::Ipv4Cidr;
use harmony::{
    config::secret::OPNSenseFirewallCredentials,
    hardware::{Location, SwitchGroup},
    infra::opnsense::OPNSenseManagementInterface,
    infra::{brocade::BrocadeSwitchClient, opnsense::OPNSenseManagementInterface},
    inventory::Inventory,
    topology::{HAClusterTopology, LogicalHost, UnmanagedRouter},
};
use harmony_macros::{ip, ipv4};
use harmony_secret::SecretManager;
use harmony_secret::{Secret, SecretManager};
use serde::{Deserialize, Serialize};
use std::{net::IpAddr, sync::Arc};

pub async fn get_topology() -> HAClusterTopology {
@ -16,6 +18,26 @@ pub async fn get_topology() -> HAClusterTopology {
        name: String::from("opnsense-1"),
    };

    let switch_auth = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
        .await
        .expect("Failed to get credentials");

    let switches: Vec<IpAddr> = vec![ip!("192.168.1.101")]; // TODO: Adjust me
    let brocade_options = Some(BrocadeOptions {
        dry_run: *harmony::config::DRY_RUN,
        ..Default::default()
    });
    let switch_client = BrocadeSwitchClient::init(
        &switches,
        &switch_auth.username,
        &switch_auth.password,
        brocade_options,
    )
    .await
    .expect("Failed to connect to switch");

    let switch_client = Arc::new(switch_client);

    let config = SecretManager::get_or_prompt::<OPNSenseFirewallCredentials>().await;
    let config = config.unwrap();

@ -32,6 +54,7 @@ pub async fn get_topology() -> HAClusterTopology {
    let gateway_ipv4 = ipv4!("192.168.1.1");
    let gateway_ip = IpAddr::V4(gateway_ipv4);
    harmony::topology::HAClusterTopology {
        kubeconfig: None,
        domain_name: "demo.harmony.mcd".to_string(),
        router: Arc::new(UnmanagedRouter::new(
            gateway_ip,
@ -52,7 +75,7 @@ pub async fn get_topology() -> HAClusterTopology {
            name: "cp0".to_string(),
        },
        workers: vec![],
        switch: vec![],
        switch_client: switch_client.clone(),
    }
}

@ -69,3 +92,9 @@ pub fn get_inventory() -> Inventory {
        control_plane_host: vec![],
    }
}

#[derive(Secret, Serialize, Deserialize, Debug)]
pub struct BrocadeSwitchAuth {
    pub username: String,
    pub password: String,
}

							
								
								
									
14 examples/openbao/Cargo.toml Normal file
@ -0,0 +1,14 @@
[package]
name = "example-openbao"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true

[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_macros = { path = "../../harmony_macros" }
harmony_types = { path = "../../harmony_types" }
tokio.workspace = true
url.workspace = true
							
								
								
									
7 examples/openbao/README.md Normal file
@ -0,0 +1,7 @@
To install an OpenBao instance with Harmony, simply run `cargo run -p example-openbao`.

Depending on your environment configuration, it will either install a local k3d cluster and deploy onto it, or install to a remote cluster.

Then follow the OpenBao documentation to initialize and unseal the instance; this makes OpenBao usable.

https://openbao.org/docs/platform/k8s/helm/run/
							
								
								
									
67 examples/openbao/src/main.rs Normal file
@ -0,0 +1,67 @@
use std::{collections::HashMap, str::FromStr};

use harmony::{
    inventory::Inventory,
    modules::helm::chart::{HelmChartScore, HelmRepository, NonBlankString},
    topology::K8sAnywhereTopology,
};
use harmony_macros::hurl;

#[tokio::main]
async fn main() {
    let values_yaml = Some(
        r#"server:
  standalone:
    enabled: true
    config: |
      listener "tcp" {
        tls_disable = true
        address = "[::]:8200"
        cluster_address = "[::]:8201"
      }

      storage "file" {
        path = "/openbao/data"
      }

  service:
    enabled: true

  dataStorage:
    enabled: true
    size: 10Gi
    storageClass: null
    accessMode: ReadWriteOnce

  auditStorage:
    enabled: true
    size: 10Gi
    storageClass: null
    accessMode: ReadWriteOnce"#
            .to_string(),
    );
    let openbao = HelmChartScore {
        namespace: Some(NonBlankString::from_str("openbao").unwrap()),
        release_name: NonBlankString::from_str("openbao").unwrap(),
        chart_name: NonBlankString::from_str("openbao/openbao").unwrap(),
        chart_version: None,
        values_overrides: None,
        values_yaml,
        create_namespace: true,
        install_only: true,
        repository: Some(HelmRepository::new(
            "openbao".to_string(),
            hurl!("https://openbao.github.io/openbao-helm"),
            true,
        )),
    };

    harmony_cli::run(
        Inventory::autoload(),
        K8sAnywhereTopology::from_env(),
        vec![Box::new(openbao)],
        None,
    )
    .await
    .unwrap();
}
@ -16,3 +16,6 @@ harmony_macros = { path = "../../harmony_macros" }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
harmony_secret = { path = "../../harmony_secret" }
brocade = { path = "../../brocade" }
serde = { workspace = true }

@ -3,10 +3,11 @@ use std::{
    sync::Arc,
};

use brocade::BrocadeOptions;
use cidr::Ipv4Cidr;
use harmony::{
    hardware::{HostCategory, Location, PhysicalHost, SwitchGroup},
    infra::opnsense::OPNSenseManagementInterface,
    infra::{brocade::BrocadeSwitchClient, opnsense::OPNSenseManagementInterface},
    inventory::Inventory,
    modules::{
        dummy::{ErrorScore, PanicScore, SuccessScore},
@ -18,7 +19,9 @@ use harmony::{
    topology::{LogicalHost, UnmanagedRouter},
};
use harmony_macros::{ip, mac_address};
use harmony_secret::{Secret, SecretManager};
use harmony_types::net::Url;
use serde::{Deserialize, Serialize};

#[tokio::main]
async fn main() {
@ -27,6 +30,26 @@ async fn main() {
        name: String::from("opnsense-1"),
    };

    let switch_auth = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
        .await
        .expect("Failed to get credentials");

    let switches: Vec<IpAddr> = vec![ip!("192.168.5.101")]; // TODO: Adjust me
    let brocade_options = Some(BrocadeOptions {
        dry_run: *harmony::config::DRY_RUN,
        ..Default::default()
    });
    let switch_client = BrocadeSwitchClient::init(
        &switches,
        &switch_auth.username,
        &switch_auth.password,
        brocade_options,
    )
    .await
    .expect("Failed to connect to switch");

    let switch_client = Arc::new(switch_client);

    let opnsense = Arc::new(
        harmony::infra::opnsense::OPNSenseFirewall::new(firewall, None, "root", "opnsense").await,
    );
@ -34,6 +57,7 @@ async fn main() {
    let gateway_ipv4 = Ipv4Addr::new(10, 100, 8, 1);
    let gateway_ip = IpAddr::V4(gateway_ipv4);
    let topology = harmony::topology::HAClusterTopology {
        kubeconfig: None,
        domain_name: "demo.harmony.mcd".to_string(),
        router: Arc::new(UnmanagedRouter::new(
            gateway_ip,
@ -54,7 +78,7 @@ async fn main() {
            name: "cp0".to_string(),
        },
        workers: vec![],
        switch: vec![],
        switch_client: switch_client.clone(),
    };

    let inventory = Inventory {
@ -109,3 +133,9 @@ async fn main() {
    .await
    .unwrap();
}

#[derive(Secret, Serialize, Deserialize, Debug)]
pub struct BrocadeSwitchAuth {
    pub username: String,
    pub password: String,
}

							
								
								
									
11 examples/remove_rook_osd/Cargo.toml Normal file
@ -0,0 +1,11 @@
[package]
name = "example-remove-rook-osd"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true

[dependencies]
harmony = { version = "0.1.0", path = "../../harmony" }
harmony_cli = { version = "0.1.0", path = "../../harmony_cli" }
tokio.workspace = true

18 examples/remove_rook_osd/src/main.rs Normal file
@ -0,0 +1,18 @@
use harmony::{
    inventory::Inventory, modules::storage::ceph::ceph_remove_osd_score::CephRemoveOsd,
    topology::K8sAnywhereTopology,
};

#[tokio::main]
async fn main() {
    let ceph_score = CephRemoveOsd {
        osd_deployment_name: "rook-ceph-osd-2".to_string(),
        rook_ceph_namespace: "rook-ceph".to_string(),
    };

    let topology = K8sAnywhereTopology::from_env();
    let inventory = Inventory::autoload();
    harmony_cli::run(inventory, topology, vec![Box::new(ceph_score)], None)
        .await
        .unwrap();
}
@ -3,7 +3,7 @@ use harmony::{
 | 
			
		||||
    modules::{
 | 
			
		||||
        application::{
 | 
			
		||||
            ApplicationScore, RustWebFramework, RustWebapp,
 | 
			
		||||
            features::{PackagingDeployment, rhob_monitoring::Monitoring},
 | 
			
		||||
            features::{Monitoring, PackagingDeployment},
 | 
			
		||||
        },
 | 
			
		||||
        monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
 | 
			
		||||
    },
 | 
			
		||||
 | 
			
		||||
@ -77,6 +77,9 @@ harmony_secret = { path = "../harmony_secret" }
 | 
			
		||||
askama.workspace = true
 | 
			
		||||
sqlx.workspace = true
 | 
			
		||||
inquire.workspace = true
 | 
			
		||||
brocade = { path = "../brocade" }
 | 
			
		||||
option-ext = "0.2.0"
 | 
			
		||||
 | 
			
		||||
[dev-dependencies]
 | 
			
		||||
pretty_assertions.workspace = true
 | 
			
		||||
assertor.workspace = true
 | 
			
		||||
 | 
			
		||||
@ -30,6 +30,7 @@ pub enum InterpretName {
 | 
			
		||||
    Lamp,
 | 
			
		||||
    ApplicationMonitoring,
 | 
			
		||||
    K8sPrometheusCrdAlerting,
 | 
			
		||||
    CephRemoveOsd,
 | 
			
		||||
    DiscoverInventoryAgent,
 | 
			
		||||
    CephClusterHealth,
 | 
			
		||||
    Custom(&'static str),
 | 
			
		||||
@ -61,6 +62,7 @@ impl std::fmt::Display for InterpretName {
 | 
			
		||||
            InterpretName::Lamp => f.write_str("LAMP"),
 | 
			
		||||
            InterpretName::ApplicationMonitoring => f.write_str("ApplicationMonitoring"),
 | 
			
		||||
            InterpretName::K8sPrometheusCrdAlerting => f.write_str("K8sPrometheusCrdAlerting"),
 | 
			
		||||
            InterpretName::CephRemoveOsd => f.write_str("CephRemoveOsd"),
 | 
			
		||||
            InterpretName::DiscoverInventoryAgent => f.write_str("DiscoverInventoryAgent"),
 | 
			
		||||
            InterpretName::CephClusterHealth => f.write_str("CephClusterHealth"),
 | 
			
		||||
            InterpretName::Custom(name) => f.write_str(name),
 | 
			
		||||
 | 
			
		||||
@ -1,33 +1,28 @@
 | 
			
		||||
use async_trait::async_trait;
 | 
			
		||||
use harmony_macros::ip;
 | 
			
		||||
use harmony_types::net::MacAddress;
 | 
			
		||||
use harmony_types::net::Url;
 | 
			
		||||
use harmony_types::{
 | 
			
		||||
    net::{MacAddress, Url},
 | 
			
		||||
    switch::PortLocation,
 | 
			
		||||
};
 | 
			
		||||
use kube::api::ObjectMeta;
 | 
			
		||||
use log::debug;
 | 
			
		||||
use log::info;
 | 
			
		||||
 | 
			
		||||
use crate::data::FileContent;
 | 
			
		||||
use crate::executors::ExecutorError;
 | 
			
		||||
use crate::modules::okd::crd::nmstate::{self, NodeNetworkConfigurationPolicy};
 | 
			
		||||
use crate::topology::PxeOptions;
 | 
			
		||||
use crate::{data::FileContent, modules::okd::crd::nmstate::NMState};
 | 
			
		||||
use crate::{
 | 
			
		||||
    executors::ExecutorError, modules::okd::crd::nmstate::NodeNetworkConfigurationPolicySpec,
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
use super::DHCPStaticEntry;
 | 
			
		||||
use super::DhcpServer;
 | 
			
		||||
use super::DnsRecord;
 | 
			
		||||
use super::DnsRecordType;
 | 
			
		||||
use super::DnsServer;
 | 
			
		||||
use super::Firewall;
 | 
			
		||||
use super::HttpServer;
 | 
			
		||||
use super::IpAddress;
 | 
			
		||||
use super::K8sclient;
 | 
			
		||||
use super::LoadBalancer;
 | 
			
		||||
use super::LoadBalancerService;
 | 
			
		||||
use super::LogicalHost;
 | 
			
		||||
use super::PreparationError;
 | 
			
		||||
use super::PreparationOutcome;
 | 
			
		||||
use super::Router;
 | 
			
		||||
use super::TftpServer;
 | 
			
		||||
use super::{
 | 
			
		||||
    DHCPStaticEntry, DhcpServer, DnsRecord, DnsRecordType, DnsServer, Firewall, HostNetworkConfig,
 | 
			
		||||
    HttpServer, IpAddress, K8sclient, LoadBalancer, LoadBalancerService, LogicalHost,
 | 
			
		||||
    PreparationError, PreparationOutcome, Router, Switch, SwitchClient, SwitchError, TftpServer,
 | 
			
		||||
    Topology, k8s::K8sClient,
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
use super::Topology;
 | 
			
		||||
use super::k8s::K8sClient;
 | 
			
		||||
use std::collections::BTreeMap;
 | 
			
		||||
use std::sync::Arc;
 | 
			
		||||
 | 
			
		||||
#[derive(Debug, Clone)]
 | 
			
		||||
@ -40,10 +35,11 @@ pub struct HAClusterTopology {
 | 
			
		||||
    pub tftp_server: Arc<dyn TftpServer>,
 | 
			
		||||
    pub http_server: Arc<dyn HttpServer>,
 | 
			
		||||
    pub dns_server: Arc<dyn DnsServer>,
 | 
			
		||||
    pub switch_client: Arc<dyn SwitchClient>,
 | 
			
		||||
    pub bootstrap_host: LogicalHost,
 | 
			
		||||
    pub control_plane: Vec<LogicalHost>,
 | 
			
		||||
    pub workers: Vec<LogicalHost>,
 | 
			
		||||
    pub switch: Vec<LogicalHost>,
 | 
			
		||||
    pub kubeconfig: Option<String>,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#[async_trait]
 | 
			
		||||
@ -62,9 +58,17 @@ impl Topology for HAClusterTopology {
 | 
			
		||||
#[async_trait]
 | 
			
		||||
impl K8sclient for HAClusterTopology {
 | 
			
		||||
    async fn k8s_client(&self) -> Result<Arc<K8sClient>, String> {
 | 
			
		||||
        Ok(Arc::new(
 | 
			
		||||
            K8sClient::try_default().await.map_err(|e| e.to_string())?,
 | 
			
		||||
        ))
 | 
			
		||||
        match &self.kubeconfig {
 | 
			
		||||
            None => Ok(Arc::new(
 | 
			
		||||
                K8sClient::try_default().await.map_err(|e| e.to_string())?,
 | 
			
		||||
            )),
 | 
			
		||||
            Some(kubeconfig) => {
 | 
			
		||||
                let Some(client) = K8sClient::from_kubeconfig(&kubeconfig).await else {
 | 
			
		||||
                    return Err("Failed to create k8s client".to_string());
 | 
			
		||||
                };
 | 
			
		||||
                Ok(Arc::new(client))
 | 
			
		||||
            }
 | 
			
		||||
        }
 | 
			
		||||
    }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
@ -89,6 +93,193 @@ impl HAClusterTopology {
 | 
			
		||||
            .to_string()
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    async fn ensure_nmstate_operator_installed(&self) -> Result<(), String> {
 | 
			
		||||
        let k8s_client = self.k8s_client().await?;
 | 
			
		||||
 | 
			
		||||
        debug!("Installing NMState controller...");
 | 
			
		||||
        k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/nmstate.io_nmstates.yaml
 | 
			
		||||
").unwrap(), Some("nmstate"))
 | 
			
		||||
            .await
 | 
			
		||||
            .map_err(|e| e.to_string())?;
 | 
			
		||||
 | 
			
		||||
        debug!("Creating NMState namespace...");
 | 
			
		||||
        k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/namespace.yaml
 | 
			
		||||
").unwrap(), Some("nmstate"))
 | 
			
		||||
            .await
 | 
			
		||||
            .map_err(|e| e.to_string())?;
 | 
			
		||||
 | 
			
		||||
        debug!("Creating NMState service account...");
 | 
			
		||||
        k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/service_account.yaml
 | 
			
		||||
").unwrap(), Some("nmstate"))
 | 
			
		||||
            .await
 | 
			
		||||
            .map_err(|e| e.to_string())?;
 | 
			
		||||
 | 
			
		||||
        debug!("Creating NMState role...");
 | 
			
		||||
        k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/role.yaml
 | 
			
		||||
").unwrap(), Some("nmstate"))
 | 
			
		||||
            .await
 | 
			
		||||
            .map_err(|e| e.to_string())?;
 | 
			
		||||
 | 
			
		||||
        debug!("Creating NMState role binding...");
 | 
			
		||||
        k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/role_binding.yaml
 | 
			
		||||
").unwrap(), Some("nmstate"))
 | 
			
		||||
            .await
 | 
			
		||||
            .map_err(|e| e.to_string())?;
 | 
			
		||||
 | 
			
		||||
        debug!("Creating NMState operator...");
 | 
			
		||||
        k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/operator.yaml
 | 
			
		||||
").unwrap(), Some("nmstate"))
 | 
			
		||||
            .await
 | 
			
		||||
            .map_err(|e| e.to_string())?;
 | 
			
		||||
 | 
			
		||||
        k8s_client
 | 
			
		||||
            .wait_until_deployment_ready("nmstate-operator", Some("nmstate"), None)
 | 
			
		||||
            .await?;
 | 
			
		||||
 | 
			
		||||
        let nmstate = NMState {
 | 
			
		||||
            metadata: ObjectMeta {
 | 
			
		||||
                name: Some("nmstate".to_string()),
 | 
			
		||||
                ..Default::default()
 | 
			
		||||
            },
 | 
			
		||||
            ..Default::default()
 | 
			
		||||
        };
 | 
			
		||||
        debug!("Creating NMState: {nmstate:#?}");
 | 
			
		||||
        k8s_client
 | 
			
		||||
            .apply(&nmstate, None)
 | 
			
		||||
            .await
 | 
			
		||||
            .map_err(|e| e.to_string())?;
 | 
			
		||||
 | 
			
		||||
        Ok(())
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    fn get_next_bond_id(&self) -> u8 {
 | 
			
		||||
        42 // FIXME: Find a better way to declare the bond id
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    async fn configure_bond(&self, config: &HostNetworkConfig) -> Result<(), SwitchError> {
 | 
			
		||||
        self.ensure_nmstate_operator_installed()
 | 
			
		||||
            .await
 | 
			
		||||
            .map_err(|e| {
 | 
			
                SwitchError::new(format!(
                    "Can't configure bond, NMState operator not available: {e}"
                ))
            })?;

        let bond_config = self.create_bond_configuration(config);
        debug!(
            "Applying NMState bond config for host {}: {bond_config:#?}",
            config.host_id
        );
        self.k8s_client()
            .await
            .unwrap()
            .apply(&bond_config, None)
            .await
            .map_err(|e| SwitchError::new(format!("Failed to configure bond: {e}")))?;

        Ok(())
    }

    fn create_bond_configuration(
        &self,
        config: &HostNetworkConfig,
    ) -> NodeNetworkConfigurationPolicy {
        let host_name = &config.host_id;
        let bond_id = self.get_next_bond_id();
        let bond_name = format!("bond{bond_id}");

        info!("Configuring bond '{bond_name}' for host '{host_name}'...");

        let mut bond_mtu: Option<u32> = None;
        let mut copy_mac_from: Option<String> = None;
        let mut bond_ports = Vec::new();
        let mut interfaces: Vec<nmstate::InterfaceSpec> = Vec::new();

        for switch_port in &config.switch_ports {
            let interface_name = switch_port.interface.name.clone();

            interfaces.push(nmstate::InterfaceSpec {
                name: interface_name.clone(),
                description: Some(format!("Member of bond {bond_name}")),
                r#type: "ethernet".to_string(),
                state: "up".to_string(),
                mtu: Some(switch_port.interface.mtu),
                mac_address: Some(switch_port.interface.mac_address.to_string()),
                ipv4: Some(nmstate::IpStackSpec {
                    enabled: Some(false),
                    ..Default::default()
                }),
                ipv6: Some(nmstate::IpStackSpec {
                    enabled: Some(false),
                    ..Default::default()
                }),
                link_aggregation: None,
                ..Default::default()
            });

            bond_ports.push(interface_name.clone());

            // Use the first port's details for the bond mtu and mac address
            if bond_mtu.is_none() {
                bond_mtu = Some(switch_port.interface.mtu);
            }
            if copy_mac_from.is_none() {
                copy_mac_from = Some(interface_name);
            }
        }

        interfaces.push(nmstate::InterfaceSpec {
            name: bond_name.clone(),
            description: Some(format!("Network bond for host {host_name}")),
            r#type: "bond".to_string(),
            state: "up".to_string(),
            copy_mac_from,
            ipv4: Some(nmstate::IpStackSpec {
                dhcp: Some(true),
                enabled: Some(true),
                ..Default::default()
            }),
            ipv6: Some(nmstate::IpStackSpec {
                dhcp: Some(true),
                autoconf: Some(true),
                enabled: Some(true),
                ..Default::default()
            }),
            link_aggregation: Some(nmstate::BondSpec {
                mode: "802.3ad".to_string(),
                ports: bond_ports,
                ..Default::default()
            }),
            ..Default::default()
        });

        NodeNetworkConfigurationPolicy {
            metadata: ObjectMeta {
                name: Some(format!("{host_name}-bond-config")),
                ..Default::default()
            },
            spec: NodeNetworkConfigurationPolicySpec {
                node_selector: Some(BTreeMap::from([(
                    "kubernetes.io/hostname".to_string(),
                    host_name.to_string(),
                )])),
                desired_state: nmstate::DesiredStateSpec { interfaces },
            },
        }
    }
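The builder emits one ethernet InterfaceSpec per member NIC plus the 802.3ad bond that carries the DHCP-managed IP stack. A minimal sketch of what calling code inside this impl (or a module test) could assert, assuming a `config` with two switch ports:

// Sketch only (hypothetical two-port `config`): 2 ethernet members + 1 bond interface.
let policy = topology.create_bond_configuration(&config);
assert_eq!(policy.spec.desired_state.interfaces.len(), 3);
assert_eq!(policy.metadata.name.as_deref(), Some("host-01-bond-config")); // assumed host_id "host-01"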

    async fn configure_port_channel(&self, config: &HostNetworkConfig) -> Result<(), SwitchError> {
        debug!("Configuring port channel: {config:#?}");
        let switch_ports = config.switch_ports.iter().map(|s| s.port.clone()).collect();

        self.switch_client
            .configure_port_channel(&format!("Harmony_{}", config.host_id), switch_ports)
            .await
            .map_err(|e| SwitchError::new(format!("Failed to configure switch: {e}")))?;

        Ok(())
    }

    pub fn autoload() -> Self {
        let dummy_infra = Arc::new(DummyInfra {});
        let dummy_host = LogicalHost {
@@ -97,6 +288,7 @@ impl HAClusterTopology {
        };

        Self {
            kubeconfig: None,
            domain_name: "DummyTopology".to_string(),
            router: dummy_infra.clone(),
            load_balancer: dummy_infra.clone(),
@@ -105,10 +297,10 @@ impl HAClusterTopology {
            tftp_server: dummy_infra.clone(),
            http_server: dummy_infra.clone(),
            dns_server: dummy_infra.clone(),
            switch_client: dummy_infra.clone(),
            bootstrap_host: dummy_host,
            control_plane: vec![],
            workers: vec![],
            switch: vec![],
        }
    }
}
@@ -263,6 +455,27 @@ impl HttpServer for HAClusterTopology {
    }
}

#[async_trait]
impl Switch for HAClusterTopology {
    async fn setup_switch(&self) -> Result<(), SwitchError> {
        self.switch_client.setup().await?;
        Ok(())
    }

    async fn get_port_for_mac_address(
        &self,
        mac_address: &MacAddress,
    ) -> Result<Option<PortLocation>, SwitchError> {
        let port = self.switch_client.find_port(mac_address).await?;
        Ok(port)
    }

    async fn configure_host_network(&self, config: &HostNetworkConfig) -> Result<(), SwitchError> {
        self.configure_bond(config).await?;
        self.configure_port_channel(config).await
    }
}
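Configuring a host's network therefore touches both ends of the link: the NMState bond on the node and the matching port-channel on the switch. A minimal caller sketch, assuming a `HostNetworkConfig` built elsewhere (the helper name is illustrative, not from the repo):

// Sketch only: wire the idempotent switch setup and the per-host configuration together.
async fn enable_lacp_for_host(
    topology: &HAClusterTopology,
    config: &HostNetworkConfig,
) -> Result<(), SwitchError> {
    topology.setup_switch().await?;               // one-time, idempotent switch prep
    topology.configure_host_network(config).await // NMState bond + switch port-channel
}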

#[derive(Debug)]
pub struct DummyInfra;

@@ -332,8 +545,8 @@ impl DhcpServer for DummyInfra {
    }
    async fn set_dhcp_range(
        &self,
        start: &IpAddress,
        end: &IpAddress,
        _start: &IpAddress,
        _end: &IpAddress,
    ) -> Result<(), ExecutorError> {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
@@ -449,3 +662,25 @@ impl DnsServer for DummyInfra {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
}

#[async_trait]
impl SwitchClient for DummyInfra {
    async fn setup(&self) -> Result<(), SwitchError> {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }

    async fn find_port(
        &self,
        _mac_address: &MacAddress,
    ) -> Result<Option<PortLocation>, SwitchError> {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }

    async fn configure_port_channel(
        &self,
        _channel_name: &str,
        _switch_ports: Vec<PortLocation>,
    ) -> Result<u8, SwitchError> {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }
}

@@ -1,13 +1,21 @@
use std::time::Duration;

use derive_new::new;
use k8s_openapi::{
    ClusterResourceScope, NamespaceResourceScope,
    api::{apps::v1::Deployment, core::v1::Pod},
    api::{
        apps::v1::Deployment,
        core::v1::{Pod, ServiceAccount},
    },
    apimachinery::pkg::version::Info,
};
use kube::{
    Client, Config, Error, Resource,
    Client, Config, Discovery, Error, Resource,
    api::{Api, AttachParams, DeleteParams, ListParams, Patch, PatchParams, ResourceExt},
    config::{KubeConfigOptions, Kubeconfig},
    core::ErrorResponse,
    discovery::{ApiCapabilities, Scope},
    error::DiscoveryError,
    runtime::reflector::Lookup,
};
use kube::{api::DynamicObject, runtime::conditions};
@@ -15,11 +23,12 @@ use kube::{
    api::{ApiResource, GroupVersionKind},
    runtime::wait::await_condition,
};
use log::{debug, error, trace};
use log::{debug, error, info, trace, warn};
use serde::{Serialize, de::DeserializeOwned};
use serde_json::{Value, json};
use serde_json::json;
use similar::TextDiff;
use tokio::io::AsyncReadExt;
use tokio::{io::AsyncReadExt, time::sleep};
use url::Url;

#[derive(new, Clone)]
pub struct K8sClient {
@@ -53,6 +62,22 @@ impl K8sClient {
        })
    }

    pub async fn service_account_api(&self, namespace: &str) -> Api<ServiceAccount> {
        let api: Api<ServiceAccount> = Api::namespaced(self.client.clone(), namespace);
        api
    }

    pub async fn get_apiserver_version(&self) -> Result<Info, Error> {
        let client: Client = self.client.clone();
        let version_info: Info = client.apiserver_version().await?;
        Ok(version_info)
    }

    pub async fn discovery(&self) -> Result<Discovery, Error> {
        let discovery: Discovery = Discovery::new(self.client.clone()).run().await?;
        Ok(discovery)
    }

    pub async fn get_resource_json_value(
        &self,
        name: &str,
@@ -65,7 +90,8 @@ impl K8sClient {
        } else {
            Api::default_namespaced_with(self.client.clone(), &gvk)
        };
        Ok(resource.get(name).await?)

        resource.get(name).await
    }

    pub async fn get_deployment(
@@ -74,11 +100,15 @@ impl K8sClient {
        namespace: Option<&str>,
    ) -> Result<Option<Deployment>, Error> {
        let deps: Api<Deployment> = if let Some(ns) = namespace {
            debug!("getting namespaced deployment");
            Api::namespaced(self.client.clone(), ns)
        } else {
            debug!("getting default namespace deployment");
            Api::default_namespaced(self.client.clone())
        };
        Ok(deps.get_opt(name).await?)

        debug!("getting deployment {} in ns {}", name, namespace.unwrap());
        deps.get_opt(name).await
    }

    pub async fn get_pod(&self, name: &str, namespace: Option<&str>) -> Result<Option<Pod>, Error> {
@@ -87,7 +117,8 @@ impl K8sClient {
        } else {
            Api::default_namespaced(self.client.clone())
        };
        Ok(pods.get_opt(name).await?)

        pods.get_opt(name).await
    }

    pub async fn scale_deployment(
@@ -108,7 +139,7 @@ impl K8sClient {
            }
        });
        let pp = PatchParams::default();
        let scale = Patch::Apply(&patch);
        let scale = Patch::Merge(&patch);
        deployments.patch_scale(name, &pp, &scale).await?;
        Ok(())
    }
@@ -130,9 +161,9 @@ impl K8sClient {

    pub async fn wait_until_deployment_ready(
        &self,
        name: String,
        name: &str,
        namespace: Option<&str>,
        timeout: Option<u64>,
        timeout: Option<Duration>,
    ) -> Result<(), String> {
        let api: Api<Deployment>;

@@ -142,9 +173,9 @@ impl K8sClient {
            api = Api::default_namespaced(self.client.clone());
        }

        let establish = await_condition(api, name.as_str(), conditions::is_deployment_completed());
        let t = timeout.unwrap_or(300);
        let res = tokio::time::timeout(std::time::Duration::from_secs(t), establish).await;
        let establish = await_condition(api, name, conditions::is_deployment_completed());
        let timeout = timeout.unwrap_or(Duration::from_secs(120));
        let res = tokio::time::timeout(timeout, establish).await;

        if res.is_ok() {
            Ok(())
@@ -153,6 +184,41 @@ impl K8sClient {
        }
    }

    pub async fn wait_for_pod_ready(
        &self,
        pod_name: &str,
        namespace: Option<&str>,
    ) -> Result<(), Error> {
        let mut elapsed = 0;
        let interval = 5; // seconds between checks
        let timeout_secs = 120;
        loop {
            let pod = self.get_pod(pod_name, namespace).await?;

            if let Some(p) = pod {
                if let Some(status) = p.status {
                    if let Some(phase) = status.phase {
                        if phase.to_lowercase() == "running" {
                            return Ok(());
                        }
                    }
                }
            }

            if elapsed >= timeout_secs {
                return Err(Error::Discovery(DiscoveryError::MissingResource(format!(
                    "'{}' in ns '{}' did not become ready within {}s",
                    pod_name,
                    namespace.unwrap(),
                    timeout_secs
                ))));
            }

            sleep(Duration::from_secs(interval)).await;
            elapsed += interval;
        }
    }
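Both wait helpers are polling/timeout wrappers around the same client. A short usage sketch, inside a function returning Result<(), String> (names and namespaces are illustrative):

// Sketch only: wait for a deployment, then for one of its pods, with explicit timeouts.
client
    .wait_until_deployment_ready("my-app", Some("my-ns"), Some(Duration::from_secs(120)))
    .await
    .map_err(|e| format!("deployment not ready: {e}"))?;
client
    .wait_for_pod_ready("my-app-0", Some("my-ns"))
    .await
    .map_err(|e| format!("pod not ready: {e}"))?;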

    /// Will execute a command in the first pod found that matches the specified label
    /// '{label}={name}'
    pub async fn exec_app_capture_output(
@@ -199,7 +265,7 @@ impl K8sClient {

                if let Some(s) = status.status {
                    let mut stdout_buf = String::new();
                    if let Some(mut stdout) = process.stdout().take() {
                    if let Some(mut stdout) = process.stdout() {
                        stdout
                            .read_to_string(&mut stdout_buf)
                            .await
@@ -305,14 +371,14 @@ impl K8sClient {
                Ok(current) => {
                    trace!("Received current value {current:#?}");
                    // The resource exists, so we calculate and display a diff.
                    println!("\nPerforming dry-run for resource: '{}'", name);
                    println!("\nPerforming dry-run for resource: '{name}'");
                    let mut current_yaml = serde_yaml::to_value(&current).unwrap_or_else(|_| {
                        panic!("Could not serialize current value : {current:#?}")
                    });
                    if current_yaml.is_mapping() && current_yaml.get("status").is_some() {
                        let map = current_yaml.as_mapping_mut().unwrap();
                        let removed = map.remove_entry("status");
                        trace!("Removed status {:?}", removed);
                        trace!("Removed status {removed:?}");
                    } else {
                        trace!(
                            "Did not find status entry for current object {}/{}",
@@ -341,14 +407,14 @@ impl K8sClient {
                            similar::ChangeTag::Insert => "+",
                            similar::ChangeTag::Equal => " ",
                        };
                        print!("{}{}", sign, change);
                        print!("{sign}{change}");
                    }
                    // In a dry run, we return the new resource state that would have been applied.
                    Ok(resource.clone())
                }
                Err(Error::Api(ErrorResponse { code: 404, .. })) => {
                    // The resource does not exist, so the "diff" is the entire new resource.
                    println!("\nPerforming dry-run for new resource: '{}'", name);
                    println!("\nPerforming dry-run for new resource: '{name}'");
                    println!(
                        "Resource does not exist. It would be created with the following content:"
                    );
@@ -357,14 +423,14 @@ impl K8sClient {

                    // Print each line of the new resource with a '+' prefix.
                    for line in new_yaml.lines() {
                        println!("+{}", line);
                        println!("+{line}");
                    }
                    // In a dry run, we return the new resource state that would have been created.
                    Ok(resource.clone())
                }
                Err(e) => {
                    // Another API error occurred.
                    error!("Failed to get resource '{}': {}", name, e);
                    error!("Failed to get resource '{name}': {e}");
                    Err(e)
                }
            }
@@ -379,7 +445,7 @@ impl K8sClient {
    where
        K: Resource + Clone + std::fmt::Debug + DeserializeOwned + serde::Serialize,
        <K as Resource>::Scope: ApplyStrategy<K>,
        <K as kube::Resource>::DynamicType: Default,
        <K as Resource>::DynamicType: Default,
    {
        let mut result = Vec::new();
        for r in resource.iter() {
@@ -419,9 +485,12 @@ impl K8sClient {
            .as_str()
            .expect("couldn't get kind as str");

        let split: Vec<&str> = api_version.splitn(2, "/").collect();
        let g = split[0];
        let v = split[1];
        let mut it = api_version.splitn(2, '/');
        let first = it.next().unwrap();
        let (g, v) = match it.next() {
            Some(second) => (first, second),
            None => ("", first),
        };

        let gvk = GroupVersionKind::gvk(g, v, kind);
        let api_resource = ApiResource::from_gvk(&gvk);
@@ -441,10 +510,7 @@ impl K8sClient {

        // 6. Apply the object to the cluster using Server-Side Apply.
        //    This will create the resource if it doesn't exist, or update it if it does.
        println!(
            "Applying Argo Application '{}' in namespace '{}'...",
            name, namespace
        );
        println!("Applying '{name}' in namespace '{namespace}'...",);
        let patch_params = PatchParams::apply("harmony"); // Use a unique field manager name
        let result = api.patch(name, &patch_params, &Patch::Apply(&obj)).await?;

@@ -453,6 +519,51 @@ impl K8sClient {
        Ok(())
    }

    /// Apply a resource from a URL
    ///
    /// It is the equivalent of `kubectl apply -f <url>`
    pub async fn apply_url(&self, url: Url, ns: Option<&str>) -> Result<(), Error> {
        let patch_params = PatchParams::apply("harmony");
        let discovery = kube::Discovery::new(self.client.clone()).run().await?;

        let yaml = reqwest::get(url)
            .await
            .expect("Could not get URL")
            .text()
            .await
            .expect("Could not get content from URL");

        for doc in multidoc_deserialize(&yaml).expect("failed to parse YAML from file") {
            let obj: DynamicObject =
                serde_yaml::from_value(doc).expect("cannot apply without valid YAML");
            let namespace = obj.metadata.namespace.as_deref().or(ns);
            let type_meta = obj
                .types
                .as_ref()
                .expect("cannot apply object without valid TypeMeta");
            let gvk = GroupVersionKind::try_from(type_meta)
                .expect("cannot apply object without valid GroupVersionKind");
            let name = obj.name_any();

            if let Some((ar, caps)) = discovery.resolve_gvk(&gvk) {
                let api = get_dynamic_api(ar, caps, self.client.clone(), namespace, false);
                trace!(
                    "Applying {}: \n{}",
                    gvk.kind,
                    serde_yaml::to_string(&obj).expect("Failed to serialize YAML")
                );
                let data: serde_json::Value =
                    serde_json::to_value(&obj).expect("Failed to serialize JSON");
                let _r = api.patch(&name, &patch_params, &Patch::Apply(data)).await?;
                debug!("applied {} {}", gvk.kind, name);
            } else {
                warn!("Cannot apply document for unknown {gvk:?}");
            }
        }

        Ok(())
    }
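A usage sketch for apply_url, inside a function that returns Result<(), kube::Error> (the URL and namespace are placeholders, not from the repo):

// Sketch only: server-side-apply every document in a remote multi-doc manifest.
let manifest = Url::parse("https://example.com/operator/manifests.yaml")
    .expect("static URL should parse");
client.apply_url(manifest, Some("operators")).await?;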

    pub(crate) async fn from_kubeconfig(path: &str) -> Option<K8sClient> {
        let k = match Kubeconfig::read_from(path) {
            Ok(k) => k,
@@ -472,6 +583,31 @@ impl K8sClient {
    }
}

fn get_dynamic_api(
    resource: ApiResource,
    capabilities: ApiCapabilities,
    client: Client,
    ns: Option<&str>,
    all: bool,
) -> Api<DynamicObject> {
    if capabilities.scope == Scope::Cluster || all {
        Api::all_with(client, &resource)
    } else if let Some(namespace) = ns {
        Api::namespaced_with(client, namespace, &resource)
    } else {
        Api::default_namespaced_with(client, &resource)
    }
}

fn multidoc_deserialize(data: &str) -> Result<Vec<serde_yaml::Value>, serde_yaml::Error> {
    use serde::Deserialize;
    let mut docs = vec![];
    for de in serde_yaml::Deserializer::from_str(data) {
        docs.push(serde_yaml::Value::deserialize(de)?);
    }
    Ok(docs)
}
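multidoc_deserialize yields one serde_yaml::Value per `---`-separated document, which is what lets apply_url handle whole operator bundles. A quick sketch:

// Sketch only: two documents in, two values out.
let docs = multidoc_deserialize("kind: Namespace\n---\nkind: ConfigMap\n")
    .expect("valid YAML");
assert_eq!(docs.len(), 2);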

pub trait ApplyStrategy<K: Resource> {
    fn get_api(client: &Client, ns: Option<&str>) -> Api<K>;
}

@@ -1,7 +1,12 @@
use std::{process::Command, sync::Arc};
use std::{collections::BTreeMap, process::Command, sync::Arc, time::Duration};

use async_trait::async_trait;
use kube::api::GroupVersionKind;
use base64::{Engine, engine::general_purpose};
use k8s_openapi::api::{
    core::v1::Secret,
    rbac::v1::{ClusterRoleBinding, RoleRef, Subject},
};
use kube::api::{DynamicObject, GroupVersionKind, ObjectMeta};
use log::{debug, info, warn};
use serde::Serialize;
use tokio::sync::OnceCell;
@@ -12,14 +17,26 @@ use crate::{
    inventory::Inventory,
    modules::{
        k3d::K3DInstallationScore,
        monitoring::kube_prometheus::crd::{
            crd_alertmanager_config::CRDPrometheus,
            prometheus_operator::prometheus_operator_helm_chart_score,
            rhob_alertmanager_config::RHOBObservability,
        k8s::ingress::{K8sIngressScore, PathType},
        monitoring::{
            grafana::{grafana::Grafana, helm::helm_grafana::grafana_helm_chart_score},
            kube_prometheus::crd::{
                crd_alertmanager_config::CRDPrometheus,
                crd_grafana::{
                    Grafana as GrafanaCRD, GrafanaCom, GrafanaDashboard,
                    GrafanaDashboardDatasource, GrafanaDashboardSpec, GrafanaDatasource,
                    GrafanaDatasourceConfig, GrafanaDatasourceJsonData,
                    GrafanaDatasourceSecureJsonData, GrafanaDatasourceSpec, GrafanaSpec,
                },
                crd_prometheuses::LabelSelector,
                prometheus_operator::prometheus_operator_helm_chart_score,
                rhob_alertmanager_config::RHOBObservability,
                service_monitor::ServiceMonitor,
            },
        },
        prometheus::{
            k8s_prometheus_alerting_score::K8sPrometheusCRDAlertingScore,
            prometheus::PrometheusApplicationMonitoring, rhob_alerting_score::RHOBAlertingScore,
            prometheus::PrometheusMonitoring, rhob_alerting_score::RHOBAlertingScore,
        },
    },
    score::Score,
@@ -47,6 +64,13 @@ struct K8sState {
    message: String,
}

#[derive(Debug, Clone)]
pub enum KubernetesDistribution {
    OpenshiftFamily,
    K3sFamily,
    Default,
}

#[derive(Debug, Clone)]
enum K8sSource {
    LocalK3d,
@@ -57,6 +81,7 @@ enum K8sSource {
pub struct K8sAnywhereTopology {
    k8s_state: Arc<OnceCell<Option<K8sState>>>,
    tenant_manager: Arc<OnceCell<K8sTenantManager>>,
    k8s_distribution: Arc<OnceCell<KubernetesDistribution>>,
    config: Arc<K8sAnywhereConfig>,
}

@@ -78,41 +103,172 @@ impl K8sclient for K8sAnywhereTopology {
}

#[async_trait]
impl PrometheusApplicationMonitoring<CRDPrometheus> for K8sAnywhereTopology {
impl Grafana for K8sAnywhereTopology {
    async fn ensure_grafana_operator(
        &self,
        inventory: &Inventory,
    ) -> Result<PreparationOutcome, PreparationError> {
        debug!("ensure grafana operator");
        let client = self.k8s_client().await.unwrap();
        let grafana_gvk = GroupVersionKind {
            group: "grafana.integreatly.org".to_string(),
            version: "v1beta1".to_string(),
            kind: "Grafana".to_string(),
        };
        let name = "grafanas.grafana.integreatly.org";
        let ns = "grafana";

        let grafana_crd = client
            .get_resource_json_value(name, Some(ns), &grafana_gvk)
            .await;
        match grafana_crd {
            Ok(_) => {
                return Ok(PreparationOutcome::Success {
                    details: "Found grafana CRDs in cluster".to_string(),
                });
            }

            Err(_) => {
                return self
                    .install_grafana_operator(inventory, Some("grafana"))
                    .await;
            }
        };
    }
    async fn install_grafana(&self) -> Result<PreparationOutcome, PreparationError> {
        let ns = "grafana";

        let mut label = BTreeMap::new();

        label.insert("dashboards".to_string(), "grafana".to_string());

        let label_selector = LabelSelector {
            match_labels: label.clone(),
            match_expressions: vec![],
        };

        let client = self.k8s_client().await?;

        let grafana = self.build_grafana(ns, &label);

        client.apply(&grafana, Some(ns)).await?;
        // TODO: replace this timeout with a proper readiness check
        client
            .wait_until_deployment_ready(
                "grafana-grafana-deployment",
                Some("grafana"),
                Some(Duration::from_secs(30)),
            )
            .await?;

        let sa_name = "grafana-grafana-sa";
        let token_secret_name = "grafana-sa-token-secret";

        let sa_token_secret = self.build_sa_token_secret(token_secret_name, sa_name, ns);

        client.apply(&sa_token_secret, Some(ns)).await?;
        let secret_gvk = GroupVersionKind {
            group: "".to_string(),
            version: "v1".to_string(),
            kind: "Secret".to_string(),
        };

        let secret = client
            .get_resource_json_value(token_secret_name, Some(ns), &secret_gvk)
            .await?;

        let token = format!(
            "Bearer {}",
            self.extract_and_normalize_token(&secret).unwrap()
        );

        debug!("creating grafana clusterrole binding");

        let clusterrolebinding =
            self.build_cluster_rolebinding(sa_name, "cluster-monitoring-view", ns);

        client.apply(&clusterrolebinding, Some(ns)).await?;

        debug!("creating grafana datasource crd");

        let thanos_url = format!(
            "https://{}",
            self.get_domain("thanos-querier-openshift-monitoring")
                .await
                .unwrap()
        );

        let thanos_openshift_datasource = self.build_grafana_datasource(
            "thanos-openshift-monitoring",
            ns,
            &label_selector,
            &thanos_url,
            &token,
        );

        client.apply(&thanos_openshift_datasource, Some(ns)).await?;

        debug!("creating grafana dashboard crd");
        let dashboard = self.build_grafana_dashboard(ns, &label_selector);

        client.apply(&dashboard, Some(ns)).await?;
        debug!("creating grafana ingress");
        let grafana_ingress = self.build_grafana_ingress(ns).await;

        grafana_ingress
            .interpret(&Inventory::empty(), self)
            .await
            .map_err(|e| PreparationError::new(e.to_string()))?;

        Ok(PreparationOutcome::Success {
            details: "Installed grafana components".to_string(),
        })
    }
}
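The Grafana trait splits the operator concern from the instance concern; a caller sketch (ordering assumed from the methods above, inside a function returning Result<_, PreparationError>):

// Sketch only: ensure the operator exists, then create the Grafana instance,
// datasource, dashboard and ingress.
topology.ensure_grafana_operator(&inventory).await?;
topology.install_grafana().await?;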

#[async_trait]
impl PrometheusMonitoring<CRDPrometheus> for K8sAnywhereTopology {
    async fn install_prometheus(
        &self,
        sender: &CRDPrometheus,
        inventory: &Inventory,
        receivers: Option<Vec<Box<dyn AlertReceiver<CRDPrometheus>>>>,
        _inventory: &Inventory,
        _receivers: Option<Vec<Box<dyn AlertReceiver<CRDPrometheus>>>>,
    ) -> Result<PreparationOutcome, PreparationError> {
        let client = self.k8s_client().await?;

        for monitor in sender.service_monitor.iter() {
            client
                .apply(monitor, Some(&sender.namespace))
                .await
                .map_err(|e| PreparationError::new(e.to_string()))?;
        }
        Ok(PreparationOutcome::Success {
            details: "successfully installed prometheus components".to_string(),
        })
    }

    async fn ensure_prometheus_operator(
        &self,
        sender: &CRDPrometheus,
        _inventory: &Inventory,
    ) -> Result<PreparationOutcome, PreparationError> {
        let po_result = self.ensure_prometheus_operator(sender).await?;

        if po_result == PreparationOutcome::Noop {
            debug!("Skipping Prometheus CR installation due to missing operator.");
            return Ok(po_result);
        }

        let result = self
            .get_k8s_prometheus_application_score(sender.clone(), receivers)
            .await
            .interpret(inventory, self)
            .await;

        match result {
            Ok(outcome) => match outcome.status {
                InterpretStatus::SUCCESS => Ok(PreparationOutcome::Success {
                    details: outcome.message,
                }),
                InterpretStatus::NOOP => Ok(PreparationOutcome::Noop),
                _ => Err(PreparationError::new(outcome.message)),
            },
            Err(err) => Err(PreparationError::new(err.to_string())),
        match po_result {
            PreparationOutcome::Success { details: _ } => {
                debug!("Detected prometheus crds operator present in cluster.");
                return Ok(po_result);
            }
            PreparationOutcome::Noop => {
                debug!("Skipping Prometheus CR installation due to missing operator.");
                return Ok(po_result);
            }
        }
    }
}

#[async_trait]
impl PrometheusApplicationMonitoring<RHOBObservability> for K8sAnywhereTopology {
impl PrometheusMonitoring<RHOBObservability> for K8sAnywhereTopology {
    async fn install_prometheus(
        &self,
        sender: &RHOBObservability,
@@ -146,6 +302,14 @@ impl PrometheusApplicationMonitoring<RHOBObservability> for K8sAnywhereTopology
            Err(err) => Err(PreparationError::new(err.to_string())),
        }
    }

    async fn ensure_prometheus_operator(
        &self,
        sender: &RHOBObservability,
        inventory: &Inventory,
    ) -> Result<PreparationOutcome, PreparationError> {
        todo!()
    }
}

impl Serialize for K8sAnywhereTopology {
@@ -162,6 +326,7 @@ impl K8sAnywhereTopology {
        Self {
            k8s_state: Arc::new(OnceCell::new()),
            tenant_manager: Arc::new(OnceCell::new()),
            k8s_distribution: Arc::new(OnceCell::new()),
            config: Arc::new(K8sAnywhereConfig::from_env()),
        }
    }
@@ -170,10 +335,216 @@ impl K8sAnywhereTopology {
        Self {
            k8s_state: Arc::new(OnceCell::new()),
            tenant_manager: Arc::new(OnceCell::new()),
            k8s_distribution: Arc::new(OnceCell::new()),
            config: Arc::new(config),
        }
    }

    pub async fn get_k8s_distribution(&self) -> Result<&KubernetesDistribution, PreparationError> {
        self.k8s_distribution
            .get_or_try_init(async || {
                let client = self.k8s_client().await.unwrap();

                let discovery = client.discovery().await.map_err(|e| {
                    PreparationError::new(format!("Could not discover API groups: {}", e))
                })?;

                let version = client.get_apiserver_version().await.map_err(|e| {
                    PreparationError::new(format!("Could not get server version: {}", e))
                })?;

                // OpenShift / OKD
                if discovery
                    .groups()
                    .any(|g| g.name() == "project.openshift.io")
                {
                    return Ok(KubernetesDistribution::OpenshiftFamily);
                }

                // K3d / K3s
                if version.git_version.contains("k3s") {
                    return Ok(KubernetesDistribution::K3sFamily);
                }

                return Ok(KubernetesDistribution::Default);
            })
            .await
    }
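get_k8s_distribution caches the detection in a OnceCell, so downstream code can branch on it cheaply. A hypothetical consumer, inside a function returning Result<_, PreparationError> (the ingress class names are illustrative only):

// Sketch only: branch on the detected distribution.
let ingress_class = match topology.get_k8s_distribution().await? {
    KubernetesDistribution::OpenshiftFamily => "openshift-default",
    KubernetesDistribution::K3sFamily => "traefik",
    KubernetesDistribution::Default => "nginx",
};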

    fn extract_and_normalize_token(&self, secret: &DynamicObject) -> Option<String> {
        let token_b64 = secret
            .data
            .get("token")
            .or_else(|| secret.data.get("data").and_then(|d| d.get("token")))
            .and_then(|v| v.as_str())?;

        let bytes = general_purpose::STANDARD.decode(token_b64).ok()?;

        let s = String::from_utf8(bytes).ok()?;

        let cleaned = s
            .trim_matches(|c: char| c.is_whitespace() || c == '\0')
            .to_string();
        Some(cleaned)
    }

    pub fn build_cluster_rolebinding(
        &self,
        service_account_name: &str,
        clusterrole_name: &str,
        ns: &str,
    ) -> ClusterRoleBinding {
        ClusterRoleBinding {
            metadata: ObjectMeta {
                name: Some(format!("{}-view-binding", service_account_name)),
                ..Default::default()
            },
            role_ref: RoleRef {
                api_group: "rbac.authorization.k8s.io".into(),
                kind: "ClusterRole".into(),
                name: clusterrole_name.into(),
            },
            subjects: Some(vec![Subject {
                kind: "ServiceAccount".into(),
                name: service_account_name.into(),
                namespace: Some(ns.into()),
                ..Default::default()
            }]),
        }
    }

    pub fn build_sa_token_secret(
        &self,
        secret_name: &str,
        service_account_name: &str,
        ns: &str,
    ) -> Secret {
        let mut annotations = BTreeMap::new();
        annotations.insert(
            "kubernetes.io/service-account.name".to_string(),
            service_account_name.to_string(),
        );

        Secret {
            metadata: ObjectMeta {
                name: Some(secret_name.into()),
                namespace: Some(ns.into()),
                annotations: Some(annotations),
                ..Default::default()
            },
            type_: Some("kubernetes.io/service-account-token".to_string()),
            ..Default::default()
        }
    }

    fn build_grafana_datasource(
        &self,
        name: &str,
        ns: &str,
        label_selector: &LabelSelector,
        url: &str,
        token: &str,
    ) -> GrafanaDatasource {
        let mut json_data = BTreeMap::new();
        json_data.insert("timeInterval".to_string(), "5s".to_string());

        GrafanaDatasource {
            metadata: ObjectMeta {
                name: Some(name.to_string()),
                namespace: Some(ns.to_string()),
                ..Default::default()
            },
            spec: GrafanaDatasourceSpec {
                instance_selector: label_selector.clone(),
                allow_cross_namespace_import: Some(true),
                values_from: None,
                datasource: GrafanaDatasourceConfig {
                    access: "proxy".to_string(),
                    name: name.to_string(),
                    r#type: "prometheus".to_string(),
                    url: url.to_string(),
                    database: None,
                    json_data: Some(GrafanaDatasourceJsonData {
                        time_interval: Some("60s".to_string()),
                        http_header_name1: Some("Authorization".to_string()),
                        tls_skip_verify: Some(true),
                        oauth_pass_thru: Some(true),
                    }),
                    secure_json_data: Some(GrafanaDatasourceSecureJsonData {
                        http_header_value1: Some(format!("Bearer {token}")),
                    }),
                    is_default: Some(false),
                    editable: Some(true),
                },
            },
        }
    }

    fn build_grafana_dashboard(
        &self,
        ns: &str,
        label_selector: &LabelSelector,
    ) -> GrafanaDashboard {
        let graf_dashboard = GrafanaDashboard {
            metadata: ObjectMeta {
                name: Some(format!("grafana-dashboard-{}", ns)),
                namespace: Some(ns.to_string()),
                ..Default::default()
            },
            spec: GrafanaDashboardSpec {
                resync_period: Some("30s".to_string()),
                instance_selector: label_selector.clone(),
                datasources: Some(vec![GrafanaDashboardDatasource {
                    input_name: "DS_PROMETHEUS".to_string(),
                    datasource_name: "thanos-openshift-monitoring".to_string(),
                }]),
                json: None,
                grafana_com: Some(GrafanaCom {
                    id: 17406,
                    revision: None,
                }),
            },
        };
        graf_dashboard
    }

    fn build_grafana(&self, ns: &str, labels: &BTreeMap<String, String>) -> GrafanaCRD {
        let grafana = GrafanaCRD {
            metadata: ObjectMeta {
                name: Some(format!("grafana-{}", ns)),
                namespace: Some(ns.to_string()),
                labels: Some(labels.clone()),
                ..Default::default()
            },
            spec: GrafanaSpec {
                config: None,
                admin_user: None,
                admin_password: None,
                ingress: None,
                persistence: None,
                resources: None,
            },
        };
        grafana
    }

    async fn build_grafana_ingress(&self, ns: &str) -> K8sIngressScore {
        let domain = self.get_domain(&format!("grafana-{}", ns)).await.unwrap();
        let name = format!("{}-grafana", ns);
        let backend_service = format!("grafana-{}-service", ns);

        K8sIngressScore {
            name: fqdn::fqdn!(&name),
            host: fqdn::fqdn!(&domain),
            backend_service: fqdn::fqdn!(&backend_service),
            port: 3000,
            path: Some("/".to_string()),
            path_type: Some(PathType::Prefix),
            namespace: Some(fqdn::fqdn!(&ns)),
            ingress_class_name: Some("openshift-default".to_string()),
        }
    }

    async fn get_cluster_observability_operator_prometheus_application_score(
        &self,
        sender: RHOBObservability,
@@ -191,13 +562,14 @@ impl K8sAnywhereTopology {
        &self,
        sender: CRDPrometheus,
        receivers: Option<Vec<Box<dyn AlertReceiver<CRDPrometheus>>>>,
        service_monitors: Option<Vec<ServiceMonitor>>,
    ) -> K8sPrometheusCRDAlertingScore {
        K8sPrometheusCRDAlertingScore {
        return K8sPrometheusCRDAlertingScore {
            sender,
            receivers: receivers.unwrap_or_default(),
            service_monitors: vec![],
            service_monitors: service_monitors.unwrap_or_default(),
            prometheus_rules: vec![],
        }
        };
    }

    async fn openshift_ingress_operator_available(&self) -> Result<(), PreparationError> {
@@ -465,6 +837,30 @@ impl K8sAnywhereTopology {
            details: "prometheus operator present in cluster".into(),
        })
    }

    async fn install_grafana_operator(
        &self,
        inventory: &Inventory,
        ns: Option<&str>,
    ) -> Result<PreparationOutcome, PreparationError> {
        let namespace = ns.unwrap_or("grafana");
        info!("installing grafana operator in ns {namespace}");
        let tenant = self.get_k8s_tenant_manager()?.get_tenant_config().await;
        let mut namespace_scope = false;
        if tenant.is_some() {
            namespace_scope = true;
        }
        let _grafana_operator_score = grafana_helm_chart_score(namespace, namespace_scope)
            .interpret(inventory, self)
            .await
            .map_err(|e| PreparationError::new(e.to_string()));
        Ok(PreparationOutcome::Success {
            details: format!(
                "Successfully installed grafana operator in ns {}",
                ns.unwrap()
            ),
        })
    }
}
 | 
			
		||||
 | 
			
		||||
#[derive(Clone, Debug)]
 | 
			
		||||
 | 
			
		||||
@ -28,13 +28,7 @@ pub trait LoadBalancer: Send + Sync {
 | 
			
		||||
        &self,
 | 
			
		||||
        service: &LoadBalancerService,
 | 
			
		||||
    ) -> Result<(), ExecutorError> {
 | 
			
		||||
        debug!(
 | 
			
		||||
            "Listing LoadBalancer services {:?}",
 | 
			
		||||
            self.list_services().await
 | 
			
		||||
        );
 | 
			
		||||
        if !self.list_services().await.contains(service) {
 | 
			
		||||
            self.add_service(service).await?;
 | 
			
		||||
        }
 | 
			
		||||
        self.add_service(service).await?;
 | 
			
		||||
        Ok(())
 | 
			
		||||
    }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
@ -1,10 +1,21 @@
 | 
			
		||||
use std::{net::Ipv4Addr, str::FromStr, sync::Arc};
 | 
			
		||||
use std::{
 | 
			
		||||
    error::Error,
 | 
			
		||||
    fmt::{self, Debug},
 | 
			
		||||
    net::Ipv4Addr,
 | 
			
		||||
    str::FromStr,
 | 
			
		||||
    sync::Arc,
 | 
			
		||||
};
 | 
			
use async_trait::async_trait;
use harmony_types::net::{IpAddress, MacAddress};
use derive_new::new;
use harmony_types::{
    id::Id,
    net::{IpAddress, MacAddress},
    switch::PortLocation,
};
use serde::Serialize;

use crate::executors::ExecutorError;
use crate::{executors::ExecutorError, hardware::PhysicalHost};

use super::{LogicalHost, k8s::K8sClient};

@@ -15,8 +26,8 @@ pub struct DHCPStaticEntry {
    pub ip: Ipv4Addr,
}

impl std::fmt::Display for DHCPStaticEntry {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
impl fmt::Display for DHCPStaticEntry {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mac = self
            .mac
            .iter()
@@ -38,8 +49,8 @@ pub trait Firewall: Send + Sync {
    fn get_host(&self) -> LogicalHost;
}

impl std::fmt::Debug for dyn Firewall {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
impl Debug for dyn Firewall {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_fmt(format_args!("Firewall {}", self.get_ip()))
    }
}
@@ -61,7 +72,7 @@ pub struct PxeOptions {
}

#[async_trait]
pub trait DhcpServer: Send + Sync + std::fmt::Debug {
pub trait DhcpServer: Send + Sync + Debug {
    async fn add_static_mapping(&self, entry: &DHCPStaticEntry) -> Result<(), ExecutorError>;
    async fn remove_static_mapping(&self, mac: &MacAddress) -> Result<(), ExecutorError>;
    async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)>;
@@ -100,8 +111,8 @@ pub trait DnsServer: Send + Sync {
    }
}

impl std::fmt::Debug for dyn DnsServer {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
impl Debug for dyn DnsServer {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_fmt(format_args!("DnsServer {}", self.get_ip()))
    }
}
@@ -137,8 +148,8 @@ pub enum DnsRecordType {
    TXT,
}

impl std::fmt::Display for DnsRecordType {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
impl fmt::Display for DnsRecordType {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            DnsRecordType::A => write!(f, "A"),
            DnsRecordType::AAAA => write!(f, "AAAA"),
@@ -172,6 +183,77 @@ impl FromStr for DnsRecordType {
    }
}

#[async_trait]
pub trait Switch: Send + Sync {
    async fn setup_switch(&self) -> Result<(), SwitchError>;

    async fn get_port_for_mac_address(
        &self,
        mac_address: &MacAddress,
    ) -> Result<Option<PortLocation>, SwitchError>;

    async fn configure_host_network(&self, config: &HostNetworkConfig) -> Result<(), SwitchError>;
}

#[derive(Clone, Debug, PartialEq)]
pub struct HostNetworkConfig {
    pub host_id: Id,
    pub switch_ports: Vec<SwitchPort>,
}

#[derive(Clone, Debug, PartialEq)]
pub struct SwitchPort {
    pub interface: NetworkInterface,
    pub port: PortLocation,
}

#[derive(Clone, Debug, PartialEq)]
pub struct NetworkInterface {
    pub name: String,
    pub mac_address: MacAddress,
    pub speed_mbps: Option<u32>,
    pub mtu: u32,
}

#[derive(Debug, Clone, new)]
pub struct SwitchError {
    msg: String,
}

impl fmt::Display for SwitchError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(&self.msg)
    }
}

impl Error for SwitchError {}

#[async_trait]
pub trait SwitchClient: Debug + Send + Sync {
    /// Executes essential, idempotent, one-time initial configuration steps.
    ///
    /// This is an opinionated procedure that sets up a switch to provide high availability
    /// capabilities as decided by the NationTech team.
    ///
    /// This includes tasks like enabling switchport mode for all interfaces
    /// except the ones intended for Fabric Networking, etc.
    ///
    /// The implementation must ensure the operation is **idempotent** (safe to run multiple times)
    /// and that it doesn't break existing configurations.
    async fn setup(&self) -> Result<(), SwitchError>;

    async fn find_port(
        &self,
        mac_address: &MacAddress,
    ) -> Result<Option<PortLocation>, SwitchError>;

    async fn configure_port_channel(
        &self,
        channel_name: &str,
        switch_ports: Vec<PortLocation>,
    ) -> Result<u8, SwitchError>;
}
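A minimal sketch of how a caller might drive this trait, assuming only the declarations above; the helper name bond_host_ports and its error message are illustrative and not part of Harmony.

// Sketch only: resolve each MAC address to its switch port, then aggregate the
// resulting ports into one port-channel via the trait methods declared above.
async fn bond_host_ports<C: SwitchClient>(
    client: &C,
    channel_name: &str,
    mac_addresses: &[MacAddress],
) -> Result<u8, SwitchError> {
    // Apply the opinionated, idempotent baseline configuration first.
    client.setup().await?;

    // Look up the port each MAC address was learned on.
    let mut ports = Vec::new();
    for mac in mac_addresses {
        match client.find_port(mac).await? {
            Some(port) => ports.push(port),
            None => {
                return Err(SwitchError::new(
                    "no switch port found for one of the MAC addresses".to_string(),
                ));
            }
        }
    }

    // Bond the resolved ports into a single port-channel (LAG) and return its id.
    client.configure_port_channel(channel_name, ports).await
}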

#[cfg(test)]
mod test {
    use std::sync::Arc;

@@ -21,6 +21,7 @@ pub struct AlertingInterpret<S: AlertSender> {
    pub sender: S,
    pub receivers: Vec<Box<dyn AlertReceiver<S>>>,
    pub rules: Vec<Box<dyn AlertRule<S>>>,
    pub scrape_targets: Option<Vec<Box<dyn ScrapeTarget<S>>>>,
}

#[async_trait]
@@ -30,6 +31,7 @@ impl<S: AlertSender + Installable<T>, T: Topology> Interpret<T> for AlertingInte
        inventory: &Inventory,
        topology: &T,
    ) -> Result<Outcome, InterpretError> {
        debug!("hit sender configure for AlertingInterpret");
        self.sender.configure(inventory, topology).await?;
        for receiver in self.receivers.iter() {
            receiver.install(&self.sender).await?;
@@ -38,6 +40,12 @@ impl<S: AlertSender + Installable<T>, T: Topology> Interpret<T> for AlertingInte
            debug!("installing rule: {:#?}", rule);
            rule.install(&self.sender).await?;
        }
        if let Some(targets) = &self.scrape_targets {
            for target in targets.iter() {
                debug!("installing scrape_target: {:#?}", target);
                target.install(&self.sender).await?;
            }
        }
        self.sender.ensure_installed(inventory, topology).await?;
        Ok(Outcome::success(format!(
            "successfully installed alert sender {}",
@@ -77,6 +85,7 @@ pub trait AlertRule<S: AlertSender>: std::fmt::Debug + Send + Sync {
}

#[async_trait]
pub trait ScrapeTarget<S: AlertSender> {
    async fn install(&self, sender: &S) -> Result<(), InterpretError>;
pub trait ScrapeTarget<S: AlertSender>: std::fmt::Debug + Send + Sync {
    async fn install(&self, sender: &S) -> Result<Outcome, InterpretError>;
    fn clone_box(&self) -> Box<dyn ScrapeTarget<S>>;
}
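A minimal sketch of what an implementor of the revised ScrapeTarget contract could look like; StaticScrapeTarget, its endpoint field, and the extra Sync bound are hypothetical, only the trait shape comes from the hunk above.

// Sketch only: a hypothetical scrape target satisfying the revised trait.
#[derive(Debug, Clone)]
struct StaticScrapeTarget {
    endpoint: String,
}

#[async_trait]
impl<S: AlertSender + Sync> ScrapeTarget<S> for StaticScrapeTarget {
    async fn install(&self, _sender: &S) -> Result<Outcome, InterpretError> {
        // A real implementation would register `self.endpoint` with the sender's
        // scrape configuration; here we only report success.
        Ok(Outcome::success(format!(
            "registered scrape target {}",
            self.endpoint
        )))
    }

    fn clone_box(&self) -> Box<dyn ScrapeTarget<S>> {
        Box::new(self.clone())
    }
}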

harmony/src/infra/brocade.rs (new file, 378 lines)
@@ -0,0 +1,378 @@
use async_trait::async_trait;
use brocade::{BrocadeClient, BrocadeOptions, InterSwitchLink, InterfaceStatus, PortOperatingMode};
use harmony_types::{
    net::{IpAddress, MacAddress},
    switch::{PortDeclaration, PortLocation},
};
use option_ext::OptionExt;

use crate::topology::{SwitchClient, SwitchError};

#[derive(Debug)]
pub struct BrocadeSwitchClient {
    brocade: Box<dyn BrocadeClient + Send + Sync>,
}

impl BrocadeSwitchClient {
    pub async fn init(
        ip_addresses: &[IpAddress],
        username: &str,
        password: &str,
        options: Option<BrocadeOptions>,
    ) -> Result<Self, brocade::Error> {
        let brocade = brocade::init(ip_addresses, 22, username, password, options).await?;
        Ok(Self { brocade })
    }
}

#[async_trait]
impl SwitchClient for BrocadeSwitchClient {
    async fn setup(&self) -> Result<(), SwitchError> {
        let stack_topology = self
            .brocade
            .get_stack_topology()
            .await
            .map_err(|e| SwitchError::new(e.to_string()))?;

        let interfaces = self
            .brocade
            .get_interfaces()
            .await
            .map_err(|e| SwitchError::new(e.to_string()))?;

        let interfaces: Vec<(String, PortOperatingMode)> = interfaces
            .into_iter()
            .filter(|interface| {
                interface.operating_mode.is_none() && interface.status == InterfaceStatus::Connected
            })
            .filter(|interface| {
                !stack_topology.iter().any(|link: &InterSwitchLink| {
                    link.local_port == interface.port_location
                        || link.remote_port.contains(&interface.port_location)
                })
            })
            .map(|interface| (interface.name.clone(), PortOperatingMode::Access))
            .collect();

        if interfaces.is_empty() {
            return Ok(());
        }

        self.brocade
            .configure_interfaces(interfaces)
            .await
            .map_err(|e| SwitchError::new(e.to_string()))?;

        Ok(())
    }

    async fn find_port(
        &self,
        mac_address: &MacAddress,
    ) -> Result<Option<PortLocation>, SwitchError> {
        let table = self
            .brocade
            .get_mac_address_table()
            .await
            .map_err(|e| SwitchError::new(format!("{e}")))?;

        let port = table
            .iter()
            .find(|entry| entry.mac_address == *mac_address)
            .map(|entry| match &entry.port {
                PortDeclaration::Single(port_location) => Ok(port_location.clone()),
                _ => Err(SwitchError::new(
                    "Multiple ports found for MAC address".into(),
                )),
            });

        match port {
            Some(Ok(p)) => Ok(Some(p)),
            Some(Err(e)) => Err(e),
            None => Ok(None),
        }
    }

    async fn configure_port_channel(
        &self,
        channel_name: &str,
        switch_ports: Vec<PortLocation>,
    ) -> Result<u8, SwitchError> {
        let channel_id = self
            .brocade
            .find_available_channel_id()
            .await
            .map_err(|e| SwitchError::new(format!("{e}")))?;

        self.brocade
            .create_port_channel(channel_id, channel_name, &switch_ports)
            .await
            .map_err(|e| SwitchError::new(format!("{e}")))?;

        Ok(channel_id)
    }
}
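A minimal usage sketch for the client above; the switch address and credentials are placeholders, and it assumes IpAddress can be parsed from its textual form and that brocade::Error implements std::error::Error.

// Sketch only: construct the Brocade-backed client and apply the baseline setup.
async fn setup_brocade_stack() -> Result<(), Box<dyn std::error::Error>> {
    // Placeholder address and credentials; real deployments load these from secrets.
    let switches: Vec<IpAddress> = vec!["192.168.1.10".parse()?];
    let client = BrocadeSwitchClient::init(&switches, "admin", "changeme", None).await?;

    // Idempotent baseline configuration as described by SwitchClient::setup.
    client.setup().await?;
    Ok(())
}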

#[cfg(test)]
mod tests {
    use std::sync::{Arc, Mutex};

    use assertor::*;
    use async_trait::async_trait;
    use brocade::{
        BrocadeClient, BrocadeInfo, Error, InterSwitchLink, InterfaceInfo, InterfaceStatus,
        InterfaceType, MacAddressEntry, PortChannelId, PortOperatingMode,
    };
    use harmony_types::switch::PortLocation;

    use crate::{infra::brocade::BrocadeSwitchClient, topology::SwitchClient};

    #[tokio::test]
    async fn setup_should_configure_ethernet_interfaces_as_access_ports() {
        let first_interface = given_interface()
            .with_port_location(PortLocation(1, 0, 1))
            .build();
        let second_interface = given_interface()
            .with_port_location(PortLocation(1, 0, 4))
            .build();
        let brocade = Box::new(FakeBrocadeClient::new(
            vec![],
            vec![first_interface.clone(), second_interface.clone()],
        ));
        let client = BrocadeSwitchClient {
            brocade: brocade.clone(),
        };

        client.setup().await.unwrap();

        let configured_interfaces = brocade.configured_interfaces.lock().unwrap();
        assert_that!(*configured_interfaces).contains_exactly(vec![
            (first_interface.name.clone(), PortOperatingMode::Access),
            (second_interface.name.clone(), PortOperatingMode::Access),
        ]);
    }

    #[tokio::test]
    async fn setup_with_an_already_configured_interface_should_skip_configuration() {
        let brocade = Box::new(FakeBrocadeClient::new(
            vec![],
            vec![
                given_interface()
                    .with_operating_mode(Some(PortOperatingMode::Access))
                    .build(),
            ],
        ));
        let client = BrocadeSwitchClient {
            brocade: brocade.clone(),
        };

        client.setup().await.unwrap();

        let configured_interfaces = brocade.configured_interfaces.lock().unwrap();
        assert_that!(*configured_interfaces).is_empty();
    }

    #[tokio::test]
    async fn setup_with_a_disconnected_interface_should_skip_configuration() {
        let brocade = Box::new(FakeBrocadeClient::new(
            vec![],
            vec![
                given_interface()
                    .with_status(InterfaceStatus::SfpAbsent)
                    .build(),
                given_interface()
                    .with_status(InterfaceStatus::NotConnected)
                    .build(),
            ],
        ));
        let client = BrocadeSwitchClient {
            brocade: brocade.clone(),
        };

        client.setup().await.unwrap();

        let configured_interfaces = brocade.configured_interfaces.lock().unwrap();
        assert_that!(*configured_interfaces).is_empty();
    }

    #[tokio::test]
    async fn setup_with_inter_switch_links_should_not_configure_interfaces_used_to_form_stack() {
        let brocade = Box::new(FakeBrocadeClient::new(
            vec![
                given_inter_switch_link()
                    .between(PortLocation(1, 0, 1), PortLocation(2, 0, 1))
                    .build(),
                given_inter_switch_link()
                    .between(PortLocation(2, 0, 2), PortLocation(3, 0, 1))
                    .build(),
            ],
            vec![
                given_interface()
                    .with_port_location(PortLocation(1, 0, 1))
                    .build(),
                given_interface()
                    .with_port_location(PortLocation(2, 0, 1))
                    .build(),
                given_interface()
                    .with_port_location(PortLocation(3, 0, 1))
                    .build(),
            ],
        ));
        let client = BrocadeSwitchClient {
            brocade: brocade.clone(),
        };

        client.setup().await.unwrap();

        let configured_interfaces = brocade.configured_interfaces.lock().unwrap();
        assert_that!(*configured_interfaces).is_empty();
    }

    #[derive(Debug, Clone)]
    struct FakeBrocadeClient {
        stack_topology: Vec<InterSwitchLink>,
        interfaces: Vec<InterfaceInfo>,
        configured_interfaces: Arc<Mutex<Vec<(String, PortOperatingMode)>>>,
    }

    #[async_trait]
    impl BrocadeClient for FakeBrocadeClient {
        async fn version(&self) -> Result<BrocadeInfo, Error> {
            todo!()
        }

        async fn get_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error> {
            todo!()
        }

        async fn get_stack_topology(&self) -> Result<Vec<InterSwitchLink>, Error> {
            Ok(self.stack_topology.clone())
        }

        async fn get_interfaces(&self) -> Result<Vec<InterfaceInfo>, Error> {
            Ok(self.interfaces.clone())
        }

        async fn configure_interfaces(
            &self,
            interfaces: Vec<(String, PortOperatingMode)>,
        ) -> Result<(), Error> {
            let mut configured_interfaces = self.configured_interfaces.lock().unwrap();
            *configured_interfaces = interfaces;

            Ok(())
        }

        async fn find_available_channel_id(&self) -> Result<PortChannelId, Error> {
            todo!()
        }

        async fn create_port_channel(
            &self,
            _channel_id: PortChannelId,
            _channel_name: &str,
            _ports: &[PortLocation],
        ) -> Result<(), Error> {
            todo!()
        }

        async fn clear_port_channel(&self, _channel_name: &str) -> Result<(), Error> {
            todo!()
        }
    }

    impl FakeBrocadeClient {
        fn new(stack_topology: Vec<InterSwitchLink>, interfaces: Vec<InterfaceInfo>) -> Self {
            Self {
                stack_topology,
                interfaces,
                configured_interfaces: Arc::new(Mutex::new(vec![])),
            }
        }
    }

    struct InterfaceInfoBuilder {
        port_location: Option<PortLocation>,
        interface_type: Option<InterfaceType>,
        operating_mode: Option<PortOperatingMode>,
        status: Option<InterfaceStatus>,
    }

    impl InterfaceInfoBuilder {
        fn build(&self) -> InterfaceInfo {
            let interface_type = self
                .interface_type
                .clone()
                .unwrap_or(InterfaceType::Ethernet("TenGigabitEthernet".into()));
            let port_location = self.port_location.clone().unwrap_or(PortLocation(1, 0, 1));
            let name = format!("{interface_type} {port_location}");
            let status = self.status.clone().unwrap_or(InterfaceStatus::Connected);

            InterfaceInfo {
                name,
                port_location,
                interface_type,
                operating_mode: self.operating_mode.clone(),
                status,
            }
        }

        fn with_port_location(self, port_location: PortLocation) -> Self {
            Self {
                port_location: Some(port_location),
                ..self
            }
        }

        fn with_operating_mode(self, operating_mode: Option<PortOperatingMode>) -> Self {
            Self {
                operating_mode,
                ..self
            }
        }

        fn with_status(self, status: InterfaceStatus) -> Self {
            Self {
                status: Some(status),
                ..self
            }
        }
    }

    struct InterSwitchLinkBuilder {
        link: Option<(PortLocation, PortLocation)>,
    }

    impl InterSwitchLinkBuilder {
        fn build(&self) -> InterSwitchLink {
            let link = self
                .link
                .clone()
                .unwrap_or((PortLocation(1, 0, 1), PortLocation(2, 0, 1)));

            InterSwitchLink {
                local_port: link.0,
                remote_port: Some(link.1),
            }
        }

        fn between(self, local_port: PortLocation, remote_port: PortLocation) -> Self {
            Self {
                link: Some((local_port, remote_port)),
            }
        }
    }

    fn given_interface() -> InterfaceInfoBuilder {
        InterfaceInfoBuilder {
            port_location: None,
            interface_type: None,
            operating_mode: None,
            status: None,
        }
    }

    fn given_inter_switch_link() -> InterSwitchLinkBuilder {
        InterSwitchLinkBuilder { link: None }
    }
}
@@ -11,7 +11,7 @@ pub struct InventoryRepositoryFactory;
impl InventoryRepositoryFactory {
    pub async fn build() -> Result<Box<dyn InventoryRepository>, RepoError> {
        Ok(Box::new(
            SqliteInventoryRepository::new(&(*DATABASE_URL)).await?,
            SqliteInventoryRepository::new(&DATABASE_URL).await?,
        ))
    }
}
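The only change in this hunk is dropping the explicit deref of the DATABASE_URL static. A standalone sketch of why both forms are equivalent, assuming the static is a lazily initialized String (its real definition lives elsewhere in the crate):

// Sketch only: deref coercion makes `&DATABASE_URL` behave like `&(*DATABASE_URL)`.
use std::sync::LazyLock;

static DATABASE_URL: LazyLock<String> =
    LazyLock::new(|| std::env::var("DATABASE_URL").unwrap_or_else(|_| "sqlite::memory:".into()));

fn connect(url: &str) {
    println!("connecting to {url}");
}

fn main() {
    connect(&(*DATABASE_URL)); // explicit deref, as before the change
    connect(&DATABASE_URL); // deref coercion resolves to the same &str
}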
@@ -1,3 +1,4 @@
pub mod brocade;
pub mod executors;
pub mod hp_ilo;
pub mod intel_amt;

@@ -26,19 +26,13 @@ impl LoadBalancer for OPNSenseFirewall {
    }

    async fn add_service(&self, service: &LoadBalancerService) -> Result<(), ExecutorError> {
        warn!(
            "TODO : the current implementation does not check / cleanup / merge with existing haproxy services properly. Make sure to manually verify that the configuration is correct after executing any operation here"
        );
        let mut config = self.opnsense_config.write().await;
        let mut load_balancer = config.load_balancer();

        let (frontend, backend, servers, healthcheck) =
            harmony_load_balancer_service_to_haproxy_xml(service);
        let mut load_balancer = config.load_balancer();
        load_balancer.add_backend(backend);
        load_balancer.add_frontend(frontend);
        load_balancer.add_servers(servers);
        if let Some(healthcheck) = healthcheck {
            load_balancer.add_healthcheck(healthcheck);
        }

        load_balancer.configure_service(frontend, backend, servers, healthcheck);

        Ok(())
    }
@@ -106,7 +100,7 @@ pub(crate) fn haproxy_xml_config_to_harmony_loadbalancer(
                .backends
                .backends
                .iter()
                .find(|b| b.uuid == frontend.default_backend);
                .find(|b| Some(b.uuid.clone()) == frontend.default_backend);

            let mut health_check = None;
            match matching_backend {
@@ -116,8 +110,7 @@ pub(crate) fn haproxy_xml_config_to_harmony_loadbalancer(
                }
                None => {
                    warn!(
                        "HAProxy config could not find a matching backend for frontend {:?}",
                        frontend
                        "HAProxy config could not find a matching backend for frontend {frontend:?}"
                    );
                }
            }
@@ -152,11 +145,11 @@ pub(crate) fn get_servers_for_backend(
        .servers
        .iter()
        .filter_map(|server| {
            let address = server.address.clone()?;
            let port = server.port?;

            if backend_servers.contains(&server.uuid.as_str()) {
                return Some(BackendServer {
                    address: server.address.clone(),
                    port: server.port,
                });
                return Some(BackendServer { address, port });
            }
            None
        })
@@ -347,7 +340,7 @@ pub(crate) fn harmony_load_balancer_service_to_haproxy_xml(
        name: format!("frontend_{}", service.listening_port),
        bind: service.listening_port.to_string(),
        mode: "tcp".to_string(), // TODO do not depend on health check here
        default_backend: backend.uuid.clone(),
        default_backend: Some(backend.uuid.clone()),
        ..Default::default()
    };
    info!("HAPRoxy frontend and backend mode currently hardcoded to tcp");
@@ -361,8 +354,8 @@ fn server_to_haproxy_server(server: &BackendServer) -> HAProxyServer {
        uuid: Uuid::new_v4().to_string(),
        name: format!("{}_{}", &server.address, &server.port),
        enabled: 1,
        address: server.address.clone(),
        port: server.port,
        address: Some(server.address.clone()),
        port: Some(server.port),
        mode: "active".to_string(),
        server_type: "static".to_string(),
        ..Default::default()
@@ -385,8 +378,8 @@ mod tests {
        let mut haproxy = HAProxy::default();
        let server = HAProxyServer {
            uuid: "server1".to_string(),
            address: "192.168.1.1".to_string(),
            port: 80,
            address: Some("192.168.1.1".to_string()),
            port: Some(80),
            ..Default::default()
        };
        haproxy.servers.servers.push(server);
@@ -411,8 +404,8 @@ mod tests {
        let mut haproxy = HAProxy::default();
        let server = HAProxyServer {
            uuid: "server1".to_string(),
            address: "192.168.1.1".to_string(),
            port: 80,
            address: Some("192.168.1.1".to_string()),
            port: Some(80),
            ..Default::default()
        };
        haproxy.servers.servers.push(server);
@@ -431,8 +424,8 @@ mod tests {
        let mut haproxy = HAProxy::default();
        let server = HAProxyServer {
            uuid: "server1".to_string(),
            address: "192.168.1.1".to_string(),
            port: 80,
            address: Some("192.168.1.1".to_string()),
            port: Some(80),
            ..Default::default()
        };
        haproxy.servers.servers.push(server);
@@ -453,16 +446,16 @@ mod tests {
        let mut haproxy = HAProxy::default();
        let server = HAProxyServer {
            uuid: "server1".to_string(),
            address: "some-hostname.test.mcd".to_string(),
            port: 80,
            address: Some("some-hostname.test.mcd".to_string()),
            port: Some(80),
            ..Default::default()
        };
        haproxy.servers.servers.push(server);

        let server = HAProxyServer {
            uuid: "server2".to_string(),
            address: "192.168.1.2".to_string(),
            port: 8080,
            address: Some("192.168.1.2".to_string()),
            port: Some(8080),
            ..Default::default()
        };
        haproxy.servers.servers.push(server);

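The reworked get_servers_for_backend above leans on `?` inside filter_map to skip servers whose address or port is now optional. A standalone sketch of that pattern with stand-in types (RawServer and Server are illustrative, not Harmony types):

// Sketch only: keep only entries that have both an address and a port.
struct RawServer {
    address: Option<String>,
    port: Option<u16>,
}

struct Server {
    address: String,
    port: u16,
}

fn complete_servers(raw: &[RawServer]) -> Vec<Server> {
    raw.iter()
        .filter_map(|s| {
            // `?` inside the closure returns None for this element only,
            // silently skipping servers with a missing address or port.
            let address = s.address.clone()?;
            let port = s.port?;
            Some(Server { address, port })
        })
        .collect()
}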
@@ -1,4 +1,5 @@
use async_trait::async_trait;
use harmony_macros::hurl;
use kube::{Api, api::GroupVersionKind};
use log::{debug, warn};
use non_blank_string_rs::NonBlankString;
@@ -160,6 +161,9 @@ global:
  ## Used for ingresses, certificates, SSO, notifications, etc.
  domain: {domain}

  securityContext:
    runAsUser: null

  # -- Runtime class name for all components
  runtimeClassName: ""

@@ -471,6 +475,13 @@ redis:
  # -- Redis name
  name: redis

  serviceAccount:
    create: true

  securityContext:
    runAsUser: null


  ## Redis image
  image:
    # -- Redis repository
@@ -1041,7 +1052,7 @@ commitServer:
        install_only: false,
        repository: Some(HelmRepository::new(
            "argo".to_string(),
            url::Url::parse("https://argoproj.github.io/argo-helm").unwrap(),
            hurl!("https://argoproj.github.io/argo-helm"),
            true,
        )),
    }

@@ -2,7 +2,11 @@ use crate::modules::application::{
    Application, ApplicationFeature, InstallationError, InstallationOutcome,
};
use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
use crate::modules::monitoring::grafana::grafana::Grafana;
use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus;
use crate::modules::monitoring::kube_prometheus::crd::service_monitor::{
    ServiceMonitor, ServiceMonitorSpec,
};
use crate::topology::MultiTargetTopology;
use crate::topology::ingress::Ingress;
use crate::{
@@ -14,7 +18,7 @@ use crate::{
    topology::{HelmCommand, K8sclient, Topology, tenant::TenantManager},
};
use crate::{
    modules::prometheus::prometheus::PrometheusApplicationMonitoring,
    modules::prometheus::prometheus::PrometheusMonitoring,
    topology::oberservability::monitoring::AlertReceiver,
};
use async_trait::async_trait;
@@ -22,6 +26,7 @@ use base64::{Engine as _, engine::general_purpose};
use harmony_secret::SecretManager;
use harmony_secret_derive::Secret;
use harmony_types::net::Url;
use kube::api::ObjectMeta;
use log::{debug, info};
use serde::{Deserialize, Serialize};
use std::sync::Arc;
@@ -40,7 +45,8 @@ impl<
        + TenantManager
        + K8sclient
        + MultiTargetTopology
        + PrometheusApplicationMonitoring<CRDPrometheus>
        + PrometheusMonitoring<CRDPrometheus>
        + Grafana
        + Ingress
        + std::fmt::Debug,
> ApplicationFeature<T> for Monitoring
@@ -57,10 +63,20 @@ impl<
            .unwrap_or_else(|| self.application.name());
        let domain = topology.get_domain("ntfy").await.unwrap();

        let app_service_monitor = ServiceMonitor {
            metadata: ObjectMeta {
                name: Some(self.application.name()),
                namespace: Some(namespace.clone()),
                ..Default::default()
            },
            spec: ServiceMonitorSpec::default(),
        };

        let mut alerting_score = ApplicationMonitoringScore {
            sender: CRDPrometheus {
                namespace: namespace.clone(),
                client: topology.k8s_client().await.unwrap(),
                service_monitor: vec![app_service_monitor],
            },
            application: self.application.clone(),
            receivers: self.alert_receiver.clone(),

@@ -18,7 +18,7 @@ use crate::{
    topology::{HelmCommand, K8sclient, Topology, tenant::TenantManager},
};
use crate::{
    modules::prometheus::prometheus::PrometheusApplicationMonitoring,
    modules::prometheus::prometheus::PrometheusMonitoring,
    topology::oberservability::monitoring::AlertReceiver,
};
use async_trait::async_trait;
@@ -42,7 +42,7 @@ impl<
        + MultiTargetTopology
        + Ingress
        + std::fmt::Debug
        + PrometheusApplicationMonitoring<RHOBObservability>,
        + PrometheusMonitoring<RHOBObservability>,
> ApplicationFeature<T> for Monitoring
{
    async fn ensure_installed(

harmony/src/modules/cert_manager/cluster_issuer.rs (new file, 209 lines)
@@ -0,0 +1,209 @@
use std::sync::Arc;

use async_trait::async_trait;
use harmony_types::id::Id;
use kube::{CustomResource, api::ObjectMeta};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};

use crate::{
    data::Version,
    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
    inventory::Inventory,
    score::Score,
    topology::{K8sclient, Topology, k8s::K8sClient},
};

#[derive(Clone, Debug, Serialize)]
pub struct ClusterIssuerScore {
    email: String,
    server: String,
    issuer_name: String,
    namespace: String,
}

impl<T: Topology + K8sclient> Score<T> for ClusterIssuerScore {
    fn name(&self) -> String {
        "ClusterIssuerScore".to_string()
    }

    #[doc(hidden)]
    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
        Box::new(ClusterIssuerInterpret {
            score: self.clone(),
        })
    }
}

#[derive(Debug, Clone)]
pub struct ClusterIssuerInterpret {
    score: ClusterIssuerScore,
}

#[async_trait]
impl<T: Topology + K8sclient> Interpret<T> for ClusterIssuerInterpret {
    async fn execute(
        &self,
        _inventory: &Inventory,
        topology: &T,
    ) -> Result<Outcome, InterpretError> {
        self.apply_cluster_issuer(topology.k8s_client().await.unwrap())
            .await
    }

    fn get_name(&self) -> InterpretName {
        InterpretName::Custom("ClusterIssuer")
    }

    fn get_version(&self) -> Version {
        todo!()
    }

    fn get_status(&self) -> InterpretStatus {
        todo!()
    }

    fn get_children(&self) -> Vec<Id> {
        todo!()
    }
}

impl ClusterIssuerInterpret {
    async fn validate_cert_manager(
        &self,
        client: &Arc<K8sClient>,
    ) -> Result<Outcome, InterpretError> {
        let cert_manager = "cert-manager".to_string();
        let operator_namespace = "openshift-operators".to_string();
        match client
            .get_deployment(&cert_manager, Some(&operator_namespace))
            .await
        {
            Ok(Some(deployment)) => {
                if let Some(status) = deployment.status {
                    let ready_count = status.ready_replicas.unwrap_or(0);
                    if ready_count >= 1 {
                        return Ok(Outcome::success(format!(
                            "'{}' is ready with {} replica(s).",
                            &cert_manager, ready_count
                        )));
                    } else {
                        return Err(InterpretError::new(
                            "cert-manager operator not ready in cluster".to_string(),
                        ));
                    }
                } else {
                    Err(InterpretError::new(format!(
                        "failed to get deployment status {} in ns {}",
                        &cert_manager, &operator_namespace
                    )))
                }
            }
            Ok(None) => Err(InterpretError::new(format!(
                "Deployment '{}' not found in namespace '{}'.",
                &cert_manager, &operator_namespace
            ))),
            Err(e) => Err(InterpretError::new(format!(
                "Failed to query for deployment '{}': {}",
                &cert_manager, e
            ))),
        }
    }

    fn build_cluster_issuer(&self) -> Result<ClusterIssuer, InterpretError> {
        let issuer_name = &self.score.issuer_name;
        let email = &self.score.email;
        let server = &self.score.server;
        let namespace = &self.score.namespace;
        let cluster_issuer = ClusterIssuer {
            metadata: ObjectMeta {
                name: Some(issuer_name.to_string()),
                namespace: Some(namespace.to_string()),
                ..Default::default()
            },
            spec: ClusterIssuerSpec {
                acme: AcmeSpec {
                    email: email.to_string(),
                    private_key_secret_ref: PrivateKeySecretRef {
                        name: issuer_name.to_string(),
                    },
                    server: server.to_string(),
                    solvers: vec![SolverSpec {
                        http01: Some(Http01Solver {
                            ingress: Http01Ingress {
                                class: "nginx".to_string(),
                            },
                        }),
                    }],
                },
            },
        };

        Ok(cluster_issuer)
    }

    pub async fn apply_cluster_issuer(
        &self,
        client: Arc<K8sClient>,
    ) -> Result<Outcome, InterpretError> {
        let namespace = self.score.namespace.clone();
        self.validate_cert_manager(&client).await?;
        let cluster_issuer = self.build_cluster_issuer().unwrap();
        client
            .apply_yaml(
                &serde_yaml::to_value(cluster_issuer).unwrap(),
                Some(&namespace),
            )
            .await?;
        Ok(Outcome::success(format!(
            "successfully deployed cluster operator: {} in namespace: {}",
            self.score.issuer_name, self.score.namespace
        )))
    }
}

#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[kube(
    group = "cert-manager.io",
    version = "v1",
    kind = "ClusterIssuer",
    plural = "clusterissuers"
)]
#[serde(rename_all = "camelCase")]
pub struct ClusterIssuerSpec {
    pub acme: AcmeSpec,
}

#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct AcmeSpec {
    pub email: String,
    pub private_key_secret_ref: PrivateKeySecretRef,
    pub server: String,
    pub solvers: Vec<SolverSpec>,
}

#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct PrivateKeySecretRef {
    pub name: String,
}

#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct SolverSpec {
    pub http01: Option<Http01Solver>,
    // Other solver types (e.g., dns01) would go here as Options
}

#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct Http01Solver {
    pub ingress: Http01Ingress,
}

#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct Http01Ingress {
    pub class: String,
}
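A sketch of how crate-internal code might build this score; the fields are private, so external callers would need a constructor that this diff does not show, and the ACME endpoint and values below are illustrative only.

// Sketch only: a staging ClusterIssuer score with placeholder values.
fn staging_cluster_issuer_score() -> ClusterIssuerScore {
    ClusterIssuerScore {
        email: "ops@example.com".to_string(),
        server: "https://acme-staging-v02.api.letsencrypt.org/directory".to_string(),
        issuer_name: "letsencrypt-staging".to_string(),
        namespace: "cert-manager".to_string(),
    }
}

Interpreting such a score first checks that the cert-manager deployment is ready, then builds the cert-manager.io/v1 ClusterIssuer with an HTTP-01 solver on the "nginx" ingress class and applies it through the topology's Kubernetes client.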
@@ -1,5 +1,6 @@
use std::{collections::HashMap, str::FromStr};

use harmony_macros::hurl;
use non_blank_string_rs::NonBlankString;
use serde::Serialize;
use url::Url;
@@ -33,7 +34,7 @@ impl<T: Topology + HelmCommand> Score<T> for CertManagerHelmScore {
            install_only: true,
            repository: Some(HelmRepository::new(
                "jetstack".to_string(),
                Url::parse("https://charts.jetstack.io").unwrap(),
                hurl!("https://charts.jetstack.io"),
                true,
            )),
        }

@@ -1,2 +1,3 @@
pub mod cluster_issuer;
mod helm;
pub use helm::*;

@@ -5,6 +5,7 @@ use crate::score::Score;
use crate::topology::{HelmCommand, Topology};
use async_trait::async_trait;
use harmony_types::id::Id;
use harmony_types::net::Url;
use helm_wrapper_rs;
use helm_wrapper_rs::blocking::{DefaultHelmExecutor, HelmExecutor};
use log::{debug, info, warn};
@@ -15,7 +16,6 @@ use std::path::Path;
use std::process::{Command, Output, Stdio};
use std::str::FromStr;
use temp_file::TempFile;
use url::Url;

#[derive(Debug, Clone, Serialize)]
pub struct HelmRepository {
@@ -78,7 +78,8 @@ impl HelmChartInterpret {
            repo.name, repo.url, repo.force_update
        );

        let mut add_args = vec!["repo", "add", &repo.name, repo.url.as_str()];
        let repo_url = repo.url.to_string();
        let mut add_args = vec!["repo", "add", &repo.name, &repo_url];
        if repo.force_update {
            add_args.push("--force-update");
        }

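The hunk above switches this file from url::Url to harmony_types::net::Url and renders the repository URL to an owned String before building the argument vector; binding that String to a local keeps it alive for as long as the args that borrow it. A standalone sketch of the pattern with a stand-in Url type (not the Harmony one):

// Sketch only: borrowing `url.to_string().as_str()` inline would borrow a temporary
// that is dropped at the end of the statement, so the String is bound first.
struct Url(String);

impl std::fmt::Display for Url {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(&self.0)
    }
}

fn main() {
    let name = "jetstack".to_string();
    let url = Url("https://charts.jetstack.io".to_string());

    let repo_url = url.to_string(); // owned String, outlives the argument vector
    let mut add_args = vec!["repo", "add", name.as_str(), repo_url.as_str()];
    add_args.push("--force-update");
    println!("helm {}", add_args.join(" "));
}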
@@ -1,364 +0,0 @@
use async_trait::async_trait;
use log::debug;
use serde::Serialize;
use std::collections::HashMap;
use std::io::ErrorKind;
use std::path::PathBuf;
use std::process::{Command, Output};
use temp_dir::{self, TempDir};
use temp_file::TempFile;

use crate::data::Version;
use crate::interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome};
use crate::inventory::Inventory;
use crate::score::Score;
use crate::topology::{HelmCommand, K8sclient, Topology};
use harmony_types::id::Id;

#[derive(Clone)]
pub struct HelmCommandExecutor {
    pub env: HashMap<String, String>,
    pub path: Option<PathBuf>,
    pub args: Vec<String>,
    pub api_versions: Option<Vec<String>>,
    pub kube_version: String,
    pub debug: Option<bool>,
    pub globals: HelmGlobals,
    pub chart: HelmChart,
}

#[derive(Clone)]
pub struct HelmGlobals {
    pub chart_home: Option<PathBuf>,
    pub config_home: Option<PathBuf>,
}

#[derive(Debug, Clone, Serialize)]
pub struct HelmChart {
    pub name: String,
    pub version: Option<String>,
    pub repo: Option<String>,
    pub release_name: Option<String>,
    pub namespace: Option<String>,
    pub additional_values_files: Vec<PathBuf>,
    pub values_file: Option<PathBuf>,
    pub values_inline: Option<String>,
    pub include_crds: Option<bool>,
    pub skip_hooks: Option<bool>,
    pub api_versions: Option<Vec<String>>,
    pub kube_version: Option<String>,
    pub name_template: String,
    pub skip_tests: Option<bool>,
    pub debug: Option<bool>,
}

impl HelmCommandExecutor {
    pub fn generate(mut self) -> Result<String, std::io::Error> {
        if self.globals.chart_home.is_none() {
            self.globals.chart_home = Some(PathBuf::from("charts"));
        }

        if self
            .clone()
            .chart
            .clone()
            .chart_exists_locally(self.clone().globals.chart_home.unwrap())
            .is_none()
        {
            if self.chart.repo.is_none() {
                return Err(std::io::Error::new(
                    ErrorKind::Other,
                    "Chart doesn't exist locally and no repo specified",
                ));
            }
            self.clone().run_command(
                self.chart
                    .clone()
                    .pull_command(self.globals.chart_home.clone().unwrap()),
            )?;
        }

        let out = self.clone().run_command(
            self.chart
                .clone()
                .helm_args(self.globals.chart_home.clone().unwrap()),
        )?;

        // TODO: don't use unwrap here
        let s = String::from_utf8(out.stdout).unwrap();
        debug!("helm stderr: {}", String::from_utf8(out.stderr).unwrap());
        debug!("helm status: {}", out.status);
        debug!("helm output: {s}");

        let clean = s.split_once("---").unwrap().1;

        Ok(clean.to_string())
    }

    pub fn version(self) -> Result<String, std::io::Error> {
        let out = self.run_command(vec![
            "version".to_string(),
            "-c".to_string(),
            "--short".to_string(),
        ])?;

        // TODO: don't use unwrap
        Ok(String::from_utf8(out.stdout).unwrap())
    }

    pub fn run_command(mut self, mut args: Vec<String>) -> Result<Output, std::io::Error> {
        if let Some(d) = self.debug {
            if d {
                args.push("--debug".to_string());
            }
        }

        let path = if let Some(p) = self.path {
            p
        } else {
            PathBuf::from("helm")
        };

        let config_home = match self.globals.config_home {
            Some(p) => p,
            None => PathBuf::from(TempDir::new()?.path()),
        };

        if let Some(yaml_str) = self.chart.values_inline {
            let tf: TempFile = temp_file::with_contents(yaml_str.as_bytes());
            self.chart
                .additional_values_files
                .push(PathBuf::from(tf.path()));
        };

        self.env.insert(
            "HELM_CONFIG_HOME".to_string(),
            config_home.to_str().unwrap().to_string(),
        );
        self.env.insert(
            "HELM_CACHE_HOME".to_string(),
            config_home.to_str().unwrap().to_string(),
        );
        self.env.insert(
            "HELM_DATA_HOME".to_string(),
            config_home.to_str().unwrap().to_string(),
        );

        Command::new(path).envs(self.env).args(args).output()
    }
}

impl HelmChart {
    pub fn chart_exists_locally(self, chart_home: PathBuf) -> Option<PathBuf> {
        let chart_path =
            PathBuf::from(chart_home.to_str().unwrap().to_string() + "/" + &self.name.to_string());

        if chart_path.exists() {
            Some(chart_path)
        } else {
            None
        }
    }

    pub fn pull_command(self, chart_home: PathBuf) -> Vec<String> {
        let mut args = vec![
            "pull".to_string(),
            "--untar".to_string(),
            "--untardir".to_string(),
            chart_home.to_str().unwrap().to_string(),
        ];

        match self.repo {
            Some(r) => {
                if r.starts_with("oci://") {
                    args.push(
                        r.trim_end_matches("/").to_string() + "/" + self.name.clone().as_str(),
                    );
                } else {
                    args.push("--repo".to_string());
                    args.push(r.to_string());

                    args.push(self.name);
                }
            }
            None => args.push(self.name),
        };

        if let Some(v) = self.version {
            args.push("--version".to_string());
            args.push(v.to_string());
        }

        args
    }

    pub fn helm_args(self, chart_home: PathBuf) -> Vec<String> {
        let mut args: Vec<String> = vec!["template".to_string()];

        match self.release_name {
            Some(rn) => args.push(rn.to_string()),
            None => args.push("--generate-name".to_string()),
        }

        args.push(
            PathBuf::from(chart_home.to_str().unwrap().to_string() + "/" + self.name.as_str())
                .to_str()
                .unwrap()
                .to_string(),
        );

        if let Some(n) = self.namespace {
            args.push("--namespace".to_string());
            args.push(n.to_string());
        }

        if let Some(f) = self.values_file {
            args.push("-f".to_string());
            args.push(f.to_str().unwrap().to_string());
        }

        for f in self.additional_values_files {
            args.push("-f".to_string());
            args.push(f.to_str().unwrap().to_string());
        }

        if let Some(vv) = self.api_versions {
            for v in vv {
                args.push("--api-versions".to_string());
                args.push(v);
            }
        }

        if let Some(kv) = self.kube_version {
            args.push("--kube-version".to_string());
            args.push(kv);
        }

        if let Some(crd) = self.include_crds {
            if crd {
                args.push("--include-crds".to_string());
            }
        }

        if let Some(st) = self.skip_tests {
            if st {
                args.push("--skip-tests".to_string());
            }
        }

        if let Some(sh) = self.skip_hooks {
            if sh {
                args.push("--no-hooks".to_string());
            }
        }

        if let Some(d) = self.debug {
            if d {
                args.push("--debug".to_string());
            }
        }

        args
    }
}

#[derive(Debug, Clone, Serialize)]
pub struct HelmChartScoreV2 {
			
		||||
    pub chart: HelmChart,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
impl<T: Topology + K8sclient + HelmCommand> Score<T> for HelmChartScoreV2 {
 | 
			
		||||
    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
 | 
			
		||||
        Box::new(HelmChartInterpretV2 {
 | 
			
		||||
            score: self.clone(),
 | 
			
		||||
        })
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    fn name(&self) -> String {
 | 
			
		||||
        format!(
 | 
			
		||||
            "{} {} HelmChartScoreV2",
 | 
			
		||||
            self.chart
 | 
			
		||||
                .release_name
 | 
			
		||||
                .clone()
 | 
			
		||||
                .unwrap_or("Unknown".to_string()),
 | 
			
		||||
            self.chart.name
 | 
			
		||||
        )
 | 
			
		||||
    }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#[derive(Debug, Serialize)]
 | 
			
		||||
pub struct HelmChartInterpretV2 {
 | 
			
		||||
    pub score: HelmChartScoreV2,
 | 
			
		||||
}
 | 
			
		||||
impl HelmChartInterpretV2 {}
 | 
			
		||||
 | 
			
		||||
#[async_trait]
 | 
			
		||||
impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for HelmChartInterpretV2 {
 | 
			
		||||
    async fn execute(
 | 
			
		||||
        &self,
 | 
			
		||||
        _inventory: &Inventory,
 | 
			
		||||
        _topology: &T,
 | 
			
		||||
    ) -> Result<Outcome, InterpretError> {
 | 
			
		||||
        let _ns = self
 | 
			
		||||
            .score
 | 
			
		||||
            .chart
 | 
			
		||||
            .namespace
 | 
			
		||||
            .as_ref()
 | 
			
		||||
            .unwrap_or_else(|| todo!("Get namespace from active kubernetes cluster"));
 | 
			
		||||
 | 
			
		||||
        let helm_executor = HelmCommandExecutor {
 | 
			
		||||
            env: HashMap::new(),
 | 
			
		||||
            path: None,
 | 
			
		||||
            args: vec![],
 | 
			
		||||
            api_versions: None,
 | 
			
		||||
            kube_version: "v1.33.0".to_string(),
 | 
			
		||||
            debug: Some(false),
 | 
			
		||||
            globals: HelmGlobals {
 | 
			
		||||
                chart_home: None,
 | 
			
		||||
                config_home: None,
 | 
			
		||||
            },
 | 
			
		||||
            chart: self.score.chart.clone(),
 | 
			
		||||
        };
 | 
			
		||||
 | 
			
		||||
        // let mut helm_options = Vec::new();
 | 
			
		||||
        // if self.score.create_namespace {
 | 
			
		||||
        //     helm_options.push(NonBlankString::from_str("--create-namespace").unwrap());
 | 
			
		||||
        // }
 | 
			
		||||
 | 
			
		||||
        let res = helm_executor.generate();
 | 
			
		||||
 | 
			
		||||
        let _output = match res {
 | 
			
		||||
            Ok(output) => output,
 | 
			
		||||
            Err(err) => return Err(InterpretError::new(err.to_string())),
 | 
			
		||||
        };
 | 
			
		||||
 | 
			
		||||
        // TODO: implement actually applying the YAML from the templating in the generate function to a k8s cluster, having trouble passing in straight YAML into the k8s client
 | 
			
		||||
 | 
			
		||||
        // let k8s_resource = k8s_openapi::serde_json::from_str(output.as_str()).unwrap();
 | 
			
		||||
 | 
			
		||||
        // let client = topology
 | 
			
		||||
        //     .k8s_client()
 | 
			
		||||
        //     .await
 | 
			
		||||
        //     .expect("Environment should provide enough information to instanciate a client")
 | 
			
		||||
        //     .apply_namespaced(&vec![output], Some(ns.to_string().as_str()));
 | 
			
		||||
        // match client.apply_yaml(output) {
 | 
			
		||||
        //     Ok(_) => return Ok(Outcome::success("Helm chart deployed".to_string())),
 | 
			
		||||
        //     Err(e) => return Err(InterpretError::new(e)),
 | 
			
		||||
        // }
 | 
			
		||||
 | 
			
		||||
        Ok(Outcome::success("Helm chart deployed".to_string()))
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    fn get_name(&self) -> InterpretName {
 | 
			
		||||
        InterpretName::HelmCommand
 | 
			
		||||
    }
 | 
			
		||||
    fn get_version(&self) -> Version {
 | 
			
		||||
        todo!()
 | 
			
		||||
    }
 | 
			
		||||
    fn get_status(&self) -> InterpretStatus {
 | 
			
		||||
        todo!()
 | 
			
		||||
    }
 | 
			
		||||
    fn get_children(&self) -> Vec<Id> {
 | 
			
		||||
        todo!()
 | 
			
		||||
    }
 | 
			
		||||
}
 | 
			
		||||
@ -1,2 +1 @@
 | 
			
		||||
pub mod chart;
 | 
			
		||||
pub mod command;
 | 
			
		||||
 | 
			
		||||
@ -38,13 +38,15 @@ impl<
        + 'static
        + Send
        + Clone,
    T: Topology,
    T: Topology + K8sclient,
> Score<T> for K8sResourceScore<K>
where
    <K as kube::Resource>::DynamicType: Default,
{
    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
        todo!()
        Box::new(K8sResourceInterpret {
            score: self.clone(),
        })
    }

    fn name(&self) -> String {

@ -1,21 +1,23 @@
use std::sync::Arc;

use async_trait::async_trait;
use log::debug;
use serde::Serialize;

use crate::{
    data::Version,
    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
    inventory::Inventory,
    interpret::Interpret,
    modules::{
        application::Application,
        monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus,
        prometheus::prometheus::PrometheusApplicationMonitoring,
        monitoring::{
            grafana::grafana::Grafana, kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus,
        },
        prometheus::prometheus::PrometheusMonitoring,
    },
    score::Score,
    topology::{PreparationOutcome, Topology, oberservability::monitoring::AlertReceiver},
    topology::{
        K8sclient, Topology,
        oberservability::monitoring::{AlertReceiver, AlertingInterpret, ScrapeTarget},
    },
};
use harmony_types::id::Id;

#[derive(Debug, Clone, Serialize)]
pub struct ApplicationMonitoringScore {
@ -24,12 +26,16 @@ pub struct ApplicationMonitoringScore {
    pub receivers: Vec<Box<dyn AlertReceiver<CRDPrometheus>>>,
}

impl<T: Topology + PrometheusApplicationMonitoring<CRDPrometheus>> Score<T>
impl<T: Topology + PrometheusMonitoring<CRDPrometheus> + K8sclient + Grafana> Score<T>
    for ApplicationMonitoringScore
{
    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
        Box::new(ApplicationMonitoringInterpret {
            score: self.clone(),
        debug!("creating alerting interpret");
        Box::new(AlertingInterpret {
            sender: self.sender.clone(),
            receivers: self.receivers.clone(),
            rules: vec![],
            scrape_targets: None,
        })
    }

@ -40,55 +46,3 @@ impl<T: Topology + PrometheusApplicationMonitoring<CRDPrometheus>> Score<T>
        )
    }
}

#[derive(Debug)]
pub struct ApplicationMonitoringInterpret {
    score: ApplicationMonitoringScore,
}

#[async_trait]
impl<T: Topology + PrometheusApplicationMonitoring<CRDPrometheus>> Interpret<T>
    for ApplicationMonitoringInterpret
{
    async fn execute(
        &self,
        inventory: &Inventory,
        topology: &T,
    ) -> Result<Outcome, InterpretError> {
        let result = topology
            .install_prometheus(
                &self.score.sender,
                inventory,
                Some(self.score.receivers.clone()),
            )
            .await;

        match result {
            Ok(outcome) => match outcome {
                PreparationOutcome::Success { details: _ } => {
                    Ok(Outcome::success("Prometheus installed".into()))
                }
                PreparationOutcome::Noop => {
                    Ok(Outcome::noop("Prometheus installation skipped".into()))
                }
            },
            Err(err) => Err(InterpretError::from(err)),
        }
    }

    fn get_name(&self) -> InterpretName {
        InterpretName::ApplicationMonitoring
    }

    fn get_version(&self) -> Version {
        todo!()
    }

    fn get_status(&self) -> InterpretStatus {
        todo!()
    }

    fn get_children(&self) -> Vec<Id> {
        todo!()
    }
}

@ -12,7 +12,7 @@ use crate::{
        monitoring::kube_prometheus::crd::{
            crd_alertmanager_config::CRDPrometheus, rhob_alertmanager_config::RHOBObservability,
        },
        prometheus::prometheus::PrometheusApplicationMonitoring,
        prometheus::prometheus::PrometheusMonitoring,
    },
    score::Score,
    topology::{PreparationOutcome, Topology, oberservability::monitoring::AlertReceiver},
@ -26,7 +26,7 @@ pub struct ApplicationRHOBMonitoringScore {
    pub receivers: Vec<Box<dyn AlertReceiver<RHOBObservability>>>,
}

impl<T: Topology + PrometheusApplicationMonitoring<RHOBObservability>> Score<T>
impl<T: Topology + PrometheusMonitoring<RHOBObservability>> Score<T>
    for ApplicationRHOBMonitoringScore
{
    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
@ -49,7 +49,7 @@ pub struct ApplicationRHOBMonitoringInterpret {
}

#[async_trait]
impl<T: Topology + PrometheusApplicationMonitoring<RHOBObservability>> Interpret<T>
impl<T: Topology + PrometheusMonitoring<RHOBObservability>> Interpret<T>
    for ApplicationRHOBMonitoringInterpret
{
    async fn execute(

harmony/src/modules/monitoring/grafana/grafana.rs (new file, 17 lines)
@ -0,0 +1,17 @@
use async_trait::async_trait;
use k8s_openapi::Resource;

use crate::{
    inventory::Inventory,
    topology::{PreparationError, PreparationOutcome},
};

#[async_trait]
pub trait Grafana {
    async fn ensure_grafana_operator(
        &self,
        inventory: &Inventory,
    ) -> Result<PreparationOutcome, PreparationError>;

    async fn install_grafana(&self) -> Result<PreparationOutcome, PreparationError>;
}
@ -1,27 +1,28 @@
use harmony_macros::hurl;
use non_blank_string_rs::NonBlankString;
use std::str::FromStr;
use std::{collections::HashMap, str::FromStr};

use crate::modules::helm::chart::HelmChartScore;

pub fn grafana_helm_chart_score(ns: &str) -> HelmChartScore {
    let values = r#"
rbac:
  namespaced: true
sidecar:
  dashboards:
    enabled: true
        "#
    .to_string();
use crate::modules::helm::chart::{HelmChartScore, HelmRepository};

pub fn grafana_helm_chart_score(ns: &str, namespace_scope: bool) -> HelmChartScore {
    let mut values_overrides = HashMap::new();
    values_overrides.insert(
        NonBlankString::from_str("namespaceScope").unwrap(),
        namespace_scope.to_string(),
    );
    HelmChartScore {
        namespace: Some(NonBlankString::from_str(ns).unwrap()),
        release_name: NonBlankString::from_str("grafana").unwrap(),
        chart_name: NonBlankString::from_str("oci://ghcr.io/grafana/helm-charts/grafana").unwrap(),
        release_name: NonBlankString::from_str("grafana-operator").unwrap(),
        chart_name: NonBlankString::from_str("grafana/grafana-operator").unwrap(),
        chart_version: None,
        values_overrides: None,
        values_yaml: Some(values.to_string()),
        values_overrides: Some(values_overrides),
        values_yaml: None,
        create_namespace: true,
        install_only: true,
        repository: None,
        repository: Some(HelmRepository::new(
            "grafana".to_string(),
            hurl!("https://grafana.github.io/helm-charts"),
            true,
        )),
    }
}

@ -1 +1,2 @@
pub mod grafana;
pub mod helm;

@ -1,12 +1,25 @@
use std::sync::Arc;

use async_trait::async_trait;
use kube::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};

use crate::topology::{
    k8s::K8sClient,
    oberservability::monitoring::{AlertReceiver, AlertSender},
use crate::{
    interpret::{InterpretError, Outcome},
    inventory::Inventory,
    modules::{
        monitoring::{
            grafana::grafana::Grafana, kube_prometheus::crd::service_monitor::ServiceMonitor,
        },
        prometheus::prometheus::PrometheusMonitoring,
    },
    topology::{
        K8sclient, Topology,
        installable::Installable,
        k8s::K8sClient,
        oberservability::monitoring::{AlertReceiver, AlertSender, ScrapeTarget},
    },
};

#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
@ -26,6 +39,7 @@ pub struct AlertmanagerConfigSpec {
pub struct CRDPrometheus {
    pub namespace: String,
    pub client: Arc<K8sClient>,
    pub service_monitor: Vec<ServiceMonitor>,
}

impl AlertSender for CRDPrometheus {
@ -40,6 +54,12 @@ impl Clone for Box<dyn AlertReceiver<CRDPrometheus>> {
    }
}

impl Clone for Box<dyn ScrapeTarget<CRDPrometheus>> {
    fn clone(&self) -> Self {
        self.clone_box()
    }
}

impl Serialize for Box<dyn AlertReceiver<CRDPrometheus>> {
    fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
    where
@ -48,3 +68,24 @@ impl Serialize for Box<dyn AlertReceiver<CRDPrometheus>> {
        todo!()
    }
}

#[async_trait]
impl<T: Topology + K8sclient + PrometheusMonitoring<CRDPrometheus> + Grafana> Installable<T>
    for CRDPrometheus
{
    async fn configure(&self, inventory: &Inventory, topology: &T) -> Result<(), InterpretError> {
        topology.ensure_grafana_operator(inventory).await?;
        topology.ensure_prometheus_operator(self, inventory).await?;
        Ok(())
    }

    async fn ensure_installed(
        &self,
        inventory: &Inventory,
        topology: &T,
    ) -> Result<(), InterpretError> {
        topology.install_grafana().await?;
        topology.install_prometheus(&self, inventory, None).await?;
        Ok(())
    }
}

@ -103,9 +103,34 @@ pub struct GrafanaDashboardSpec {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub resync_period: Option<String>,

    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub datasources: Option<Vec<GrafanaDashboardDatasource>>,

    pub instance_selector: LabelSelector,

    pub json: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub json: Option<String>,

    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub grafana_com: Option<GrafanaCom>,
}

#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaDashboardDatasource {
    pub input_name: String,
    pub datasource_name: String,
}

// ------------------------------------------------------------------------------------------------

#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaCom {
    pub id: u32,

    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub revision: Option<u32>,
}

// ------------------------------------------------------------------------------------------------
@ -126,20 +151,79 @@ pub struct GrafanaDatasourceSpec {
    pub allow_cross_namespace_import: Option<bool>,

    pub datasource: GrafanaDatasourceConfig,

    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub values_from: Option<Vec<GrafanaValueFrom>>,
}

#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaValueFrom {
    pub target_path: String,
    pub value_from: GrafanaValueSource,
}

#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaValueSource {
    pub secret_key_ref: GrafanaSecretKeyRef,
}

#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaSecretKeyRef {
    pub name: String,
    pub key: String,
}

#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaDatasourceConfig {
    pub access: String,
    pub database: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub json_data: Option<BTreeMap<String, String>>,
    pub database: Option<String>,
    pub name: String,
    pub r#type: String,
    pub url: String,
    /// Represents jsonData in the GrafanaDatasource spec
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub json_data: Option<GrafanaDatasourceJsonData>,

    /// Represents secureJsonData (secrets)
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub secure_json_data: Option<GrafanaDatasourceSecureJsonData>,

    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub is_default: Option<bool>,

    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub editable: Option<bool>,
}

#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaDatasourceJsonData {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub time_interval: Option<String>,

    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub http_header_name1: Option<String>,

    /// Disable TLS skip verification (false = verify)
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tls_skip_verify: Option<bool>,

    /// Auth type - set to "forward" for OpenShift OAuth identity
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub oauth_pass_thru: Option<bool>,
}

#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct GrafanaDatasourceSecureJsonData {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub http_header_value1: Option<String>,
}
// ------------------------------------------------------------------------------------------------

#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, Default)]

@ -0,0 +1,187 @@
use std::net::IpAddr;

use async_trait::async_trait;
use kube::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};

use crate::{
    modules::monitoring::kube_prometheus::crd::{
        crd_alertmanager_config::CRDPrometheus, crd_prometheuses::LabelSelector,
    },
    topology::oberservability::monitoring::ScrapeTarget,
};

#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[kube(
    group = "monitoring.coreos.com",
    version = "v1alpha1",
    kind = "ScrapeConfig",
    plural = "scrapeconfigs",
    namespaced
)]
#[serde(rename_all = "camelCase")]
pub struct ScrapeConfigSpec {
    /// List of static configurations.
    pub static_configs: Option<Vec<StaticConfig>>,

    /// Kubernetes service discovery.
    pub kubernetes_sd_configs: Option<Vec<KubernetesSDConfig>>,

    /// HTTP-based service discovery.
    pub http_sd_configs: Option<Vec<HttpSDConfig>>,

    /// File-based service discovery.
    pub file_sd_configs: Option<Vec<FileSDConfig>>,

    /// DNS-based service discovery.
    pub dns_sd_configs: Option<Vec<DnsSDConfig>>,

    /// Consul service discovery.
    pub consul_sd_configs: Option<Vec<ConsulSDConfig>>,

    /// Relabeling configuration applied to discovered targets.
    pub relabel_configs: Option<Vec<RelabelConfig>>,

    /// Metric relabeling configuration applied to scraped samples.
    pub metric_relabel_configs: Option<Vec<RelabelConfig>>,

    /// Path to scrape metrics from (defaults to `/metrics`).
    pub metrics_path: Option<String>,

    /// Interval at which Prometheus scrapes targets (e.g., "30s").
    pub scrape_interval: Option<String>,

    /// Timeout for scraping (e.g., "10s").
    pub scrape_timeout: Option<String>,

    /// Optional job name override.
    pub job_name: Option<String>,

    /// Optional scheme (http or https).
    pub scheme: Option<String>,

    /// Authorization parameters for SNMP walk.
    pub params: Option<Params>,
}

/// Static configuration section of a ScrapeConfig.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct StaticConfig {
    pub targets: Vec<String>,

    pub labels: Option<LabelSelector>,
}

/// Relabeling configuration for target or metric relabeling.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct RelabelConfig {
    pub source_labels: Option<Vec<String>>,
    pub separator: Option<String>,
    pub target_label: Option<String>,
    pub regex: Option<String>,
    pub modulus: Option<u64>,
    pub replacement: Option<String>,
    pub action: Option<String>,
}

/// Kubernetes service discovery configuration.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct KubernetesSDConfig {
    /// "pod", "service", "endpoints"
    pub role: String,
    pub namespaces: Option<NamespaceSelector>,
    pub selectors: Option<Vec<LabelSelector>>,
    pub api_server: Option<String>,
    pub bearer_token_file: Option<String>,
    pub tls_config: Option<TLSConfig>,
}

/// Namespace selector for Kubernetes service discovery.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct NamespaceSelector {
    pub any: Option<bool>,
    pub match_names: Option<Vec<String>>,
}

/// HTTP-based service discovery configuration.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct HttpSDConfig {
    pub url: String,
    pub refresh_interval: Option<String>,
    pub basic_auth: Option<BasicAuth>,
    pub authorization: Option<Authorization>,
    pub tls_config: Option<TLSConfig>,
}

/// File-based service discovery configuration.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct FileSDConfig {
    pub files: Vec<String>,
    pub refresh_interval: Option<String>,
}

/// DNS-based service discovery configuration.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct DnsSDConfig {
    pub names: Vec<String>,
    pub refresh_interval: Option<String>,
    pub type_: Option<String>, // SRV, A, AAAA
    pub port: Option<u16>,
}

/// Consul service discovery configuration.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct ConsulSDConfig {
    pub server: String,
    pub services: Option<Vec<String>>,
    pub scheme: Option<String>,
    pub datacenter: Option<String>,
    pub tag_separator: Option<String>,
    pub refresh_interval: Option<String>,
    pub tls_config: Option<TLSConfig>,
}

/// Basic authentication credentials.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct BasicAuth {
    pub username: String,
    pub password: Option<String>,
    pub password_file: Option<String>,
}

/// Bearer token or other auth mechanisms.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct Authorization {
    pub credentials: Option<String>,
    pub credentials_file: Option<String>,
    pub type_: Option<String>,
}

/// TLS configuration for secure scraping.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct TLSConfig {
    pub ca_file: Option<String>,
    pub cert_file: Option<String>,
    pub key_file: Option<String>,
    pub server_name: Option<String>,
    pub insecure_skip_verify: Option<bool>,
}

/// Authorization parameters for SNMP walk.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct Params {
    pub auth: Option<Vec<String>>,
    pub module: Option<Vec<String>>,
}
@ -4,6 +4,7 @@ pub mod crd_default_rules;
pub mod crd_grafana;
pub mod crd_prometheus_rules;
pub mod crd_prometheuses;
pub mod crd_scrape_config;
pub mod grafana_default_dashboard;
pub mod grafana_operator;
pub mod prometheus_operator;

@ -31,6 +31,7 @@ impl<T: Topology + HelmCommand + TenantManager> Score<T> for HelmPrometheusAlert
            sender: KubePrometheus { config },
            receivers: self.receivers.clone(),
            rules: self.rules.clone(),
            scrape_targets: None,
        })
    }
    fn name(&self) -> String {

@ -4,4 +4,6 @@ pub mod application_monitoring;
pub mod grafana;
pub mod kube_prometheus;
pub mod ntfy;
pub mod okd;
pub mod prometheus;
pub mod scrape_target;

@ -100,11 +100,7 @@ impl<T: Topology + HelmCommand + K8sclient + MultiTargetTopology> Interpret<T> f

        info!("deploying ntfy...");
        client
            .wait_until_deployment_ready(
                "ntfy".to_string(),
                Some(self.score.namespace.as_str()),
                None,
            )
            .wait_until_deployment_ready("ntfy", Some(self.score.namespace.as_str()), None)
            .await?;
        info!("ntfy deployed");

harmony/src/modules/monitoring/okd/enable_user_workload.rs (new file, 149 lines)
@ -0,0 +1,149 @@
use std::{collections::BTreeMap, sync::Arc};

use crate::{
    data::Version,
    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
    inventory::Inventory,
    score::Score,
    topology::{K8sclient, Topology, k8s::K8sClient},
};
use async_trait::async_trait;
use harmony_types::id::Id;
use k8s_openapi::api::core::v1::ConfigMap;
use kube::api::ObjectMeta;
use serde::Serialize;

#[derive(Clone, Debug, Serialize)]
pub struct OpenshiftUserWorkloadMonitoring {}

impl<T: Topology + K8sclient> Score<T> for OpenshiftUserWorkloadMonitoring {
    fn name(&self) -> String {
        "OpenshiftUserWorkloadMonitoringScore".to_string()
    }

    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
        Box::new(OpenshiftUserWorkloadMonitoringInterpret {})
    }
}

#[derive(Clone, Debug, Serialize)]
pub struct OpenshiftUserWorkloadMonitoringInterpret {}

#[async_trait]
impl<T: Topology + K8sclient> Interpret<T> for OpenshiftUserWorkloadMonitoringInterpret {
    async fn execute(
        &self,
        _inventory: &Inventory,
        topology: &T,
    ) -> Result<Outcome, InterpretError> {
        let client = topology.k8s_client().await.unwrap();
        self.update_cluster_monitoring_config_cm(&client).await?;
        self.update_user_workload_monitoring_config_cm(&client)
            .await?;
        self.verify_user_workload(&client).await?;
        Ok(Outcome::success(
            "successfully enabled user-workload-monitoring".to_string(),
        ))
    }

    fn get_name(&self) -> InterpretName {
        InterpretName::Custom("OpenshiftUserWorkloadMonitoring")
    }

    fn get_version(&self) -> Version {
        todo!()
    }

    fn get_status(&self) -> InterpretStatus {
        todo!()
    }

    fn get_children(&self) -> Vec<Id> {
        todo!()
    }
}

impl OpenshiftUserWorkloadMonitoringInterpret {
    pub async fn update_cluster_monitoring_config_cm(
        &self,
        client: &Arc<K8sClient>,
    ) -> Result<Outcome, InterpretError> {
        let mut data = BTreeMap::new();
        data.insert(
            "config.yaml".to_string(),
            r#"
enableUserWorkload: true
alertmanagerMain:
  enableUserAlertmanagerConfig: true
"#
            .to_string(),
        );

        let cm = ConfigMap {
            metadata: ObjectMeta {
                name: Some("cluster-monitoring-config".to_string()),
                namespace: Some("openshift-monitoring".to_string()),
                ..Default::default()
            },
            data: Some(data),
            ..Default::default()
        };
        client.apply(&cm, Some("openshift-monitoring")).await?;

        Ok(Outcome::success(
            "updated cluster-monitoring-config-map".to_string(),
        ))
    }

    pub async fn update_user_workload_monitoring_config_cm(
        &self,
        client: &Arc<K8sClient>,
    ) -> Result<Outcome, InterpretError> {
        let mut data = BTreeMap::new();
        data.insert(
            "config.yaml".to_string(),
            r#"
alertmanager:
  enabled: true
  enableAlertmanagerConfig: true
"#
            .to_string(),
        );
        let cm = ConfigMap {
            metadata: ObjectMeta {
                name: Some("user-workload-monitoring-config".to_string()),
                namespace: Some("openshift-user-workload-monitoring".to_string()),
                ..Default::default()
            },
            data: Some(data),
            ..Default::default()
        };
        client
            .apply(&cm, Some("openshift-user-workload-monitoring"))
            .await?;

        Ok(Outcome::success(
            "updated openshift-user-monitoring-config-map".to_string(),
        ))
    }

    pub async fn verify_user_workload(
        &self,
        client: &Arc<K8sClient>,
    ) -> Result<Outcome, InterpretError> {
        let namespace = "openshift-user-workload-monitoring";
        let alertmanager_name = "alertmanager-user-workload-0";
        let prometheus_name = "prometheus-user-workload-0";
        client
            .wait_for_pod_ready(alertmanager_name, Some(namespace))
            .await?;
        client
            .wait_for_pod_ready(prometheus_name, Some(namespace))
            .await?;

        Ok(Outcome::success(format!(
            "pods: {}, {} ready in ns: {}",
            alertmanager_name, prometheus_name, namespace
        )))
    }
}

harmony/src/modules/monitoring/okd/mod.rs (new file, 1 line)
@ -0,0 +1 @@
pub mod enable_user_workload;
@ -114,7 +114,7 @@ impl Prometheus {
        };

        if let Some(ns) = namespace.as_deref() {
            grafana_helm_chart_score(ns)
            grafana_helm_chart_score(ns, false)
                .interpret(inventory, topology)
                .await
        } else {

harmony/src/modules/monitoring/scrape_target/mod.rs (new file, 1 line)
@ -0,0 +1 @@
pub mod server;

harmony/src/modules/monitoring/scrape_target/server.rs (new file, 80 lines)
@ -0,0 +1,80 @@
use std::net::IpAddr;

use async_trait::async_trait;
use kube::api::ObjectMeta;
use serde::Serialize;

use crate::{
    interpret::{InterpretError, Outcome},
    modules::monitoring::kube_prometheus::crd::{
        crd_alertmanager_config::CRDPrometheus,
        crd_scrape_config::{Params, RelabelConfig, ScrapeConfig, ScrapeConfigSpec, StaticConfig},
    },
    topology::oberservability::monitoring::ScrapeTarget,
};

#[derive(Debug, Clone, Serialize)]
pub struct Server {
    pub name: String,
    pub ip: IpAddr,
    pub auth: String,
    pub module: String,
    pub domain: String,
}

#[async_trait]
impl ScrapeTarget<CRDPrometheus> for Server {
    async fn install(&self, sender: &CRDPrometheus) -> Result<Outcome, InterpretError> {
        let scrape_config_spec = ScrapeConfigSpec {
            static_configs: Some(vec![StaticConfig {
                targets: vec![self.ip.to_string()],
                labels: None,
            }]),
            scrape_interval: Some("2m".to_string()),
            kubernetes_sd_configs: None,
            http_sd_configs: None,
            file_sd_configs: None,
            dns_sd_configs: None,
            params: Some(Params {
                auth: Some(vec![self.auth.clone()]),
                module: Some(vec![self.module.clone()]),
            }),
            consul_sd_configs: None,
            relabel_configs: Some(vec![RelabelConfig {
                action: None,
                source_labels: Some(vec!["__address__".to_string()]),
                separator: None,
                target_label: Some("__param_target".to_string()),
                regex: None,
                replacement: Some(format!("snmp.{}:31080", self.domain.clone())),
                modulus: None,
            }]),
            metric_relabel_configs: None,
            metrics_path: Some("/snmp".to_string()),
            scrape_timeout: Some("2m".to_string()),
            job_name: Some(format!("snmp_exporter/cloud/{}", self.name.clone())),
            scheme: None,
        };

        let scrape_config = ScrapeConfig {
            metadata: ObjectMeta {
                name: Some(self.name.clone()),
                namespace: Some(sender.namespace.clone()),
                ..Default::default()
            },
            spec: scrape_config_spec,
        };
        sender
            .client
            .apply(&scrape_config, Some(&sender.namespace.clone()))
            .await?;
        Ok(Outcome::success(format!(
            "installed scrape target {}",
            self.name.clone()
        )))
    }

    fn clone_box(&self) -> Box<dyn ScrapeTarget<CRDPrometheus>> {
        Box::new(self.clone())
    }
}
@ -28,7 +28,7 @@ pub struct OKDSetup03ControlPlaneScore {}

impl Score<HAClusterTopology> for OKDSetup03ControlPlaneScore {
    fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
        Box::new(OKDSetup03ControlPlaneInterpret::new(self.clone()))
        Box::new(OKDSetup03ControlPlaneInterpret::new())
    }

    fn name(&self) -> String {
@ -38,17 +38,15 @@ impl Score<HAClusterTopology> for OKDSetup03ControlPlaneScore {

#[derive(Debug, Clone)]
pub struct OKDSetup03ControlPlaneInterpret {
    score: OKDSetup03ControlPlaneScore,
    version: Version,
    status: InterpretStatus,
}

impl OKDSetup03ControlPlaneInterpret {
    pub fn new(score: OKDSetup03ControlPlaneScore) -> Self {
    pub fn new() -> Self {
        let version = Version::from("1.0.0").unwrap();
        Self {
            version,
            score,
            status: InterpretStatus::QUEUED,
        }
    }
@ -159,7 +157,7 @@ impl OKDSetup03ControlPlaneInterpret {
        }
        .to_string();

        debug!("[ControlPlane] iPXE content template:\n{}", content);
        debug!("[ControlPlane] iPXE content template:\n{content}");

        // Create and apply an iPXE boot file for each node.
        for node in nodes {
@ -189,16 +187,13 @@ impl OKDSetup03ControlPlaneInterpret {
    /// Prompts the user to reboot the target control plane nodes.
    async fn reboot_targets(&self, nodes: &Vec<PhysicalHost>) -> Result<(), InterpretError> {
        let node_ids: Vec<String> = nodes.iter().map(|n| n.id.to_string()).collect();
        info!(
            "[ControlPlane] Requesting reboot for control plane nodes: {:?}",
            node_ids
        );
        info!("[ControlPlane] Requesting reboot for control plane nodes: {node_ids:?}",);

        let confirmation = inquire::Confirm::new(
                &format!("Please reboot the {} control plane nodes ({}) to apply their PXE configuration. Press enter when ready.", nodes.len(), node_ids.join(", ")),
        )
        .prompt()
        .map_err(|e| InterpretError::new(format!("User prompt failed: {}", e)))?;
        .map_err(|e| InterpretError::new(format!("User prompt failed: {e}")))?;

        if !confirmation {
            return Err(InterpretError::new(
@ -208,19 +203,6 @@ impl OKDSetup03ControlPlaneInterpret {

        Ok(())
    }

    /// Placeholder for automating network bonding configuration.
    async fn persist_network_bond(&self) -> Result<(), InterpretError> {
        // Generate MC or NNCP from inventory NIC data; apply via ignition or post-join.
        info!("[ControlPlane] Ensuring persistent bonding via MachineConfig/NNCP");
        inquire::Confirm::new(
            "Network configuration for control plane nodes is not automated yet. Configure it manually if needed.",
        )
        .prompt()
        .map_err(|e| InterpretError::new(format!("User prompt failed: {}", e)))?;

        Ok(())
    }
}

#[async_trait]
@ -259,9 +241,6 @@ impl Interpret<HAClusterTopology> for OKDSetup03ControlPlaneInterpret {
        // 4. Reboot the nodes to start the OS installation.
        self.reboot_targets(&nodes).await?;

        // 5. Placeholder for post-boot network configuration (e.g., bonding).
        self.persist_network_bond().await?;

        // TODO: Implement a step to wait for the control plane nodes to join the cluster
        // and for the cluster operators to become available. This would be similar to
        // the `wait-for bootstrap-complete` command.

@ -77,6 +77,8 @@ impl OKDBootstrapLoadBalancerScore {
            address: topology.bootstrap_host.ip.to_string(),
            port,
        });

        backend.dedup();
        backend
    }
}

harmony/src/modules/okd/bootstrap_persist_network_bond.rs (new file, 130 lines)
							@ -0,0 +1,130 @@
 | 
			
		||||
use crate::{
    data::Version,
    hardware::PhysicalHost,
    infra::inventory::InventoryRepositoryFactory,
    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
    inventory::{HostRole, Inventory},
    modules::okd::host_network::HostNetworkConfigurationScore,
    score::Score,
    topology::HAClusterTopology,
};
use async_trait::async_trait;
use derive_new::new;
use harmony_types::id::Id;
use log::info;
use serde::Serialize;

// -------------------------------------------------------------------------------------------------
// Persist Network Bond
// - Persist bonding via NMState
// - Persist port channels on the Switch
// -------------------------------------------------------------------------------------------------

#[derive(Debug, Clone, Serialize, new)]
pub struct OKDSetupPersistNetworkBondScore {}

impl Score<HAClusterTopology> for OKDSetupPersistNetworkBondScore {
    fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
        Box::new(OKDSetupPersistNetworkBondInterpet::new())
    }

    fn name(&self) -> String {
        "OKDSetupPersistNetworkBondScore".to_string()
    }
}

#[derive(Debug, Clone)]
pub struct OKDSetupPersistNetworkBondInterpet {
    version: Version,
    status: InterpretStatus,
}

impl OKDSetupPersistNetworkBondInterpet {
    pub fn new() -> Self {
        let version = Version::from("1.0.0").unwrap();
        Self {
            version,
            status: InterpretStatus::QUEUED,
        }
    }

    /// Ensures that three physical hosts are discovered and available for the ControlPlane role.
    /// It will trigger discovery if not enough hosts are found.
    async fn get_nodes(
        &self,
        _inventory: &Inventory,
        _topology: &HAClusterTopology,
    ) -> Result<Vec<PhysicalHost>, InterpretError> {
        const REQUIRED_HOSTS: usize = 3;
        let repo = InventoryRepositoryFactory::build().await?;
        let control_plane_hosts = repo.get_host_for_role(&HostRole::ControlPlane).await?;

        if control_plane_hosts.len() < REQUIRED_HOSTS {
            Err(InterpretError::new(format!(
                "OKD Requires at least {} control plane hosts, but only found {}. Cannot proceed.",
                REQUIRED_HOSTS,
                control_plane_hosts.len()
            )))
        } else {
            // Take exactly the number of required hosts to ensure consistency.
            Ok(control_plane_hosts
                .into_iter()
                .take(REQUIRED_HOSTS)
                .collect())
        }
    }

    async fn persist_network_bond(
        &self,
        inventory: &Inventory,
        topology: &HAClusterTopology,
        hosts: &Vec<PhysicalHost>,
    ) -> Result<(), InterpretError> {
        info!("Ensuring persistent bonding");

        let score = HostNetworkConfigurationScore {
            hosts: hosts.clone(),
        };
        score.interpret(inventory, topology).await?;

        Ok(())
    }
}

#[async_trait]
impl Interpret<HAClusterTopology> for OKDSetupPersistNetworkBondInterpet {
    fn get_name(&self) -> InterpretName {
        InterpretName::Custom("OKDSetupPersistNetworkBondInterpet")
    }

    fn get_version(&self) -> Version {
        self.version.clone()
    }

    fn get_status(&self) -> InterpretStatus {
        self.status.clone()
    }

    fn get_children(&self) -> Vec<Id> {
        vec![]
    }

    async fn execute(
        &self,
        inventory: &Inventory,
        topology: &HAClusterTopology,
    ) -> Result<Outcome, InterpretError> {
        let nodes = self.get_nodes(inventory, topology).await?;

        let res = self.persist_network_bond(inventory, topology, &nodes).await;

        match res {
            Ok(_) => Ok(Outcome::success(
                "Network bond successfully persisted".into(),
            )),
            Err(_) => Err(InterpretError::new(
                "Failed to persist network bond".to_string(),
            )),
        }
    }
}
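The host-selection rule in get_nodes (fail below three control-plane hosts, otherwise take exactly three) can be sketched as a pure function; this is an illustration only, not code from the module above:

fn select_control_plane<T: Clone>(hosts: &[T], required: usize) -> Result<Vec<T>, String> {
    if hosts.len() < required {
        return Err(format!(
            "OKD requires at least {required} control plane hosts, but only found {}",
            hosts.len()
        ));
    }
    // Take exactly `required` hosts so repeated runs pick a consistent set.
    Ok(hosts.iter().take(required).cloned().collect())
}

#[test]
fn takes_exactly_three_hosts() {
    assert!(select_control_plane(&["a", "b"], 3).is_err());
    assert_eq!(select_control_plane(&["a", "b", "c", "d"], 3).unwrap().len(), 3);
}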
harmony/src/modules/okd/crd/mod.rs (new file, 1 line)
@@ -0,0 +1 @@
pub mod nmstate;
harmony/src/modules/okd/crd/nmstate.rs (new file, 322 lines)
@@ -0,0 +1,322 @@
use std::collections::BTreeMap;
 | 
			
		||||
 | 
			
		||||
use kube::CustomResource;
 | 
			
		||||
use schemars::JsonSchema;
 | 
			
		||||
use serde::{Deserialize, Serialize};
 | 
			
		||||
use serde_json::Value;
 | 
			
		||||
 | 
			
		||||
#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
 | 
			
		||||
#[kube(
 | 
			
		||||
    group = "nmstate.io",
 | 
			
		||||
    version = "v1",
 | 
			
		||||
    kind = "NMState",
 | 
			
		||||
    plural = "nmstates",
 | 
			
		||||
    namespaced = false
 | 
			
		||||
)]
 | 
			
		||||
#[serde(rename_all = "camelCase")]
 | 
			
		||||
pub struct NMStateSpec {
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub probe_configuration: Option<ProbeConfig>,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
impl Default for NMState {
 | 
			
		||||
    fn default() -> Self {
 | 
			
		||||
        Self {
 | 
			
		||||
            metadata: Default::default(),
 | 
			
		||||
            spec: NMStateSpec {
 | 
			
		||||
                probe_configuration: None,
 | 
			
		||||
            },
 | 
			
		||||
        }
 | 
			
		||||
    }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
 | 
			
		||||
#[serde(rename_all = "camelCase")]
 | 
			
		||||
pub struct ProbeConfig {
 | 
			
		||||
    pub dns: ProbeDns,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
 | 
			
		||||
#[serde(rename_all = "camelCase")]
 | 
			
		||||
pub struct ProbeDns {
 | 
			
		||||
    pub host: String,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
 | 
			
		||||
#[kube(
 | 
			
		||||
    group = "nmstate.io",
 | 
			
		||||
    version = "v1",
 | 
			
		||||
    kind = "NodeNetworkConfigurationPolicy",
 | 
			
		||||
    namespaced
 | 
			
		||||
)]
 | 
			
		||||
#[serde(rename_all = "camelCase")]
 | 
			
		||||
pub struct NodeNetworkConfigurationPolicySpec {
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub node_selector: Option<BTreeMap<String, String>>,
 | 
			
		||||
    pub desired_state: DesiredStateSpec,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
 | 
			
		||||
#[serde(rename_all = "kebab-case")]
 | 
			
		||||
pub struct DesiredStateSpec {
 | 
			
		||||
    pub interfaces: Vec<InterfaceSpec>,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 | 
			
		||||
#[serde(rename_all = "kebab-case")]
 | 
			
		||||
pub struct InterfaceSpec {
 | 
			
		||||
    pub name: String,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub description: Option<String>,
 | 
			
		||||
    pub r#type: String,
 | 
			
		||||
    pub state: String,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub mac_address: Option<String>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub copy_mac_from: Option<String>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub mtu: Option<u32>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub controller: Option<String>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub ipv4: Option<IpStackSpec>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub ipv6: Option<IpStackSpec>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub ethernet: Option<EthernetSpec>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub link_aggregation: Option<BondSpec>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub vlan: Option<VlanSpec>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub vxlan: Option<VxlanSpec>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub mac_vtap: Option<MacVtapSpec>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub mac_vlan: Option<MacVlanSpec>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub infiniband: Option<InfinibandSpec>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub linux_bridge: Option<LinuxBridgeSpec>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub ovs_bridge: Option<OvsBridgeSpec>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub ethtool: Option<EthtoolSpec>,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 | 
			
		||||
#[serde(rename_all = "kebab-case")]
 | 
			
		||||
pub struct IpStackSpec {
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub enabled: Option<bool>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub dhcp: Option<bool>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub autoconf: Option<bool>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub address: Option<Vec<IpAddressSpec>>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub auto_dns: Option<bool>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub auto_gateway: Option<bool>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub auto_routes: Option<bool>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub dhcp_client_id: Option<String>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub dhcp_duid: Option<String>,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 | 
			
		||||
#[serde(rename_all = "kebab-case")]
 | 
			
		||||
pub struct IpAddressSpec {
 | 
			
		||||
    pub ip: String,
 | 
			
		||||
    pub prefix_length: u8,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 | 
			
		||||
#[serde(rename_all = "kebab-case")]
 | 
			
		||||
pub struct EthernetSpec {
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub speed: Option<u32>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub duplex: Option<String>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub auto_negotiation: Option<bool>,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 | 
			
		||||
#[serde(rename_all = "kebab-case")]
 | 
			
		||||
pub struct BondSpec {
 | 
			
		||||
    pub mode: String,
 | 
			
		||||
    pub ports: Vec<String>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub options: Option<BTreeMap<String, Value>>,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 | 
			
		||||
#[serde(rename_all = "kebab-case")]
 | 
			
		||||
pub struct VlanSpec {
 | 
			
		||||
    pub base_iface: String,
 | 
			
		||||
    pub id: u16,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub protocol: Option<String>,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 | 
			
		||||
#[serde(rename_all = "kebab-case")]
 | 
			
		||||
pub struct VxlanSpec {
 | 
			
		||||
    pub base_iface: String,
 | 
			
		||||
    pub id: u32,
 | 
			
		||||
    pub remote: String,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub local: Option<String>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub learning: Option<bool>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub destination_port: Option<u16>,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 | 
			
		||||
#[serde(rename_all = "kebab-case")]
 | 
			
		||||
pub struct MacVtapSpec {
 | 
			
		||||
    pub base_iface: String,
 | 
			
		||||
    pub mode: String,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub promiscuous: Option<bool>,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 | 
			
		||||
#[serde(rename_all = "kebab-case")]
 | 
			
		||||
pub struct MacVlanSpec {
 | 
			
		||||
    pub base_iface: String,
 | 
			
		||||
    pub mode: String,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub promiscuous: Option<bool>,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 | 
			
		||||
#[serde(rename_all = "kebab-case")]
 | 
			
		||||
pub struct InfinibandSpec {
 | 
			
		||||
    pub base_iface: String,
 | 
			
		||||
    pub pkey: String,
 | 
			
		||||
    pub mode: String,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 | 
			
		||||
#[serde(rename_all = "kebab-case")]
 | 
			
		||||
pub struct LinuxBridgeSpec {
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub options: Option<LinuxBridgeOptions>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub ports: Option<Vec<LinuxBridgePort>>,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 | 
			
		||||
#[serde(rename_all = "kebab-case")]
 | 
			
		||||
pub struct LinuxBridgeOptions {
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub mac_ageing_time: Option<u32>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub multicast_snooping: Option<bool>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub stp: Option<StpOptions>,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 | 
			
		||||
#[serde(rename_all = "kebab-case")]
 | 
			
		||||
pub struct StpOptions {
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub enabled: Option<bool>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub forward_delay: Option<u16>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub hello_time: Option<u16>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub max_age: Option<u16>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub priority: Option<u16>,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 | 
			
		||||
#[serde(rename_all = "kebab-case")]
 | 
			
		||||
pub struct LinuxBridgePort {
 | 
			
		||||
    pub name: String,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub vlan: Option<LinuxBridgePortVlan>,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 | 
			
		||||
#[serde(rename_all = "kebab-case")]
 | 
			
		||||
pub struct LinuxBridgePortVlan {
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub mode: Option<String>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub trunk_tags: Option<Vec<VlanTag>>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub tag: Option<u16>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub enable_native: Option<bool>,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 | 
			
		||||
#[serde(rename_all = "kebab-case")]
 | 
			
		||||
pub struct VlanTag {
 | 
			
		||||
    pub id: u16,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub id_range: Option<VlanIdRange>,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 | 
			
		||||
#[serde(rename_all = "kebab-case")]
 | 
			
		||||
pub struct VlanIdRange {
 | 
			
		||||
    pub min: u16,
 | 
			
		||||
    pub max: u16,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 | 
			
		||||
#[serde(rename_all = "kebab-case")]
 | 
			
		||||
pub struct OvsBridgeSpec {
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub options: Option<OvsBridgeOptions>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub ports: Option<Vec<OvsPortSpec>>,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 | 
			
		||||
#[serde(rename_all = "kebab-case")]
 | 
			
		||||
pub struct OvsBridgeOptions {
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub stp: Option<bool>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub rstp: Option<bool>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub mcast_snooping_enable: Option<bool>,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 | 
			
		||||
#[serde(rename_all = "kebab-case")]
 | 
			
		||||
pub struct OvsPortSpec {
 | 
			
		||||
    pub name: String,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub link_aggregation: Option<BondSpec>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub vlan: Option<LinuxBridgePortVlan>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub r#type: Option<String>,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 | 
			
		||||
#[serde(rename_all = "kebab-case")]
 | 
			
		||||
pub struct EthtoolSpec {
 | 
			
		||||
    // TODO: Properly describe this spec (https://nmstate.io/devel/yaml_api.html#ethtool)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 | 
			
		||||
#[serde(rename_all = "kebab-case")]
 | 
			
		||||
pub struct EthtoolFecSpec {
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub auto: Option<bool>,
 | 
			
		||||
    #[serde(skip_serializing_if = "Option::is_none")]
 | 
			
		||||
    pub mode: Option<String>,
 | 
			
		||||
}
 | 
			
		||||
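For reference, a minimal sketch of how the CRD types above could describe an LACP bond and render an nmstate.io/v1 manifest. The crate path, the NIC names and the serde_yaml dependency are assumptions, not part of this change; the wrapper type and its `new` constructor come from kube's CustomResource derive.

use std::collections::BTreeMap;

// Path is an assumption about how the new module is re-exported.
use harmony::modules::okd::crd::nmstate::{
    BondSpec, DesiredStateSpec, InterfaceSpec, IpStackSpec, NodeNetworkConfigurationPolicy,
    NodeNetworkConfigurationPolicySpec,
};

fn main() -> Result<(), serde_yaml::Error> {
    // Bond two (hypothetical) NICs into bond0 with LACP, DHCP on the bond itself.
    let bond = InterfaceSpec {
        name: "bond0".into(),
        r#type: "bond".into(),
        state: "up".into(),
        copy_mac_from: Some("eno1".into()),
        ipv4: Some(IpStackSpec {
            enabled: Some(true),
            dhcp: Some(true),
            ..Default::default()
        }),
        link_aggregation: Some(BondSpec {
            mode: "802.3ad".into(),
            ports: vec!["eno1".into(), "eno2".into()],
            options: None,
        }),
        ..Default::default()
    };

    let policy = NodeNetworkConfigurationPolicy::new(
        "bond0-control-plane",
        NodeNetworkConfigurationPolicySpec {
            node_selector: Some(BTreeMap::from([(
                "node-role.kubernetes.io/control-plane".to_string(),
                "".to_string(),
            )])),
            desired_state: DesiredStateSpec {
                interfaces: vec![bond],
            },
        },
    );

    // Renders the manifest that would be applied to the cluster.
    println!("{}", serde_yaml::to_string(&policy)?);
    Ok(())
}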
							
								
								
									
harmony/src/modules/okd/host_network.rs (new file, 489 lines)
@@ -0,0 +1,489 @@
use async_trait::async_trait;
 | 
			
		||||
use harmony_types::id::Id;
 | 
			
		||||
use log::{debug, info};
 | 
			
		||||
use serde::Serialize;
 | 
			
		||||
 | 
			
		||||
use crate::{
 | 
			
		||||
    data::Version,
 | 
			
		||||
    hardware::PhysicalHost,
 | 
			
		||||
    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
 | 
			
		||||
    inventory::Inventory,
 | 
			
		||||
    score::Score,
 | 
			
		||||
    topology::{HostNetworkConfig, NetworkInterface, Switch, SwitchPort, Topology},
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
#[derive(Debug, Clone, Serialize)]
 | 
			
		||||
pub struct HostNetworkConfigurationScore {
 | 
			
		||||
    pub hosts: Vec<PhysicalHost>,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
impl<T: Topology + Switch> Score<T> for HostNetworkConfigurationScore {
 | 
			
		||||
    fn name(&self) -> String {
 | 
			
		||||
        "HostNetworkConfigurationScore".into()
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
 | 
			
		||||
        Box::new(HostNetworkConfigurationInterpret {
 | 
			
		||||
            score: self.clone(),
 | 
			
		||||
        })
 | 
			
		||||
    }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#[derive(Debug)]
 | 
			
		||||
pub struct HostNetworkConfigurationInterpret {
 | 
			
		||||
    score: HostNetworkConfigurationScore,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
impl HostNetworkConfigurationInterpret {
 | 
			
		||||
    async fn configure_network_for_host<T: Topology + Switch>(
 | 
			
		||||
        &self,
 | 
			
		||||
        topology: &T,
 | 
			
		||||
        host: &PhysicalHost,
 | 
			
		||||
        current_host: &usize,
 | 
			
		||||
        total_hosts: &usize,
 | 
			
		||||
    ) -> Result<HostNetworkConfig, InterpretError> {
 | 
			
		||||
        if host.network.is_empty() {
 | 
			
		||||
            info!("[Host {current_host}/{total_hosts}] No interfaces to configure, skipping");
 | 
			
		||||
            return Ok(HostNetworkConfig {
 | 
			
		||||
                host_id: host.id.clone(),
 | 
			
		||||
                switch_ports: vec![],
 | 
			
		||||
            });
 | 
			
		||||
        }
 | 
			
		||||
 | 
			
		||||
        let switch_ports = self
 | 
			
		||||
            .collect_switch_ports_for_host(topology, host, current_host, total_hosts)
 | 
			
		||||
            .await?;
 | 
			
		||||
 | 
			
		||||
        let config = HostNetworkConfig {
 | 
			
		||||
            host_id: host.id.clone(),
 | 
			
		||||
            switch_ports,
 | 
			
		||||
        };
 | 
			
		||||
 | 
			
		||||
        if !config.switch_ports.is_empty() {
 | 
			
		||||
            info!(
 | 
			
		||||
                "[Host {current_host}/{total_hosts}] Found {} ports for {} interfaces",
 | 
			
		||||
                config.switch_ports.len(),
 | 
			
		||||
                host.network.len()
 | 
			
		||||
            );
 | 
			
		||||
 | 
			
		||||
            info!("[Host {current_host}/{total_hosts}] Configuring host network...");
 | 
			
		||||
            topology
 | 
			
		||||
                .configure_host_network(&config)
 | 
			
		||||
                .await
 | 
			
		||||
                .map_err(|e| InterpretError::new(format!("Failed to configure host: {e}")))?;
 | 
			
		||||
        } else {
 | 
			
		||||
            info!(
 | 
			
		||||
                "[Host {current_host}/{total_hosts}] No ports found for {} interfaces, skipping",
 | 
			
		||||
                host.network.len()
 | 
			
		||||
            );
 | 
			
		||||
        }
 | 
			
		||||
 | 
			
		||||
        Ok(config)
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    async fn collect_switch_ports_for_host<T: Topology + Switch>(
 | 
			
		||||
        &self,
 | 
			
		||||
        topology: &T,
 | 
			
		||||
        host: &PhysicalHost,
 | 
			
		||||
        current_host: &usize,
 | 
			
		||||
        total_hosts: &usize,
 | 
			
		||||
    ) -> Result<Vec<SwitchPort>, InterpretError> {
 | 
			
		||||
        let mut switch_ports = vec![];
 | 
			
		||||
 | 
			
		||||
        if host.network.is_empty() {
 | 
			
		||||
            return Ok(switch_ports);
 | 
			
		||||
        }
 | 
			
		||||
 | 
			
		||||
        info!("[Host {current_host}/{total_hosts}] Collecting ports on switch...");
 | 
			
		||||
        for network_interface in &host.network {
 | 
			
		||||
            let mac_address = network_interface.mac_address;
 | 
			
		||||
 | 
			
		||||
            match topology.get_port_for_mac_address(&mac_address).await {
 | 
			
		||||
                Ok(Some(port)) => {
 | 
			
		||||
                    info!(
 | 
			
		||||
                        "[Host {current_host}/{total_hosts}] Found port '{port}' for '{mac_address}'"
 | 
			
		||||
                    );
 | 
			
		||||
                    switch_ports.push(SwitchPort {
 | 
			
		||||
                        interface: NetworkInterface {
 | 
			
		||||
                            name: network_interface.name.clone(),
 | 
			
		||||
                            mac_address,
 | 
			
		||||
                            speed_mbps: network_interface.speed_mbps,
 | 
			
		||||
                            mtu: network_interface.mtu,
 | 
			
		||||
                        },
 | 
			
		||||
                        port,
 | 
			
		||||
                    });
 | 
			
		||||
                }
 | 
			
		||||
                Ok(None) => debug!("No port found for '{mac_address}', skipping"),
 | 
			
		||||
                Err(e) => {
 | 
			
		||||
                    return Err(InterpretError::new(format!(
 | 
			
		||||
                        "Failed to get port for host '{}': {}",
 | 
			
		||||
                        host.id, e
 | 
			
		||||
                    )));
 | 
			
		||||
                }
 | 
			
		||||
            }
 | 
			
		||||
        }
 | 
			
		||||
 | 
			
		||||
        Ok(switch_ports)
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    fn format_host_configuration(&self, configs: Vec<HostNetworkConfig>) -> Vec<String> {
 | 
			
		||||
        let mut report = vec![
 | 
			
		||||
            "Network Configuration Report".to_string(),
 | 
			
		||||
            "------------------------------------------------------------------".to_string(),
 | 
			
		||||
        ];
 | 
			
		||||
 | 
			
		||||
        for config in configs {
 | 
			
		||||
            let host = self
 | 
			
		||||
                .score
 | 
			
		||||
                .hosts
 | 
			
		||||
                .iter()
 | 
			
		||||
                .find(|h| h.id == config.host_id)
 | 
			
		||||
                .unwrap();
 | 
			
		||||
 | 
			
		||||
            println!("[Host] {host}");
 | 
			
		||||
 | 
			
		||||
            if config.switch_ports.is_empty() {
 | 
			
		||||
                report.push(format!(
 | 
			
		||||
                    "⏭️ Host {}: SKIPPED (No matching switch ports found)",
 | 
			
		||||
                    config.host_id
 | 
			
		||||
                ));
 | 
			
		||||
            } else {
 | 
			
		||||
                let mappings: Vec<String> = config
 | 
			
		||||
                    .switch_ports
 | 
			
		||||
                    .iter()
 | 
			
		||||
                    .map(|p| format!("[{} -> {}]", p.interface.name, p.port))
 | 
			
		||||
                    .collect();
 | 
			
		||||
 | 
			
		||||
                report.push(format!(
 | 
			
		||||
                    "✅ Host {}: Bonded {} port(s) {}",
 | 
			
		||||
                    config.host_id,
 | 
			
		||||
                    config.switch_ports.len(),
 | 
			
		||||
                    mappings.join(", ")
 | 
			
		||||
                ));
 | 
			
		||||
            }
 | 
			
		||||
        }
 | 
			
		||||
        report
 | 
			
		||||
            .push("------------------------------------------------------------------".to_string());
 | 
			
		||||
        report
 | 
			
		||||
    }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#[async_trait]
 | 
			
		||||
impl<T: Topology + Switch> Interpret<T> for HostNetworkConfigurationInterpret {
 | 
			
		||||
    fn get_name(&self) -> InterpretName {
 | 
			
		||||
        InterpretName::Custom("HostNetworkConfigurationInterpret")
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    fn get_version(&self) -> Version {
 | 
			
		||||
        todo!()
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    fn get_status(&self) -> InterpretStatus {
 | 
			
		||||
        todo!()
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    fn get_children(&self) -> Vec<Id> {
 | 
			
		||||
        vec![]
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    async fn execute(
 | 
			
		||||
        &self,
 | 
			
		||||
        _inventory: &Inventory,
 | 
			
		||||
        topology: &T,
 | 
			
		||||
    ) -> Result<Outcome, InterpretError> {
 | 
			
		||||
        if self.score.hosts.is_empty() {
 | 
			
		||||
            return Ok(Outcome::noop("No hosts to configure".into()));
 | 
			
		||||
        }
 | 
			
		||||
 | 
			
		||||
        let host_count = self.score.hosts.len();
 | 
			
		||||
        info!("Started network configuration for {host_count} host(s)...",);
 | 
			
		||||
 | 
			
		||||
        info!("Setting up switch with sane defaults...");
 | 
			
		||||
        topology
 | 
			
		||||
            .setup_switch()
 | 
			
		||||
            .await
 | 
			
		||||
            .map_err(|e| InterpretError::new(format!("Switch setup failed: {e}")))?;
 | 
			
		||||
        info!("Switch ready");
 | 
			
		||||
 | 
			
		||||
        let mut current_host = 1;
 | 
			
		||||
        let mut host_configurations = vec![];
 | 
			
		||||
 | 
			
		||||
        for host in &self.score.hosts {
 | 
			
		||||
            let host_configuration = self
 | 
			
		||||
                .configure_network_for_host(topology, host, ¤t_host, &host_count)
 | 
			
		||||
                .await?;
 | 
			
		||||
 | 
			
		||||
            host_configurations.push(host_configuration);
 | 
			
		||||
            current_host += 1;
 | 
			
		||||
        }
 | 
			
		||||
        if current_host > 1 {
 | 
			
		||||
            let details = self.format_host_configuration(host_configurations);
 | 
			
		||||
 | 
			
		||||
            Ok(Outcome::success_with_details(
 | 
			
		||||
                format!(
 | 
			
		||||
                    "Configured {}/{} host(s)",
 | 
			
		||||
                    current_host - 1,
 | 
			
		||||
                    self.score.hosts.len()
 | 
			
		||||
                ),
 | 
			
		||||
                details,
 | 
			
		||||
            ))
 | 
			
		||||
        } else {
 | 
			
		||||
            Ok(Outcome::noop("No hosts configured".into()))
 | 
			
		||||
        }
 | 
			
		||||
    }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#[cfg(test)]
 | 
			
		||||
mod tests {
 | 
			
		||||
    use assertor::*;
 | 
			
		||||
    use harmony_types::{net::MacAddress, switch::PortLocation};
 | 
			
		||||
    use lazy_static::lazy_static;
 | 
			
		||||
 | 
			
		||||
    use crate::{
 | 
			
		||||
        hardware::HostCategory,
 | 
			
		||||
        topology::{
 | 
			
		||||
            HostNetworkConfig, PreparationError, PreparationOutcome, SwitchError, SwitchPort,
 | 
			
		||||
        },
 | 
			
		||||
    };
 | 
			
		||||
    use std::{
 | 
			
		||||
        str::FromStr,
 | 
			
		||||
        sync::{Arc, Mutex},
 | 
			
		||||
    };
 | 
			
		||||
 | 
			
		||||
    use super::*;
 | 
			
		||||
 | 
			
		||||
    lazy_static! {
 | 
			
		||||
        pub static ref HOST_ID: Id = Id::from_str("host-1").unwrap();
 | 
			
		||||
        pub static ref ANOTHER_HOST_ID: Id = Id::from_str("host-2").unwrap();
 | 
			
		||||
        pub static ref EXISTING_INTERFACE: NetworkInterface = NetworkInterface {
 | 
			
		||||
            mac_address: MacAddress::try_from("AA:BB:CC:DD:EE:F1".to_string()).unwrap(),
 | 
			
		||||
            name: "interface-1".into(),
 | 
			
		||||
            speed_mbps: None,
 | 
			
		||||
            mtu: 1,
 | 
			
		||||
        };
 | 
			
		||||
        pub static ref ANOTHER_EXISTING_INTERFACE: NetworkInterface = NetworkInterface {
 | 
			
		||||
            mac_address: MacAddress::try_from("AA:BB:CC:DD:EE:F2".to_string()).unwrap(),
 | 
			
		||||
            name: "interface-2".into(),
 | 
			
		||||
            speed_mbps: None,
 | 
			
		||||
            mtu: 1,
 | 
			
		||||
        };
 | 
			
		||||
        pub static ref UNKNOWN_INTERFACE: NetworkInterface = NetworkInterface {
 | 
			
		||||
            mac_address: MacAddress::try_from("11:22:33:44:55:61".to_string()).unwrap(),
 | 
			
		||||
            name: "unknown-interface".into(),
 | 
			
		||||
            speed_mbps: None,
 | 
			
		||||
            mtu: 1,
 | 
			
		||||
        };
 | 
			
		||||
        pub static ref PORT: PortLocation = PortLocation(1, 0, 42);
 | 
			
		||||
        pub static ref ANOTHER_PORT: PortLocation = PortLocation(2, 0, 42);
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    #[tokio::test]
 | 
			
		||||
    async fn should_setup_switch() {
 | 
			
		||||
        let host = given_host(&HOST_ID, vec![EXISTING_INTERFACE.clone()]);
 | 
			
		||||
        let score = given_score(vec![host]);
 | 
			
		||||
        let topology = TopologyWithSwitch::new();
 | 
			
		||||
 | 
			
		||||
        let _ = score.interpret(&Inventory::empty(), &topology).await;
 | 
			
		||||
 | 
			
		||||
        let switch_setup = topology.switch_setup.lock().unwrap();
 | 
			
		||||
        assert_that!(*switch_setup).is_true();
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    #[tokio::test]
 | 
			
		||||
    async fn host_with_one_mac_address_should_create_bond_with_one_interface() {
 | 
			
		||||
        let host = given_host(&HOST_ID, vec![EXISTING_INTERFACE.clone()]);
 | 
			
		||||
        let score = given_score(vec![host]);
 | 
			
		||||
        let topology = TopologyWithSwitch::new();
 | 
			
		||||
 | 
			
		||||
        let _ = score.interpret(&Inventory::empty(), &topology).await;
 | 
			
		||||
 | 
			
		||||
        let configured_host_networks = topology.configured_host_networks.lock().unwrap();
 | 
			
		||||
        assert_that!(*configured_host_networks).contains_exactly(vec![(
 | 
			
		||||
            HOST_ID.clone(),
 | 
			
		||||
            HostNetworkConfig {
 | 
			
		||||
                host_id: HOST_ID.clone(),
 | 
			
		||||
                switch_ports: vec![SwitchPort {
 | 
			
		||||
                    interface: EXISTING_INTERFACE.clone(),
 | 
			
		||||
                    port: PORT.clone(),
 | 
			
		||||
                }],
 | 
			
		||||
            },
 | 
			
		||||
        )]);
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    #[tokio::test]
 | 
			
		||||
    async fn host_with_multiple_mac_addresses_should_create_one_bond_with_all_interfaces() {
 | 
			
		||||
        let score = given_score(vec![given_host(
 | 
			
		||||
            &HOST_ID,
 | 
			
		||||
            vec![
 | 
			
		||||
                EXISTING_INTERFACE.clone(),
 | 
			
		||||
                ANOTHER_EXISTING_INTERFACE.clone(),
 | 
			
		||||
            ],
 | 
			
		||||
        )]);
 | 
			
		||||
        let topology = TopologyWithSwitch::new();
 | 
			
		||||
 | 
			
		||||
        let _ = score.interpret(&Inventory::empty(), &topology).await;
 | 
			
		||||
 | 
			
		||||
        let configured_host_networks = topology.configured_host_networks.lock().unwrap();
 | 
			
		||||
        assert_that!(*configured_host_networks).contains_exactly(vec![(
 | 
			
		||||
            HOST_ID.clone(),
 | 
			
		||||
            HostNetworkConfig {
 | 
			
		||||
                host_id: HOST_ID.clone(),
 | 
			
		||||
                switch_ports: vec![
 | 
			
		||||
                    SwitchPort {
 | 
			
		||||
                        interface: EXISTING_INTERFACE.clone(),
 | 
			
		||||
                        port: PORT.clone(),
 | 
			
		||||
                    },
 | 
			
		||||
                    SwitchPort {
 | 
			
		||||
                        interface: ANOTHER_EXISTING_INTERFACE.clone(),
 | 
			
		||||
                        port: ANOTHER_PORT.clone(),
 | 
			
		||||
                    },
 | 
			
		||||
                ],
 | 
			
		||||
            },
 | 
			
		||||
        )]);
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    #[tokio::test]
 | 
			
		||||
    async fn multiple_hosts_should_create_one_bond_per_host() {
 | 
			
		||||
        let score = given_score(vec![
 | 
			
		||||
            given_host(&HOST_ID, vec![EXISTING_INTERFACE.clone()]),
 | 
			
		||||
            given_host(&ANOTHER_HOST_ID, vec![ANOTHER_EXISTING_INTERFACE.clone()]),
 | 
			
		||||
        ]);
 | 
			
		||||
        let topology = TopologyWithSwitch::new();
 | 
			
		||||
 | 
			
		||||
        let _ = score.interpret(&Inventory::empty(), &topology).await;
 | 
			
		||||
 | 
			
		||||
        let configured_host_networks = topology.configured_host_networks.lock().unwrap();
 | 
			
		||||
        assert_that!(*configured_host_networks).contains_exactly(vec![
 | 
			
		||||
            (
 | 
			
		||||
                HOST_ID.clone(),
 | 
			
		||||
                HostNetworkConfig {
 | 
			
		||||
                    host_id: HOST_ID.clone(),
 | 
			
		||||
                    switch_ports: vec![SwitchPort {
 | 
			
		||||
                        interface: EXISTING_INTERFACE.clone(),
 | 
			
		||||
                        port: PORT.clone(),
 | 
			
		||||
                    }],
 | 
			
		||||
                },
 | 
			
		||||
            ),
 | 
			
		||||
            (
 | 
			
		||||
                ANOTHER_HOST_ID.clone(),
 | 
			
		||||
                HostNetworkConfig {
 | 
			
		||||
                    host_id: ANOTHER_HOST_ID.clone(),
 | 
			
		||||
                    switch_ports: vec![SwitchPort {
 | 
			
		||||
                        interface: ANOTHER_EXISTING_INTERFACE.clone(),
 | 
			
		||||
                        port: ANOTHER_PORT.clone(),
 | 
			
		||||
                    }],
 | 
			
		||||
                },
 | 
			
		||||
            ),
 | 
			
		||||
        ]);
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    #[tokio::test]
 | 
			
		||||
    async fn port_not_found_for_mac_address_should_not_configure_interface() {
 | 
			
		||||
        let score = given_score(vec![given_host(&HOST_ID, vec![UNKNOWN_INTERFACE.clone()])]);
 | 
			
		||||
        let topology = TopologyWithSwitch::new_port_not_found();
 | 
			
		||||
 | 
			
		||||
        let _ = score.interpret(&Inventory::empty(), &topology).await;
 | 
			
		||||
 | 
			
		||||
        let configured_host_networks = topology.configured_host_networks.lock().unwrap();
 | 
			
		||||
        assert_that!(*configured_host_networks).is_empty();
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    fn given_score(hosts: Vec<PhysicalHost>) -> HostNetworkConfigurationScore {
 | 
			
		||||
        HostNetworkConfigurationScore { hosts }
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    fn given_host(id: &Id, network_interfaces: Vec<NetworkInterface>) -> PhysicalHost {
 | 
			
		||||
        let network = network_interfaces.iter().map(given_interface).collect();
 | 
			
		||||
 | 
			
		||||
        PhysicalHost {
 | 
			
		||||
            id: id.clone(),
 | 
			
		||||
            category: HostCategory::Server,
 | 
			
		||||
            network,
 | 
			
		||||
            storage: vec![],
 | 
			
		||||
            labels: vec![],
 | 
			
		||||
            memory_modules: vec![],
 | 
			
		||||
            cpus: vec![],
 | 
			
		||||
        }
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    fn given_interface(
 | 
			
		||||
        interface: &NetworkInterface,
 | 
			
		||||
    ) -> harmony_inventory_agent::hwinfo::NetworkInterface {
 | 
			
		||||
        harmony_inventory_agent::hwinfo::NetworkInterface {
 | 
			
		||||
            name: interface.name.clone(),
 | 
			
		||||
            mac_address: interface.mac_address,
 | 
			
		||||
            speed_mbps: interface.speed_mbps,
 | 
			
		||||
            is_up: true,
 | 
			
		||||
            mtu: interface.mtu,
 | 
			
		||||
            ipv4_addresses: vec![],
 | 
			
		||||
            ipv6_addresses: vec![],
 | 
			
		||||
            driver: "driver".into(),
 | 
			
		||||
            firmware_version: None,
 | 
			
		||||
        }
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    struct TopologyWithSwitch {
 | 
			
		||||
        available_ports: Arc<Mutex<Vec<PortLocation>>>,
 | 
			
		||||
        configured_host_networks: Arc<Mutex<Vec<(Id, HostNetworkConfig)>>>,
 | 
			
		||||
        switch_setup: Arc<Mutex<bool>>,
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    impl TopologyWithSwitch {
 | 
			
		||||
        fn new() -> Self {
 | 
			
		||||
            Self {
 | 
			
		||||
                available_ports: Arc::new(Mutex::new(vec![PORT.clone(), ANOTHER_PORT.clone()])),
 | 
			
		||||
                configured_host_networks: Arc::new(Mutex::new(vec![])),
 | 
			
		||||
                switch_setup: Arc::new(Mutex::new(false)),
 | 
			
		||||
            }
 | 
			
		||||
        }
 | 
			
		||||
 | 
			
		||||
        fn new_port_not_found() -> Self {
 | 
			
		||||
            Self {
 | 
			
		||||
                available_ports: Arc::new(Mutex::new(vec![])),
 | 
			
		||||
                configured_host_networks: Arc::new(Mutex::new(vec![])),
 | 
			
		||||
                switch_setup: Arc::new(Mutex::new(false)),
 | 
			
		||||
            }
 | 
			
		||||
        }
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    #[async_trait]
 | 
			
		||||
    impl Topology for TopologyWithSwitch {
 | 
			
		||||
        fn name(&self) -> &str {
 | 
			
		||||
            "SwitchWithPortTopology"
 | 
			
		||||
        }
 | 
			
		||||
 | 
			
		||||
        async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
 | 
			
		||||
            Ok(PreparationOutcome::Success { details: "".into() })
 | 
			
		||||
        }
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    #[async_trait]
 | 
			
		||||
    impl Switch for TopologyWithSwitch {
 | 
			
		||||
        async fn setup_switch(&self) -> Result<(), SwitchError> {
 | 
			
		||||
            let mut switch_configured = self.switch_setup.lock().unwrap();
 | 
			
		||||
            *switch_configured = true;
 | 
			
		||||
            Ok(())
 | 
			
		||||
        }
 | 
			
		||||
 | 
			
		||||
        async fn get_port_for_mac_address(
 | 
			
		||||
            &self,
 | 
			
		||||
            _mac_address: &MacAddress,
 | 
			
		||||
        ) -> Result<Option<PortLocation>, SwitchError> {
 | 
			
		||||
            let mut ports = self.available_ports.lock().unwrap();
 | 
			
		||||
            if ports.is_empty() {
 | 
			
		||||
                return Ok(None);
 | 
			
		||||
            }
 | 
			
		||||
            Ok(Some(ports.remove(0)))
 | 
			
		||||
        }
 | 
			
		||||
 | 
			
		||||
        async fn configure_host_network(
 | 
			
		||||
            &self,
 | 
			
		||||
            config: &HostNetworkConfig,
 | 
			
		||||
        ) -> Result<(), SwitchError> {
 | 
			
		||||
            let mut configured_host_networks = self.configured_host_networks.lock().unwrap();
 | 
			
		||||
            configured_host_networks.push((config.host_id.clone(), config.clone()));
 | 
			
		||||
 | 
			
		||||
            Ok(())
 | 
			
		||||
        }
 | 
			
		||||
    }
 | 
			
		||||
}
 | 
			
		||||
@@ -50,7 +50,7 @@
use crate::{
    modules::okd::{
        OKDSetup01InventoryScore, OKDSetup02BootstrapScore, OKDSetup03ControlPlaneScore,
        OKDSetup04WorkersScore, OKDSetup05SanityCheckScore,
        OKDSetup04WorkersScore, OKDSetup05SanityCheckScore, OKDSetupPersistNetworkBondScore,
        bootstrap_06_installation_report::OKDSetup06InstallationReportScore,
    },
    score::Score,
@@ -65,6 +65,7 @@ impl OKDInstallationPipeline {
            Box::new(OKDSetup01InventoryScore::new()),
            Box::new(OKDSetup02BootstrapScore::new()),
            Box::new(OKDSetup03ControlPlaneScore::new()),
            Box::new(OKDSetupPersistNetworkBondScore::new()),
            Box::new(OKDSetup04WorkersScore::new()),
            Box::new(OKDSetup05SanityCheckScore::new()),
            Box::new(OKDSetup06InstallationReportScore::new()),

@@ -6,6 +6,7 @@ mod bootstrap_05_sanity_check;
mod bootstrap_06_installation_report;
pub mod bootstrap_dhcp;
pub mod bootstrap_load_balancer;
mod bootstrap_persist_network_bond;
pub mod dhcp;
pub mod dns;
pub mod installation;
@@ -19,3 +20,6 @@ pub use bootstrap_03_control_plane::*;
pub use bootstrap_04_workers::*;
pub use bootstrap_05_sanity_check::*;
pub use bootstrap_06_installation_report::*;
pub use bootstrap_persist_network_bond::*;
pub mod crd;
pub mod host_network;

@@ -12,7 +12,8 @@ use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::C
use crate::modules::monitoring::kube_prometheus::crd::crd_default_rules::build_default_application_rules;
use crate::modules::monitoring::kube_prometheus::crd::crd_grafana::{
    Grafana, GrafanaDashboard, GrafanaDashboardSpec, GrafanaDatasource, GrafanaDatasourceConfig,
    GrafanaDatasourceSpec, GrafanaSpec,
    GrafanaDatasourceJsonData, GrafanaDatasourceSpec, GrafanaSecretKeyRef, GrafanaSpec,
    GrafanaValueFrom, GrafanaValueSource,
};
use crate::modules::monitoring::kube_prometheus::crd::crd_prometheus_rules::{
    PrometheusRule, PrometheusRuleSpec, RuleGroup,
@@ -39,7 +40,7 @@ use crate::{
};
use harmony_types::id::Id;

use super::prometheus::PrometheusApplicationMonitoring;
use super::prometheus::PrometheusMonitoring;

#[derive(Clone, Debug, Serialize)]
pub struct K8sPrometheusCRDAlertingScore {
@@ -49,7 +50,7 @@ pub struct K8sPrometheusCRDAlertingScore {
    pub prometheus_rules: Vec<RuleGroup>,
}

impl<T: Topology + K8sclient + PrometheusApplicationMonitoring<CRDPrometheus>> Score<T>
impl<T: Topology + K8sclient + PrometheusMonitoring<CRDPrometheus>> Score<T>
    for K8sPrometheusCRDAlertingScore
{
    fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
@@ -75,7 +76,7 @@ pub struct K8sPrometheusCRDAlertingInterpret {
}

#[async_trait]
impl<T: Topology + K8sclient + PrometheusApplicationMonitoring<CRDPrometheus>> Interpret<T>
impl<T: Topology + K8sclient + PrometheusMonitoring<CRDPrometheus>> Interpret<T>
    for K8sPrometheusCRDAlertingInterpret
{
    async fn execute(
@@ -466,10 +467,13 @@ impl K8sPrometheusCRDAlertingInterpret {
            match_labels: label.clone(),
            match_expressions: vec![],
        };
        let mut json_data = BTreeMap::new();
        json_data.insert("timeInterval".to_string(), "5s".to_string());
        let namespace = self.sender.namespace.clone();

        let json_data = GrafanaDatasourceJsonData {
            time_interval: Some("5s".to_string()),
            http_header_name1: None,
            tls_skip_verify: Some(true),
            oauth_pass_thru: Some(true),
        };
        let json = build_default_dashboard(&namespace);

        let graf_data_source = GrafanaDatasource {
@@ -495,7 +499,11 @@ impl K8sPrometheusCRDAlertingInterpret {
                        "http://prometheus-operated.{}.svc.cluster.local:9090",
                        self.sender.namespace.clone()
                    ),
                    secure_json_data: None,
                    is_default: None,
                    editable: None,
                },
                values_from: None,
            },
        };

@@ -516,7 +524,9 @@ impl K8sPrometheusCRDAlertingInterpret {
            spec: GrafanaDashboardSpec {
                resync_period: Some("30s".to_string()),
                instance_selector: labels.clone(),
                json,
                json: Some(json),
                grafana_com: None,
                datasources: None,
            },
        };

@@ -9,11 +9,17 @@ use crate::{
};

#[async_trait]
pub trait PrometheusApplicationMonitoring<S: AlertSender> {
pub trait PrometheusMonitoring<S: AlertSender> {
    async fn install_prometheus(
        &self,
        sender: &S,
        inventory: &Inventory,
        receivers: Option<Vec<Box<dyn AlertReceiver<S>>>>,
    ) -> Result<PreparationOutcome, PreparationError>;

    async fn ensure_prometheus_operator(
        &self,
        sender: &S,
        inventory: &Inventory,
    ) -> Result<PreparationOutcome, PreparationError>;
}

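A hedged sketch of what an implementor of the renamed PrometheusMonitoring trait looks like; LocalTopology is a hypothetical type, the no-op bodies are illustrative only, and imports mirror the surrounding module:

#[async_trait]
impl PrometheusMonitoring<CRDPrometheus> for LocalTopology {
    async fn install_prometheus(
        &self,
        sender: &CRDPrometheus,
        inventory: &Inventory,
        _receivers: Option<Vec<Box<dyn AlertReceiver<CRDPrometheus>>>>,
    ) -> Result<PreparationOutcome, PreparationError> {
        // Make sure the operator exists before claiming success.
        self.ensure_prometheus_operator(sender, inventory).await
    }

    async fn ensure_prometheus_operator(
        &self,
        _sender: &CRDPrometheus,
        _inventory: &Inventory,
    ) -> Result<PreparationOutcome, PreparationError> {
        Ok(PreparationOutcome::Success {
            details: "prometheus operator assumed present".into(),
        })
    }
}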
@@ -12,9 +12,6 @@ use std::process::Command;
use crate::modules::k8s::ingress::{K8sIngressScore, PathType};
use crate::modules::monitoring::kube_prometheus::crd::grafana_default_dashboard::build_default_dashboard;
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanagers::{
    Alertmanager, AlertmanagerSpec,
};
use crate::modules::monitoring::kube_prometheus::crd::rhob_grafana::{
    Grafana, GrafanaDashboard, GrafanaDashboardSpec, GrafanaDatasource, GrafanaDatasourceConfig,
    GrafanaDatasourceSpec, GrafanaSpec,
@@ -25,13 +22,8 @@ use crate::modules::monitoring::kube_prometheus::crd::rhob_monitoring_stack::{
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheus_rules::{
    PrometheusRule, PrometheusRuleSpec, RuleGroup,
};
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::{
    AlertmanagerEndpoints, LabelSelector, PrometheusSpec, PrometheusSpecAlerting,
};
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::LabelSelector;

use crate::modules::monitoring::kube_prometheus::crd::rhob_role::{
    build_prom_role, build_prom_rolebinding, build_prom_service_account,
};
use crate::modules::monitoring::kube_prometheus::crd::rhob_service_monitor::{
    ServiceMonitor, ServiceMonitorSpec,
};
@@ -46,7 +38,7 @@ use crate::{
};
use harmony_types::id::Id;

use super::prometheus::PrometheusApplicationMonitoring;
use super::prometheus::PrometheusMonitoring;

#[derive(Clone, Debug, Serialize)]
pub struct RHOBAlertingScore {
@@ -56,8 +48,8 @@ pub struct RHOBAlertingScore {
    pub prometheus_rules: Vec<RuleGroup>,
}

impl<T: Topology + K8sclient + Ingress + PrometheusApplicationMonitoring<RHOBObservability>>
    Score<T> for RHOBAlertingScore
impl<T: Topology + K8sclient + Ingress + PrometheusMonitoring<RHOBObservability>> Score<T>
    for RHOBAlertingScore
{
    fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> {
        Box::new(RHOBAlertingInterpret {
@@ -82,8 +74,8 @@ pub struct RHOBAlertingInterpret {
}

#[async_trait]
impl<T: Topology + K8sclient + Ingress + PrometheusApplicationMonitoring<RHOBObservability>>
    Interpret<T> for RHOBAlertingInterpret
impl<T: Topology + K8sclient + Ingress + PrometheusMonitoring<RHOBObservability>> Interpret<T>
    for RHOBAlertingInterpret
{
    async fn execute(
        &self,

@ -4,7 +4,7 @@ use std::{
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
use async_trait::async_trait;
 | 
			
		||||
use log::{info, warn};
 | 
			
		||||
use log::{debug, warn};
 | 
			
		||||
use serde::{Deserialize, Serialize};
 | 
			
		||||
use tokio::time::sleep;
 | 
			
		||||
 | 
			
		||||
@ -19,8 +19,8 @@ use harmony_types::id::Id;
 | 
			
		||||
 | 
			
		||||
#[derive(Debug, Clone, Serialize)]
 | 
			
		||||
pub struct CephRemoveOsd {
 | 
			
		||||
    osd_deployment_name: String,
 | 
			
		||||
    rook_ceph_namespace: String,
 | 
			
		||||
    pub osd_deployment_name: String,
 | 
			
		||||
    pub rook_ceph_namespace: String,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
impl<T: Topology + K8sclient> Score<T> for CephRemoveOsd {
 | 
			
		||||
@ -54,18 +54,17 @@ impl<T: Topology + K8sclient> Interpret<T> for CephRemoveOsdInterpret {
 | 
			
		||||
        self.verify_deployment_scaled(client.clone()).await?;
 | 
			
		||||
        self.delete_deployment(client.clone()).await?;
 | 
			
		||||
        self.verify_deployment_deleted(client.clone()).await?;
 | 
			
		||||
        let osd_id_full = self.get_ceph_osd_id().unwrap();
 | 
			
		||||
        self.purge_ceph_osd(client.clone(), &osd_id_full).await?;
 | 
			
		||||
        self.verify_ceph_osd_removal(client.clone(), &osd_id_full)
 | 
			
		||||
            .await?;
 | 
			
		||||
        self.purge_ceph_osd(client.clone()).await?;
 | 
			
		||||
        self.verify_ceph_osd_removal(client.clone()).await?;
 | 
			
		||||
 | 
			
		||||
        let osd_id_full = self.get_ceph_osd_id().unwrap();
 | 
			
		||||
        Ok(Outcome::success(format!(
 | 
			
		||||
            "Successfully removed OSD {} from rook-ceph cluster by deleting deployment {}",
 | 
			
		||||
            osd_id_full, self.score.osd_deployment_name
 | 
			
		||||
        )))
 | 
			
		||||
    }
 | 
			
		||||
    fn get_name(&self) -> InterpretName {
 | 
			
		||||
        todo!()
 | 
			
		||||
        InterpretName::CephRemoveOsd
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    fn get_version(&self) -> Version {
 | 
			
		||||
@ -82,7 +81,7 @@ impl<T: Topology + K8sclient> Interpret<T> for CephRemoveOsdInterpret {
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
impl CephRemoveOsdInterpret {
 | 
			
		||||
    pub fn get_ceph_osd_id(&self) -> Result<String, InterpretError> {
 | 
			
		||||
    pub fn get_ceph_osd_id_numeric(&self) -> Result<String, InterpretError> {
 | 
			
		||||
        let osd_id_numeric = self
 | 
			
		||||
            .score
 | 
			
		||||
            .osd_deployment_name
 | 
			
		||||
@ -94,9 +93,14 @@ impl CephRemoveOsdInterpret {
 | 
			
		||||
                    self.score.osd_deployment_name
 | 
			
		||||
                ))
 | 
			
		||||
            })?;
 | 
			
		||||
        Ok(osd_id_numeric.to_string())
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    pub fn get_ceph_osd_id(&self) -> Result<String, InterpretError> {
 | 
			
		||||
        let osd_id_numeric = self.get_ceph_osd_id_numeric().unwrap();
 | 
			
		||||
        let osd_id_full = format!("osd.{}", osd_id_numeric);
 | 
			
		||||
 | 
			
		||||
        info!(
 | 
			
		||||
        debug!(
 | 
			
		||||
            "Targeting Ceph OSD: {} (parsed from deployment {})",
 | 
			
		||||
            osd_id_full, self.score.osd_deployment_name
 | 
			
		||||
        );
 | 
			
		||||
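The two helpers above split the OSD identifier into the numeric form used by `ceph osd purge` and the `osd.<n>` form used for auth-key and tree lookups. A minimal standalone sketch of that parsing, assuming Rook's usual `rook-ceph-osd-<n>` deployment naming (the helper name is illustrative, not the crate's API):

```rust
/// Illustrative only: derive the numeric and full OSD ids from a Rook
/// deployment name such as "rook-ceph-osd-3".
fn osd_ids(deployment_name: &str) -> Option<(String, String)> {
    // The numeric id is the last '-'-separated segment of the deployment name.
    let numeric = deployment_name.rsplit('-').next()?;
    numeric.parse::<u32>().ok()?; // reject names that do not end in a number
    Some((numeric.to_string(), format!("osd.{numeric}")))
}

fn main() {
    assert_eq!(
        osd_ids("rook-ceph-osd-3"),
        Some(("3".to_string(), "osd.3".to_string()))
    );
    assert_eq!(osd_ids("rook-ceph-tools"), None);
}
```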
@ -108,6 +112,7 @@ impl CephRemoveOsdInterpret {
 | 
			
		||||
        &self,
 | 
			
		||||
        client: Arc<K8sClient>,
 | 
			
		||||
    ) -> Result<Outcome, InterpretError> {
 | 
			
		||||
        debug!("verifying toolbox exists");
 | 
			
		||||
        let toolbox_dep = "rook-ceph-tools".to_string();
 | 
			
		||||
 | 
			
		||||
        match client
 | 
			
		||||
@ -149,7 +154,7 @@ impl CephRemoveOsdInterpret {
 | 
			
		||||
        &self,
 | 
			
		||||
        client: Arc<K8sClient>,
 | 
			
		||||
    ) -> Result<Outcome, InterpretError> {
 | 
			
		||||
        info!(
 | 
			
		||||
        debug!(
 | 
			
		||||
            "Scaling down OSD deployment: {}",
 | 
			
		||||
            self.score.osd_deployment_name
 | 
			
		||||
        );
 | 
			
		||||
@ -172,7 +177,7 @@ impl CephRemoveOsdInterpret {
 | 
			
		||||
    ) -> Result<Outcome, InterpretError> {
 | 
			
		||||
        let (timeout, interval, start) = self.build_timer();
 | 
			
		||||
 | 
			
		||||
        info!("Waiting for OSD deployment to scale down to 0 replicas");
 | 
			
		||||
        debug!("Waiting for OSD deployment to scale down to 0 replicas");
 | 
			
		||||
        loop {
 | 
			
		||||
            let dep = client
 | 
			
		||||
                .get_deployment(
 | 
			
		||||
@ -180,11 +185,9 @@ impl CephRemoveOsdInterpret {
 | 
			
		||||
                    Some(&self.score.rook_ceph_namespace),
 | 
			
		||||
                )
 | 
			
		||||
                .await?;
 | 
			
		||||
 | 
			
		||||
            if let Some(deployment) = dep {
 | 
			
		||||
                if let Some(status) = deployment.status {
 | 
			
		||||
                    if status.replicas.unwrap_or(1) == 0 && status.ready_replicas.unwrap_or(1) == 0
 | 
			
		||||
                    {
 | 
			
		||||
                    if status.replicas.is_none() && status.ready_replicas.is_none() {
 | 
			
		||||
                        return Ok(Outcome::success(
 | 
			
		||||
                            "Deployment successfully scaled down.".to_string(),
 | 
			
		||||
                        ));
 | 
			
		||||
@ -212,7 +215,7 @@ impl CephRemoveOsdInterpret {
 | 
			
		||||
        &self,
 | 
			
		||||
        client: Arc<K8sClient>,
 | 
			
		||||
    ) -> Result<Outcome, InterpretError> {
 | 
			
		||||
        info!(
 | 
			
		||||
        debug!(
 | 
			
		||||
            "Deleting OSD deployment: {}",
 | 
			
		||||
            self.score.osd_deployment_name
 | 
			
		||||
        );
 | 
			
		||||
@ -234,7 +237,7 @@ impl CephRemoveOsdInterpret {
 | 
			
		||||
    ) -> Result<Outcome, InterpretError> {
 | 
			
		||||
        let (timeout, interval, start) = self.build_timer();
 | 
			
		||||
 | 
			
		||||
        info!("Waiting for OSD deployment to scale down to 0 replicas");
 | 
			
		||||
        debug!("Verifying OSD deployment deleted");
 | 
			
		||||
        loop {
 | 
			
		||||
            let dep = client
 | 
			
		||||
                .get_deployment(
 | 
			
		||||
@ -244,7 +247,7 @@ impl CephRemoveOsdInterpret {
 | 
			
		||||
                .await?;
 | 
			
		||||
 | 
			
		||||
            if dep.is_none() {
 | 
			
		||||
                info!(
 | 
			
		||||
                debug!(
 | 
			
		||||
                    "Deployment {} successfully deleted.",
 | 
			
		||||
                    self.score.osd_deployment_name
 | 
			
		||||
                );
 | 
			
		||||
@ -276,12 +279,10 @@ impl CephRemoveOsdInterpret {
 | 
			
		||||
        Ok(tree)
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    pub async fn purge_ceph_osd(
 | 
			
		||||
        &self,
 | 
			
		||||
        client: Arc<K8sClient>,
 | 
			
		||||
        osd_id_full: &str,
 | 
			
		||||
    ) -> Result<Outcome, InterpretError> {
 | 
			
		||||
        info!(
 | 
			
		||||
    pub async fn purge_ceph_osd(&self, client: Arc<K8sClient>) -> Result<Outcome, InterpretError> {
 | 
			
		||||
        let osd_id_numeric = self.get_ceph_osd_id_numeric().unwrap();
 | 
			
		||||
        let osd_id_full = self.get_ceph_osd_id().unwrap();
 | 
			
		||||
        debug!(
 | 
			
		||||
            "Purging OSD {} from Ceph cluster and removing its auth key",
 | 
			
		||||
            osd_id_full
 | 
			
		||||
        );
 | 
			
		||||
@ -291,8 +292,9 @@ impl CephRemoveOsdInterpret {
 | 
			
		||||
                "app".to_string(),
 | 
			
		||||
                Some(&self.score.rook_ceph_namespace),
 | 
			
		||||
                vec![
 | 
			
		||||
                    format!("ceph osd purge {osd_id_full} --yes-i-really-mean-it").as_str(),
 | 
			
		||||
                    format!("ceph auth del osd.{osd_id_full}").as_str(),
 | 
			
		||||
                    "sh",
 | 
			
		||||
                    "-c",
 | 
			
		||||
                    format!("ceph osd purge {osd_id_numeric} --yes-i-really-mean-it && ceph auth del {osd_id_full}").as_str(),
 | 
			
		||||
                ],
 | 
			
		||||
            )
 | 
			
		||||
            .await?;
 | 
			
		||||
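For reference, the toolbox invocation built above reduces to a single `sh -c` string that purges the OSD and deletes its auth key in one shot. A small sketch with illustrative ids (`3` / `osd.3`):

```rust
// Sketch of the command vector handed to the rook-ceph-tools pod above.
// The ids are illustrative; in the interpret they come from the deployment name.
fn purge_command(osd_id_numeric: &str, osd_id_full: &str) -> Vec<String> {
    vec![
        "sh".to_string(),
        "-c".to_string(),
        format!(
            "ceph osd purge {osd_id_numeric} --yes-i-really-mean-it && ceph auth del {osd_id_full}"
        ),
    ]
}

fn main() {
    let cmd = purge_command("3", "osd.3");
    assert_eq!(
        cmd[2],
        "ceph osd purge 3 --yes-i-really-mean-it && ceph auth del osd.3"
    );
}
```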
@ -305,10 +307,10 @@ impl CephRemoveOsdInterpret {
 | 
			
		||||
    pub async fn verify_ceph_osd_removal(
 | 
			
		||||
        &self,
 | 
			
		||||
        client: Arc<K8sClient>,
 | 
			
		||||
        osd_id_full: &str,
 | 
			
		||||
    ) -> Result<Outcome, InterpretError> {
 | 
			
		||||
        let (timeout, interval, start) = self.build_timer();
 | 
			
		||||
        info!(
 | 
			
		||||
        let osd_id_full = self.get_ceph_osd_id().unwrap();
 | 
			
		||||
        debug!(
 | 
			
		||||
            "Verifying OSD {} has been removed from the Ceph tree...",
 | 
			
		||||
            osd_id_full
 | 
			
		||||
        );
 | 
			
		||||
@ -318,7 +320,7 @@ impl CephRemoveOsdInterpret {
 | 
			
		||||
                    "rook-ceph-tools".to_string(),
 | 
			
		||||
                    "app".to_string(),
 | 
			
		||||
                    Some(&self.score.rook_ceph_namespace),
 | 
			
		||||
                    vec!["ceph osd tree -f json"],
 | 
			
		||||
                    vec!["sh", "-c", "ceph osd tree -f json"],
 | 
			
		||||
                )
 | 
			
		||||
                .await?;
 | 
			
		||||
            let tree =
 | 
			
		||||
@ -1,2 +1,2 @@
 | 
			
		||||
pub mod ceph_osd_replacement_score;
 | 
			
		||||
pub mod ceph_remove_osd_score;
 | 
			
		||||
pub mod ceph_validate_health_score;
 | 
			
		||||
 | 
			
		||||
@ -40,7 +40,7 @@ pub fn init() {
 | 
			
		||||
                HarmonyEvent::HarmonyFinished => {
 | 
			
		||||
                    if !details.is_empty() {
 | 
			
		||||
                        println!(
 | 
			
		||||
                            "\n{} All done! Here's what's next for you:",
 | 
			
		||||
                            "\n{} All done! Here's a few info for you:",
 | 
			
		||||
                            theme::EMOJI_SUMMARY
 | 
			
		||||
                        );
 | 
			
		||||
                        for detail in details.iter() {
 | 
			
		||||
 | 
			
		||||
@ -54,6 +54,9 @@ struct DeployArgs {
 | 
			
		||||
 | 
			
		||||
    #[arg(long = "profile", short = 'p', default_value = "dev")]
 | 
			
		||||
    harmony_profile: HarmonyProfile,
 | 
			
		||||
 | 
			
		||||
    #[arg(long = "dry-run", short = 'd', default_value = "false")]
 | 
			
		||||
    dry_run: bool,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#[derive(Args, Clone, Debug)]
 | 
			
		||||
@ -178,6 +181,7 @@ async fn main() {
 | 
			
		||||
                command
 | 
			
		||||
                    .env("HARMONY_USE_LOCAL_K3D", format!("{use_local_k3d}"))
 | 
			
		||||
                    .env("HARMONY_PROFILE", format!("{}", args.harmony_profile))
 | 
			
		||||
                    .env("HARMONY_DRY_RUN", format!("{}", args.dry_run))
 | 
			
		||||
                    .arg("-y")
 | 
			
		||||
                    .arg("-a");
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
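The deploy wrapper now forwards the dry-run flag to the child process through the `HARMONY_DRY_RUN` environment variable. How the receiving side interprets it is not shown in this diff; a hypothetical consumer could read it like this (helper name and accepted values are assumptions):

```rust
// Hypothetical sketch only: reading the HARMONY_DRY_RUN variable set by the
// deploy wrapper above.
fn dry_run_enabled() -> bool {
    std::env::var("HARMONY_DRY_RUN")
        .map(|v| v == "true" || v == "1")
        .unwrap_or(false)
}

fn main() {
    println!("dry-run: {}", dry_run_enabled());
}
```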
@ -19,7 +19,7 @@ use serde::{Deserialize, Serialize};
 | 
			
		||||
///
 | 
			
		||||
/// **It is not meant to be very secure or unique**; it is suitable for generating up to 10 000 items per
/// second with a reasonable collision rate of 0.000014%, as calculated by this calculator: https://kevingal.com/apps/collision.html
 | 
			
		||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
 | 
			
		||||
#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord, Serialize, Deserialize)]
 | 
			
		||||
pub struct Id {
 | 
			
		||||
    value: String,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
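The collision rate quoted in the doc comment above comes from the linked birthday-problem calculator; for n ids drawn uniformly from an id space of N possible values, the standard approximation such calculators use is

    P(\text{collision}) \approx 1 - e^{-n(n-1)/(2N)}

The specific n and N behind the quoted 0.000014% are whatever was entered on that page and are not restated here.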
@ -1,2 +1,3 @@
 | 
			
		||||
pub mod id;
 | 
			
		||||
pub mod net;
 | 
			
		||||
pub mod switch;
 | 
			
		||||
 | 
			
		||||
@ -1,6 +1,6 @@
 | 
			
		||||
use serde::{Deserialize, Serialize};
 | 
			
		||||
 | 
			
		||||
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
 | 
			
		||||
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize, PartialOrd, Ord)]
 | 
			
		||||
pub struct MacAddress(pub [u8; 6]);
 | 
			
		||||
 | 
			
		||||
impl MacAddress {
 | 
			
		||||
@ -41,7 +41,7 @@ impl TryFrom<String> for MacAddress {
 | 
			
		||||
            bytes[i] = u8::from_str_radix(part, 16).map_err(|_| {
 | 
			
		||||
                std::io::Error::new(
 | 
			
		||||
                    std::io::ErrorKind::InvalidInput,
 | 
			
		||||
                    format!("Invalid hex value in part {}: '{}'", i, part),
 | 
			
		||||
                    format!("Invalid hex value in part {i}: '{part}'"),
 | 
			
		||||
                )
 | 
			
		||||
            })?;
 | 
			
		||||
        }
 | 
			
		||||
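The hunk above only switches the error message to inline format arguments; the surrounding parsing loop looks roughly like this self-contained sketch (assuming `:`-separated input, as in `TryFrom<String> for MacAddress`):

```rust
// Sketch of the MAC parsing pattern shown above.
fn parse_mac(s: &str) -> Result<[u8; 6], std::io::Error> {
    let parts: Vec<&str> = s.split(':').collect();
    if parts.len() != 6 {
        return Err(std::io::Error::new(
            std::io::ErrorKind::InvalidInput,
            "expected 6 ':'-separated parts",
        ));
    }
    let mut bytes = [0u8; 6];
    for (i, part) in parts.iter().enumerate() {
        bytes[i] = u8::from_str_radix(part, 16).map_err(|_| {
            std::io::Error::new(
                std::io::ErrorKind::InvalidInput,
                format!("Invalid hex value in part {i}: '{part}'"),
            )
        })?;
    }
    Ok(bytes)
}

fn main() {
    assert_eq!(
        parse_mac("0a:1b:2c:3d:4e:5f").unwrap(),
        [0x0a, 0x1b, 0x2c, 0x3d, 0x4e, 0x5f]
    );
    assert!(parse_mac("0a:1b").is_err());
}
```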
@ -106,8 +106,8 @@ impl Serialize for Url {
 | 
			
		||||
impl std::fmt::Display for Url {
 | 
			
		||||
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
 | 
			
		||||
        match self {
 | 
			
		||||
            Url::LocalFolder(path) => write!(f, "{}", path),
 | 
			
		||||
            Url::Url(url) => write!(f, "{}", url),
 | 
			
		||||
            Url::LocalFolder(path) => write!(f, "{path}"),
 | 
			
		||||
            Url::Url(url) => write!(f, "{url}"),
 | 
			
		||||
        }
 | 
			
		||||
    }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
							
								
								
									
176 harmony_types/src/switch.rs (Normal file)
@ -0,0 +1,176 @@
 | 
			
		||||
use std::{fmt, str::FromStr};
 | 
			
		||||
 | 
			
		||||
/// Simple error type for port parsing failures.
 | 
			
		||||
#[derive(Debug)]
 | 
			
		||||
pub enum PortParseError {
 | 
			
		||||
    /// The port string did not conform to the expected S/M/P or range format.
 | 
			
		||||
    InvalidFormat,
 | 
			
		||||
    /// A stack, module, or port segment could not be parsed as a number.
 | 
			
		||||
    InvalidSegment(String),
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
impl fmt::Display for PortParseError {
 | 
			
		||||
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 | 
			
		||||
        match self {
 | 
			
		||||
            PortParseError::InvalidFormat => write!(f, "Port string is in an unexpected format."),
 | 
			
		||||
            PortParseError::InvalidSegment(s) => write!(f, "Invalid segment in port string: {}", s),
 | 
			
		||||
        }
 | 
			
		||||
    }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/// Represents the atomic, physical location of a switch port: `<Stack>/<Module>/<Port>`.
 | 
			
		||||
///
 | 
			
		||||
/// Example: `1/1/1`
 | 
			
		||||
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]
 | 
			
		||||
pub struct PortLocation(pub u8, pub u8, pub u8);
 | 
			
		||||
 | 
			
		||||
impl fmt::Display for PortLocation {
 | 
			
		||||
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
 | 
			
		||||
        write!(f, "{}/{}/{}", self.0, self.1, self.2)
 | 
			
		||||
    }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
impl FromStr for PortLocation {
 | 
			
		||||
    type Err = PortParseError;
 | 
			
		||||
 | 
			
		||||
    /// Parses a string slice into a `PortLocation`.
 | 
			
		||||
    ///
 | 
			
		||||
    /// # Examples
 | 
			
		||||
    ///
 | 
			
		||||
    /// ```rust
 | 
			
		||||
    /// use std::str::FromStr;
 | 
			
		||||
    /// use harmony_types::switch::PortLocation;
 | 
			
		||||
    ///
 | 
			
		||||
    /// assert_eq!(PortLocation::from_str("1/1/1").unwrap(), PortLocation(1, 1, 1));
 | 
			
		||||
    /// assert_eq!(PortLocation::from_str("12/5/48").unwrap(), PortLocation(12, 5, 48));
 | 
			
		||||
    /// assert!(PortLocation::from_str("1/A/1").is_err());
 | 
			
		||||
    /// ```
 | 
			
		||||
    fn from_str(s: &str) -> Result<Self, Self::Err> {
 | 
			
		||||
        let parts: Vec<&str> = s.split('/').collect();
 | 
			
		||||
 | 
			
		||||
        if parts.len() != 3 {
 | 
			
		||||
            return Err(PortParseError::InvalidFormat);
 | 
			
		||||
        }
 | 
			
		||||
 | 
			
		||||
        let parse_segment = |part: &str| -> Result<u8, Self::Err> {
 | 
			
		||||
            u8::from_str(part).map_err(|_| PortParseError::InvalidSegment(part.to_string()))
 | 
			
		||||
        };
 | 
			
		||||
 | 
			
		||||
        let stack = parse_segment(parts[0])?;
 | 
			
		||||
        let module = parse_segment(parts[1])?;
 | 
			
		||||
        let port = parse_segment(parts[2])?;
 | 
			
		||||
 | 
			
		||||
        Ok(PortLocation(stack, module, port))
 | 
			
		||||
    }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/// Represents a Port configuration input, which can be a single port, a sequential range,
 | 
			
		||||
/// or an explicit set defined by endpoints.
 | 
			
		||||
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]
 | 
			
		||||
pub enum PortDeclaration {
 | 
			
		||||
    /// A single switch port defined by its location. Example: `PortDeclaration::Single(1/1/1)`
 | 
			
		||||
    Single(PortLocation),
 | 
			
		||||
    /// A strictly sequential range defined by two endpoints using the hyphen separator (`-`).
 | 
			
		||||
    /// All ports between the endpoints (inclusive) are implicitly included.
 | 
			
		||||
    /// Example: `PortDeclaration::Range(1/1/1, 1/1/4)`
 | 
			
		||||
    Range(PortLocation, PortLocation),
 | 
			
		||||
    /// A set of ports defined by two endpoints using the asterisk separator (`*`).
 | 
			
		||||
    /// The actual member ports must be determined contextually (e.g., from MAC tables or
 | 
			
		||||
    /// explicit configuration lists).
 | 
			
		||||
    /// Example: `PortDeclaration::Set(1/1/1, 1/1/3)` where only ports 1 and 3 might be active.
 | 
			
		||||
    Set(PortLocation, PortLocation),
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
impl PortDeclaration {
 | 
			
		||||
    /// Parses a port configuration string into a structured `PortDeclaration` enum.
 | 
			
		||||
    ///
 | 
			
		||||
    /// This function performs only basic format and numerical parsing, assuming the input
 | 
			
		||||
    /// strings (e.g., from `show` commands) are semantically valid and logically ordered.
 | 
			
		||||
    ///
 | 
			
		||||
    /// # Supported Formats
 | 
			
		||||
    ///
 | 
			
		||||
    /// * **Single Port:** `"1/1/1"`
 | 
			
		||||
    /// * **Range (Hyphen, `-`):** `"1/1/1-1/1/4"`
 | 
			
		||||
    /// * **Set (Asterisk, `*`):** `"1/1/1*1/1/4"`
 | 
			
		||||
    ///
 | 
			
		||||
    /// # Errors
 | 
			
		||||
    ///
 | 
			
		||||
    /// Returns `PortParseError` if the string format is incorrect or numerical segments
 | 
			
		||||
    /// cannot be parsed.
 | 
			
		||||
    ///
 | 
			
		||||
    /// # Examples
 | 
			
		||||
    ///
 | 
			
		||||
    /// ```rust
 | 
			
		||||
    /// use harmony_types::switch::{PortDeclaration, PortLocation};
 | 
			
		||||
    ///
 | 
			
		||||
    /// // Single Port
 | 
			
		||||
    /// assert_eq!(PortDeclaration::parse("3/2/15").unwrap(), PortDeclaration::Single(PortLocation(3, 2, 15)));
 | 
			
		||||
    ///
 | 
			
		||||
    /// // Range (Hyphen) - implies sequential ports
 | 
			
		||||
    /// let result_range = PortDeclaration::parse("1/1/1-1/1/4").unwrap();
 | 
			
		||||
    /// assert_eq!(result_range, PortDeclaration::Range(PortLocation(1, 1, 1), PortLocation(1, 1, 4)));
 | 
			
		||||
    ///
 | 
			
		||||
    /// // Set (Asterisk) - implies non-sequential set defined by endpoints
 | 
			
		||||
    /// let result_set = PortDeclaration::parse("1/1/48*2/1/48").unwrap();
 | 
			
		||||
    /// assert_eq!(result_set, PortDeclaration::Set(PortLocation(1, 1, 48), PortLocation(2, 1, 48)));
 | 
			
		||||
    ///
 | 
			
		||||
    /// // Invalid Format (will still fail basic parsing)
 | 
			
		||||
    /// assert!(PortDeclaration::parse("1/1/1/1").is_err());
 | 
			
		||||
    /// ```
 | 
			
		||||
    pub fn parse(port_str: &str) -> Result<Self, PortParseError> {
 | 
			
		||||
        if let Some((start_str, end_str)) = port_str.split_once('-') {
 | 
			
		||||
            let start_port = PortLocation::from_str(start_str.trim())?;
 | 
			
		||||
            let end_port = PortLocation::from_str(end_str.trim())?;
 | 
			
		||||
            return Ok(PortDeclaration::Range(start_port, end_port));
 | 
			
		||||
        }
 | 
			
		||||
 | 
			
		||||
        if let Some((start_str, end_str)) = port_str.split_once('*') {
 | 
			
		||||
            let start_port = PortLocation::from_str(start_str.trim())?;
 | 
			
		||||
            let end_port = PortLocation::from_str(end_str.trim())?;
 | 
			
		||||
            return Ok(PortDeclaration::Set(start_port, end_port));
 | 
			
		||||
        }
 | 
			
		||||
 | 
			
		||||
        let location = PortLocation::from_str(port_str)?;
 | 
			
		||||
        Ok(PortDeclaration::Single(location))
 | 
			
		||||
    }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
impl fmt::Display for PortDeclaration {
 | 
			
		||||
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 | 
			
		||||
        match self {
 | 
			
		||||
            PortDeclaration::Single(port) => write!(f, "{port}"),
 | 
			
		||||
            PortDeclaration::Range(start, end) => write!(f, "{start}-{end}"),
 | 
			
		||||
            PortDeclaration::Set(start, end) => write!(f, "{start}*{end}"),
 | 
			
		||||
        }
 | 
			
		||||
    }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
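A short usage sketch for the parsing API above, matching its doctests (the crate path follows the `pub mod switch` export added to `harmony_types/src/lib.rs` in this change set):

```rust
use harmony_types::switch::{PortDeclaration, PortLocation};

fn main() {
    // Ranges use '-' and are sequential; sets use '*' and are resolved contextually.
    match PortDeclaration::parse("1/1/1-1/1/4") {
        Ok(PortDeclaration::Range(start, end)) => {
            assert_eq!(start, PortLocation(1, 1, 1));
            assert_eq!(end, PortLocation(1, 1, 4));
        }
        other => panic!("unexpected parse result: {other:?}"),
    }
}
```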
#[cfg(test)]
 | 
			
		||||
mod tests {
 | 
			
		||||
    use super::*;
 | 
			
		||||
 | 
			
		||||
    #[test]
 | 
			
		||||
    fn test_parse_port_location_invalid() {
 | 
			
		||||
        assert!(PortLocation::from_str("1/1").is_err());
 | 
			
		||||
        assert!(PortLocation::from_str("1/A/1").is_err());
 | 
			
		||||
        assert!(PortLocation::from_str("1/1/256").is_err());
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    #[test]
 | 
			
		||||
    fn test_parse_declaration_single() {
 | 
			
		||||
        let single_result = PortDeclaration::parse("1/1/4").unwrap();
 | 
			
		||||
        assert!(matches!(single_result, PortDeclaration::Single(_)));
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    #[test]
 | 
			
		||||
    fn test_parse_declaration_range() {
 | 
			
		||||
        let range_result = PortDeclaration::parse("1/1/1-1/1/4").unwrap();
 | 
			
		||||
        assert!(matches!(range_result, PortDeclaration::Range(_, _)));
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    #[test]
 | 
			
		||||
    fn test_parse_declaration_set() {
 | 
			
		||||
        let set_result = PortDeclaration::parse("1/1/48*2/1/48").unwrap();
 | 
			
		||||
        assert!(matches!(set_result, PortDeclaration::Set(_, _)));
 | 
			
		||||
    }
 | 
			
		||||
}
 | 
			
		||||
@ -77,7 +77,7 @@ impl YaSerializeTrait for HAProxyId {
 | 
			
		||||
    }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#[derive(PartialEq, Debug)]
 | 
			
		||||
#[derive(PartialEq, Debug, Clone)]
 | 
			
		||||
pub struct HAProxyId(String);
 | 
			
		||||
 | 
			
		||||
impl Default for HAProxyId {
 | 
			
		||||
@ -297,7 +297,7 @@ pub struct HAProxyFrontends {
 | 
			
		||||
    pub frontend: Vec<Frontend>,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
 | 
			
		||||
#[derive(Clone, Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
 | 
			
		||||
pub struct Frontend {
 | 
			
		||||
    #[yaserde(attribute = true)]
 | 
			
		||||
    pub uuid: String,
 | 
			
		||||
@ -310,7 +310,7 @@ pub struct Frontend {
 | 
			
		||||
    pub bind_options: MaybeString,
 | 
			
		||||
    pub mode: String,
 | 
			
		||||
    #[yaserde(rename = "defaultBackend")]
 | 
			
		||||
    pub default_backend: String,
 | 
			
		||||
    pub default_backend: Option<String>,
 | 
			
		||||
    pub ssl_enabled: i32,
 | 
			
		||||
    pub ssl_certificates: MaybeString,
 | 
			
		||||
    pub ssl_default_certificate: MaybeString,
 | 
			
		||||
@ -416,7 +416,7 @@ pub struct HAProxyBackends {
 | 
			
		||||
    pub backends: Vec<HAProxyBackend>,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
 | 
			
		||||
#[derive(Clone, Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
 | 
			
		||||
pub struct HAProxyBackend {
 | 
			
		||||
    #[yaserde(attribute = true, rename = "uuid")]
 | 
			
		||||
    pub uuid: String,
 | 
			
		||||
@ -535,7 +535,7 @@ pub struct HAProxyServers {
 | 
			
		||||
    pub servers: Vec<HAProxyServer>,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
 | 
			
		||||
#[derive(Clone, Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
 | 
			
		||||
pub struct HAProxyServer {
 | 
			
		||||
    #[yaserde(attribute = true, rename = "uuid")]
 | 
			
		||||
    pub uuid: String,
 | 
			
		||||
@ -543,8 +543,8 @@ pub struct HAProxyServer {
 | 
			
		||||
    pub enabled: u8,
 | 
			
		||||
    pub name: String,
 | 
			
		||||
    pub description: MaybeString,
 | 
			
		||||
    pub address: String,
 | 
			
		||||
    pub port: u16,
 | 
			
		||||
    pub address: Option<String>,
 | 
			
		||||
    pub port: Option<u16>,
 | 
			
		||||
    pub checkport: MaybeString,
 | 
			
		||||
    pub mode: String,
 | 
			
		||||
    pub multiplexer_protocol: MaybeString,
 | 
			
		||||
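With `address` and `port` now optional on `HAProxyServer`, consumers must cope with servers that omit them. A hypothetical sketch (the helper is not part of the crate):

```rust
// Hypothetical: build a "host:port" target only when both fields are present.
fn server_target(address: Option<&str>, port: Option<u16>) -> Option<String> {
    Some(format!("{}:{}", address?, port?))
}

fn main() {
    assert_eq!(
        server_target(Some("1.1.1.1"), Some(80)),
        Some("1.1.1.1:80".to_string())
    );
    assert_eq!(server_target(None, Some(80)), None);
}
```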
@ -589,7 +589,7 @@ pub struct HAProxyHealthChecks {
 | 
			
		||||
    pub healthchecks: Vec<HAProxyHealthCheck>,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
 | 
			
		||||
#[derive(Clone, Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
 | 
			
		||||
pub struct HAProxyHealthCheck {
 | 
			
		||||
    #[yaserde(attribute = true)]
 | 
			
		||||
    pub uuid: String,
 | 
			
		||||
 | 
			
		||||
@ -25,6 +25,7 @@ sha2 = "0.10.9"
 | 
			
		||||
 | 
			
		||||
[dev-dependencies]
 | 
			
		||||
pretty_assertions.workspace = true
 | 
			
		||||
assertor.workspace = true
 | 
			
		||||
 | 
			
		||||
[lints.rust]
 | 
			
		||||
unexpected_cfgs = { level = "warn", check-cfg = ['cfg(e2e_test)'] }
 | 
			
		||||
 | 
			
		||||
@ -30,8 +30,7 @@ impl SshConfigManager {
 | 
			
		||||
 | 
			
		||||
        self.opnsense_shell
 | 
			
		||||
            .exec(&format!(
 | 
			
		||||
                "cp /conf/config.xml /conf/backup/{}",
 | 
			
		||||
                backup_filename
 | 
			
		||||
                "cp /conf/config.xml /conf/backup/{backup_filename}"
 | 
			
		||||
            ))
 | 
			
		||||
            .await
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
@ -1,9 +1,7 @@
 | 
			
		||||
mod ssh;
 | 
			
		||||
pub use ssh::*;
 | 
			
		||||
 | 
			
		||||
use async_trait::async_trait;
 | 
			
		||||
 | 
			
		||||
use crate::Error;
 | 
			
		||||
use async_trait::async_trait;
 | 
			
		||||
pub use ssh::*;
 | 
			
		||||
 | 
			
		||||
#[async_trait]
 | 
			
		||||
pub trait OPNsenseShell: std::fmt::Debug + Send + Sync {
 | 
			
		||||
 | 
			
		||||
@ -1,11 +1,8 @@
 | 
			
		||||
use std::sync::Arc;
 | 
			
		||||
 | 
			
		||||
use log::warn;
 | 
			
		||||
use crate::{config::OPNsenseShell, Error};
 | 
			
		||||
use opnsense_config_xml::{
 | 
			
		||||
    Frontend, HAProxy, HAProxyBackend, HAProxyHealthCheck, HAProxyServer, OPNsense,
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
use crate::{config::OPNsenseShell, Error};
 | 
			
		||||
use std::{collections::HashSet, sync::Arc};
 | 
			
		||||
 | 
			
		||||
pub struct LoadBalancerConfig<'a> {
 | 
			
		||||
    opnsense: &'a mut OPNsense,
 | 
			
		||||
@ -31,7 +28,7 @@ impl<'a> LoadBalancerConfig<'a> {
 | 
			
		||||
        match &mut self.opnsense.opnsense.haproxy.as_mut() {
 | 
			
		||||
            Some(haproxy) => f(haproxy),
 | 
			
		||||
            None => unimplemented!(
 | 
			
		||||
                "Adding a backend is not supported when haproxy config does not exist yet"
 | 
			
		||||
                "Cannot configure load balancer when haproxy config does not exist yet"
 | 
			
		||||
            ),
 | 
			
		||||
        }
 | 
			
		||||
    }
 | 
			
		||||
@ -40,21 +37,67 @@ impl<'a> LoadBalancerConfig<'a> {
 | 
			
		||||
        self.with_haproxy(|haproxy| haproxy.general.enabled = enabled as i32);
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    pub fn add_backend(&mut self, backend: HAProxyBackend) {
 | 
			
		||||
        warn!("TODO make sure this new backend does not refer non-existing entities like servers or health checks");
 | 
			
		||||
        self.with_haproxy(|haproxy| haproxy.backends.backends.push(backend));
 | 
			
		||||
    /// Configures a service by removing any existing service on the same port
 | 
			
		||||
    /// and then adding the new definition. This ensures idempotency.
 | 
			
		||||
    pub fn configure_service(
 | 
			
		||||
        &mut self,
 | 
			
		||||
        frontend: Frontend,
 | 
			
		||||
        backend: HAProxyBackend,
 | 
			
		||||
        servers: Vec<HAProxyServer>,
 | 
			
		||||
        healthcheck: Option<HAProxyHealthCheck>,
 | 
			
		||||
    ) {
 | 
			
		||||
        self.remove_service_by_bind_address(&frontend.bind);
 | 
			
		||||
        self.remove_servers(&servers);
 | 
			
		||||
 | 
			
		||||
        self.add_new_service(frontend, backend, servers, healthcheck);
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    pub fn add_frontend(&mut self, frontend: Frontend) {
 | 
			
		||||
        self.with_haproxy(|haproxy| haproxy.frontends.frontend.push(frontend));
 | 
			
		||||
    // Remove the corresponding real servers based on their name if they already exist.
 | 
			
		||||
    fn remove_servers(&mut self, servers: &[HAProxyServer]) {
 | 
			
		||||
        let server_names: HashSet<_> = servers.iter().map(|s| s.name.clone()).collect();
 | 
			
		||||
        self.with_haproxy(|haproxy| {
 | 
			
		||||
            haproxy
 | 
			
		||||
                .servers
 | 
			
		||||
                .servers
 | 
			
		||||
                .retain(|s| !server_names.contains(&s.name));
 | 
			
		||||
        });
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    pub fn add_healthcheck(&mut self, healthcheck: HAProxyHealthCheck) {
 | 
			
		||||
        self.with_haproxy(|haproxy| haproxy.healthchecks.healthchecks.push(healthcheck));
 | 
			
		||||
    /// Removes a service and its dependent components based on the frontend's bind address.
 | 
			
		||||
    /// This performs a cascading delete of the frontend, backend, servers, and health check.
 | 
			
		||||
    fn remove_service_by_bind_address(&mut self, bind_address: &str) {
 | 
			
		||||
        self.with_haproxy(|haproxy| {
 | 
			
		||||
            let Some(old_frontend) = remove_frontend_by_bind_address(haproxy, bind_address) else {
 | 
			
		||||
                return;
 | 
			
		||||
            };
 | 
			
		||||
 | 
			
		||||
            let Some(old_backend) = remove_backend(haproxy, old_frontend) else {
 | 
			
		||||
                return;
 | 
			
		||||
            };
 | 
			
		||||
 | 
			
		||||
            remove_healthcheck(haproxy, &old_backend);
 | 
			
		||||
            remove_linked_servers(haproxy, &old_backend);
 | 
			
		||||
        });
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    pub fn add_servers(&mut self, mut servers: Vec<HAProxyServer>) {
 | 
			
		||||
        self.with_haproxy(|haproxy| haproxy.servers.servers.append(&mut servers));
 | 
			
		||||
    /// Adds the components of a new service to the HAProxy configuration.
 | 
			
		||||
    /// This function de-duplicates servers by name to prevent configuration errors.
 | 
			
		||||
    fn add_new_service(
 | 
			
		||||
        &mut self,
 | 
			
		||||
        frontend: Frontend,
 | 
			
		||||
        backend: HAProxyBackend,
 | 
			
		||||
        servers: Vec<HAProxyServer>,
 | 
			
		||||
        healthcheck: Option<HAProxyHealthCheck>,
 | 
			
		||||
    ) {
 | 
			
		||||
        self.with_haproxy(|haproxy| {
 | 
			
		||||
            if let Some(check) = healthcheck {
 | 
			
		||||
                haproxy.healthchecks.healthchecks.push(check);
 | 
			
		||||
            }
 | 
			
		||||
 | 
			
		||||
            haproxy.servers.servers.extend(servers);
 | 
			
		||||
            haproxy.backends.backends.push(backend);
 | 
			
		||||
            haproxy.frontends.frontend.push(frontend);
 | 
			
		||||
        });
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    pub async fn reload_restart(&self) -> Result<(), Error> {
 | 
			
		||||
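The replace-then-add flow above is what makes `configure_service` idempotent: the frontend on the same bind address is removed along with its backend, health check, and linked servers, and any incoming server that shares a name with an existing one is dropped before the fresh definitions are pushed. A simplified sketch of that de-duplication step, using a hypothetical type rather than the real HAProxy structs:

```rust
use std::collections::HashSet;

// Hypothetical stand-in for HAProxyServer; only the name matters here.
#[derive(Clone, Debug, PartialEq)]
struct Server {
    name: String,
}

// Mirrors the retain-then-extend pattern of remove_servers/add_new_service.
fn upsert_servers(existing: &mut Vec<Server>, incoming: Vec<Server>) {
    let names: HashSet<String> = incoming.iter().map(|s| s.name.clone()).collect();
    existing.retain(|s| !names.contains(&s.name)); // drop stale entries with the same name
    existing.extend(incoming); // then append the fresh definitions
}

fn main() {
    let mut servers = vec![Server { name: "api-1".into() }];
    upsert_servers(
        &mut servers,
        vec![
            Server { name: "api-1".into() },
            Server { name: "api-2".into() },
        ],
    );
    assert_eq!(servers.len(), 2);
}
```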
@ -82,3 +125,262 @@ impl<'a> LoadBalancerConfig<'a> {
 | 
			
		||||
        Ok(())
 | 
			
		||||
    }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
fn remove_frontend_by_bind_address(haproxy: &mut HAProxy, bind_address: &str) -> Option<Frontend> {
 | 
			
		||||
    let pos = haproxy
 | 
			
		||||
        .frontends
 | 
			
		||||
        .frontend
 | 
			
		||||
        .iter()
 | 
			
		||||
        .position(|f| f.bind == bind_address);
 | 
			
		||||
 | 
			
		||||
    match pos {
 | 
			
		||||
        Some(pos) => Some(haproxy.frontends.frontend.remove(pos)),
 | 
			
		||||
        None => None,
 | 
			
		||||
    }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
fn remove_backend(haproxy: &mut HAProxy, old_frontend: Frontend) -> Option<HAProxyBackend> {
 | 
			
		||||
    let default_backend = old_frontend.default_backend?;
 | 
			
		||||
    let pos = haproxy
 | 
			
		||||
        .backends
 | 
			
		||||
        .backends
 | 
			
		||||
        .iter()
 | 
			
		||||
        .position(|b| b.uuid == default_backend);
 | 
			
		||||
 | 
			
		||||
    match pos {
 | 
			
		||||
        Some(pos) => Some(haproxy.backends.backends.remove(pos)),
 | 
			
		||||
        None => None, // orphaned frontend, shouldn't happen
 | 
			
		||||
    }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
fn remove_healthcheck(haproxy: &mut HAProxy, backend: &HAProxyBackend) {
 | 
			
		||||
    if let Some(uuid) = &backend.health_check.content {
 | 
			
		||||
        haproxy
 | 
			
		||||
            .healthchecks
 | 
			
		||||
            .healthchecks
 | 
			
		||||
            .retain(|h| h.uuid != *uuid);
 | 
			
		||||
    }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/// Remove the backend's servers. This assumes servers are not shared between services.
 | 
			
		||||
fn remove_linked_servers(haproxy: &mut HAProxy, backend: &HAProxyBackend) {
 | 
			
		||||
    if let Some(server_uuids_str) = &backend.linked_servers.content {
 | 
			
		||||
        let server_uuids_to_remove: HashSet<_> = server_uuids_str.split(',').collect();
 | 
			
		||||
        haproxy
 | 
			
		||||
            .servers
 | 
			
		||||
            .servers
 | 
			
		||||
            .retain(|s| !server_uuids_to_remove.contains(s.uuid.as_str()));
 | 
			
		||||
    }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#[cfg(test)]
 | 
			
		||||
mod tests {
 | 
			
		||||
    use crate::config::DummyOPNSenseShell;
 | 
			
		||||
    use assertor::*;
 | 
			
		||||
    use opnsense_config_xml::{
 | 
			
		||||
        Frontend, HAProxy, HAProxyBackend, HAProxyBackends, HAProxyFrontends, HAProxyHealthCheck,
 | 
			
		||||
        HAProxyHealthChecks, HAProxyId, HAProxyServer, HAProxyServers, MaybeString, OPNsense,
 | 
			
		||||
    };
 | 
			
		||||
    use std::sync::Arc;
 | 
			
		||||
 | 
			
		||||
    use super::LoadBalancerConfig;
 | 
			
		||||
 | 
			
		||||
    static SERVICE_BIND_ADDRESS: &str = "192.168.1.1:80";
 | 
			
		||||
    static OTHER_SERVICE_BIND_ADDRESS: &str = "192.168.1.1:443";
 | 
			
		||||
 | 
			
		||||
    static SERVER_ADDRESS: &str = "1.1.1.1:80";
 | 
			
		||||
    static OTHER_SERVER_ADDRESS: &str = "1.1.1.1:443";
 | 
			
		||||
 | 
			
		||||
    #[test]
 | 
			
		||||
    fn configure_service_should_add_all_service_components_to_haproxy() {
 | 
			
		||||
        let mut opnsense = given_opnsense();
 | 
			
		||||
        let mut load_balancer = given_load_balancer(&mut opnsense);
 | 
			
		||||
        let (healthcheck, servers, backend, frontend) =
 | 
			
		||||
            given_service(SERVICE_BIND_ADDRESS, SERVER_ADDRESS);
 | 
			
		||||
 | 
			
		||||
        load_balancer.configure_service(
 | 
			
		||||
            frontend.clone(),
 | 
			
		||||
            backend.clone(),
 | 
			
		||||
            servers.clone(),
 | 
			
		||||
            Some(healthcheck.clone()),
 | 
			
		||||
        );
 | 
			
		||||
 | 
			
		||||
        assert_haproxy_configured_with(
 | 
			
		||||
            opnsense,
 | 
			
		||||
            vec![frontend],
 | 
			
		||||
            vec![backend],
 | 
			
		||||
            servers,
 | 
			
		||||
            vec![healthcheck],
 | 
			
		||||
        );
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    #[test]
 | 
			
		||||
    fn configure_service_should_replace_service_on_same_bind_address() {
 | 
			
		||||
        let (healthcheck, servers, backend, frontend) =
 | 
			
		||||
            given_service(SERVICE_BIND_ADDRESS, SERVER_ADDRESS);
 | 
			
		||||
        let mut opnsense = given_opnsense_with(given_haproxy(
 | 
			
		||||
            vec![frontend.clone()],
 | 
			
		||||
            vec![backend.clone()],
 | 
			
		||||
            servers.clone(),
 | 
			
		||||
            vec![healthcheck.clone()],
 | 
			
		||||
        ));
 | 
			
		||||
        let mut load_balancer = given_load_balancer(&mut opnsense);
 | 
			
		||||
 | 
			
		||||
        let (updated_healthcheck, updated_servers, updated_backend, updated_frontend) =
 | 
			
		||||
            given_service(SERVICE_BIND_ADDRESS, OTHER_SERVER_ADDRESS);
 | 
			
		||||
 | 
			
		||||
        load_balancer.configure_service(
 | 
			
		||||
            updated_frontend.clone(),
 | 
			
		||||
            updated_backend.clone(),
 | 
			
		||||
            updated_servers.clone(),
 | 
			
		||||
            Some(updated_healthcheck.clone()),
 | 
			
		||||
        );
 | 
			
		||||
 | 
			
		||||
        assert_haproxy_configured_with(
 | 
			
		||||
            opnsense,
 | 
			
		||||
            vec![updated_frontend],
 | 
			
		||||
            vec![updated_backend],
 | 
			
		||||
            updated_servers,
 | 
			
		||||
            vec![updated_healthcheck],
 | 
			
		||||
        );
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    #[test]
 | 
			
		||||
    fn configure_service_should_keep_existing_service_on_different_bind_addresses() {
 | 
			
		||||
        let (healthcheck, servers, backend, frontend) =
 | 
			
		||||
            given_service(SERVICE_BIND_ADDRESS, SERVER_ADDRESS);
 | 
			
		||||
        let (other_healthcheck, other_servers, other_backend, other_frontend) =
 | 
			
		||||
            given_service(OTHER_SERVICE_BIND_ADDRESS, OTHER_SERVER_ADDRESS);
 | 
			
		||||
        let mut opnsense = given_opnsense_with(given_haproxy(
 | 
			
		||||
            vec![frontend.clone()],
 | 
			
		||||
            vec![backend.clone()],
 | 
			
		||||
            servers.clone(),
 | 
			
		||||
            vec![healthcheck.clone()],
 | 
			
		||||
        ));
 | 
			
		||||
        let mut load_balancer = given_load_balancer(&mut opnsense);
 | 
			
		||||
 | 
			
		||||
        load_balancer.configure_service(
 | 
			
		||||
            other_frontend.clone(),
 | 
			
		||||
            other_backend.clone(),
 | 
			
		||||
            other_servers.clone(),
 | 
			
		||||
            Some(other_healthcheck.clone()),
 | 
			
		||||
        );
 | 
			
		||||
 | 
			
		||||
        assert_haproxy_configured_with(
 | 
			
		||||
            opnsense,
 | 
			
		||||
            vec![frontend, other_frontend],
 | 
			
		||||
            vec![backend, other_backend],
 | 
			
		||||
            [servers, other_servers].concat(),
 | 
			
		||||
            vec![healthcheck, other_healthcheck],
 | 
			
		||||
        );
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    fn assert_haproxy_configured_with(
 | 
			
		||||
        opnsense: OPNsense,
 | 
			
		||||
        frontends: Vec<Frontend>,
 | 
			
		||||
        backends: Vec<HAProxyBackend>,
 | 
			
		||||
        servers: Vec<HAProxyServer>,
 | 
			
		||||
        healthchecks: Vec<HAProxyHealthCheck>,
 | 
			
		||||
    ) {
 | 
			
		||||
        let haproxy = opnsense.opnsense.haproxy.as_ref().unwrap();
 | 
			
		||||
        assert_that!(haproxy.frontends.frontend).contains_exactly(frontends);
 | 
			
		||||
        assert_that!(haproxy.backends.backends).contains_exactly(backends);
 | 
			
		||||
        assert_that!(haproxy.servers.servers).is_equal_to(servers);
 | 
			
		||||
        assert_that!(haproxy.healthchecks.healthchecks).contains_exactly(healthchecks);
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    fn given_opnsense() -> OPNsense {
 | 
			
		||||
        OPNsense::default()
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    fn given_opnsense_with(haproxy: HAProxy) -> OPNsense {
 | 
			
		||||
        let mut opnsense = OPNsense::default();
 | 
			
		||||
        opnsense.opnsense.haproxy = Some(haproxy);
 | 
			
		||||
 | 
			
		||||
        opnsense
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    fn given_load_balancer<'a>(opnsense: &'a mut OPNsense) -> LoadBalancerConfig<'a> {
 | 
			
		||||
        let opnsense_shell = Arc::new(DummyOPNSenseShell {});
 | 
			
		||||
        if opnsense.opnsense.haproxy.is_none() {
 | 
			
		||||
            opnsense.opnsense.haproxy = Some(HAProxy::default());
 | 
			
		||||
        }
 | 
			
		||||
        LoadBalancerConfig::new(opnsense, opnsense_shell)
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    fn given_service(
 | 
			
		||||
        bind_address: &str,
 | 
			
		||||
        server_address: &str,
 | 
			
		||||
    ) -> (
 | 
			
		||||
        HAProxyHealthCheck,
 | 
			
		||||
        Vec<HAProxyServer>,
 | 
			
		||||
        HAProxyBackend,
 | 
			
		||||
        Frontend,
 | 
			
		||||
    ) {
 | 
			
		||||
        let healthcheck = given_healthcheck();
 | 
			
		||||
        let servers = vec![given_server(server_address)];
 | 
			
		||||
        let backend = given_backend();
 | 
			
		||||
        let frontend = given_frontend(bind_address);
 | 
			
		||||
        (healthcheck, servers, backend, frontend)
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    fn given_haproxy(
 | 
			
		||||
        frontends: Vec<Frontend>,
 | 
			
		||||
        backends: Vec<HAProxyBackend>,
 | 
			
		||||
        servers: Vec<HAProxyServer>,
 | 
			
		||||
        healthchecks: Vec<HAProxyHealthCheck>,
 | 
			
		||||
    ) -> HAProxy {
 | 
			
		||||
        HAProxy {
 | 
			
		||||
            frontends: HAProxyFrontends {
 | 
			
		||||
                frontend: frontends,
 | 
			
		||||
            },
 | 
			
		||||
            backends: HAProxyBackends { backends },
 | 
			
		||||
            servers: HAProxyServers { servers },
 | 
			
		||||
            healthchecks: HAProxyHealthChecks { healthchecks },
 | 
			
		||||
            ..Default::default()
 | 
			
		||||
        }
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    fn given_frontend(bind_address: &str) -> Frontend {
 | 
			
		||||
        Frontend {
 | 
			
		||||
            uuid: "uuid".into(),
 | 
			
		||||
            id: HAProxyId::default(),
 | 
			
		||||
            enabled: 1,
 | 
			
		||||
            name: format!("frontend_{bind_address}"),
 | 
			
		||||
            bind: bind_address.into(),
 | 
			
		||||
            default_backend: Some("backend-uuid".into()),
 | 
			
		||||
            ..Default::default()
 | 
			
		||||
        }
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    fn given_backend() -> HAProxyBackend {
 | 
			
		||||
        HAProxyBackend {
 | 
			
		||||
            uuid: "backend-uuid".into(),
 | 
			
		||||
            id: HAProxyId::default(),
 | 
			
		||||
            enabled: 1,
 | 
			
		||||
            name: "backend_192.168.1.1:80".into(),
 | 
			
		||||
            linked_servers: MaybeString::from("server-uuid"),
 | 
			
		||||
            health_check_enabled: 1,
 | 
			
		||||
            health_check: MaybeString::from("healthcheck-uuid"),
 | 
			
		||||
            ..Default::default()
 | 
			
		||||
        }
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    fn given_server(address: &str) -> HAProxyServer {
 | 
			
		||||
        HAProxyServer {
 | 
			
		||||
            uuid: "server-uuid".into(),
 | 
			
		||||
            id: HAProxyId::default(),
 | 
			
		||||
            name: address.into(),
 | 
			
		||||
            address: Some(address.into()),
 | 
			
		||||
            ..Default::default()
 | 
			
		||||
        }
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    fn given_healthcheck() -> HAProxyHealthCheck {
 | 
			
		||||
        HAProxyHealthCheck {
 | 
			
		||||
            uuid: "healthcheck-uuid".into(),
 | 
			
		||||
            name: "healthcheck".into(),
 | 
			
		||||
            ..Default::default()
 | 
			
		||||
        }
 | 
			
		||||
    }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||