Compare commits: feat/multi ... master

75 commits

| SHA1 |
|---|
| c80ede706b |
| b2825ec1ef |
| 609d7acb5d |
| de761cf538 |
| c069207f12 |
| 7368184917 |
| 05205f4ac1 |
| 3174645c97 |
| 7536f4ec4b |
| 464347d3e5 |
| 7f415f5b98 |
| 2a520a1d7c |
| 987f195e2f |
| 14d1823d15 |
| 2a48d51479 |
| 20a227bb41 |
| ce91ee0168 |
| ed7f81aa1f |
| cb66b7592e |
| a815f6ac9c |
| 2d891e4463 |
| f66e58b9ca |
| ea39d93aa7 |
| 6989d208cf |
| c0d54a4466 |
| fc384599a1 |
| c0bd8007c7 |
| 7dff70edcf |
| 06a0c44c3c |
| 85bec66e58 |
| 1f3796f503 |
| cf576192a8 |
| 5f78300d78 |
| f7e9669009 |
| 2d3c32469c |
| f65e16df7b |
| 1cec398d4d |
| 58b6268989 |
| cbbaae2ac8 |
| 4a500e4eb7 |
| f073b7e5fb |
| c84b2413ed |
| f83fd09f11 |
| c15bd53331 |
| 6e6f57e38c |
| 6f55f79281 |
| 19f87fdaf7 |
| 49370af176 |
| cf0b8326dc |
| 1e2563f7d1 |
| 7f50c36f11 |
| 4df451bc41 |
| 49dad343ad |
| 9961e8b79d |
| 9b889f71da |
| 7514ebfb5c |
| b3ae4e6611 |
| 8424778871 |
| 7bc083701e |
| 4fa2b8deb6 |
| f3639c604c |
| 258cfa279e |
| ceafabf430 |
| 11481b16cd |
| 21dcb75408 |
| a5f9ecfcf7 |
| 849bd79710 |
| c5101e096a |
| cd0720f43e |
| b9e04d21da |
| a0884950d7 |
| 29d22a611f |
| 3bf5cb0526 |
| 54803c40a2 |
| 288129b0c1 |
							
								
								
									
**Cargo.lock** · 65 changed lines · generated
```diff
@@ -429,6 +429,15 @@ dependencies = [
 "wait-timeout",
]

[[package]]
name = "assertor"
version = "0.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4ff24d87260733dc86d38a11c60d9400ce4a74a05d0dafa2a6f5ab249cd857cb"
dependencies = [
 "num-traits",
]

[[package]]
name = "async-broadcast"
version = "0.7.2"
@@ -665,6 +674,22 @@ dependencies = [
 "serde_with",
]

[[package]]
name = "brocade"
version = "0.1.0"
dependencies = [
 "async-trait",
 "env_logger",
 "harmony_secret",
 "harmony_types",
 "log",
 "regex",
 "russh",
 "russh-keys",
 "serde",
 "tokio",
]

[[package]]
name = "brotli"
version = "8.0.2"
@@ -1755,6 +1780,7 @@ dependencies = [
name = "example-nanodc"
version = "0.1.0"
dependencies = [
 "brocade",
 "cidr",
 "env_logger",
 "harmony",
@@ -1763,6 +1789,7 @@ dependencies = [
 "harmony_tui",
 "harmony_types",
 "log",
 "serde",
 "tokio",
 "url",
]
@@ -1781,6 +1808,7 @@ dependencies = [
name = "example-okd-install"
version = "0.1.0"
dependencies = [
 "brocade",
 "cidr",
 "env_logger",
 "harmony",
@@ -1795,17 +1823,32 @@ dependencies = [
 "url",
]

[[package]]
name = "example-openbao"
version = "0.1.0"
dependencies = [
 "harmony",
 "harmony_cli",
 "harmony_macros",
 "harmony_types",
 "tokio",
 "url",
]

[[package]]
name = "example-opnsense"
version = "0.1.0"
dependencies = [
 "brocade",
 "cidr",
 "env_logger",
 "harmony",
 "harmony_macros",
 "harmony_secret",
 "harmony_tui",
 "harmony_types",
 "log",
 "serde",
 "tokio",
 "url",
]
@@ -1814,6 +1857,7 @@ dependencies = [
name = "example-pxe"
version = "0.1.0"
dependencies = [
 "brocade",
 "cidr",
 "env_logger",
 "harmony",
@@ -1828,6 +1872,15 @@ dependencies = [
 "url",
]

[[package]]
name = "example-remove-rook-osd"
version = "0.1.0"
dependencies = [
 "harmony",
 "harmony_cli",
 "tokio",
]

[[package]]
name = "example-rust"
version = "0.1.0"
@@ -2305,9 +2358,11 @@ name = "harmony"
version = "0.1.0"
dependencies = [
 "askama",
 "assertor",
 "async-trait",
 "base64 0.22.1",
 "bollard",
 "brocade",
 "chrono",
 "cidr",
 "convert_case",
@@ -2338,6 +2393,7 @@ dependencies = [
 "once_cell",
 "opnsense-config",
 "opnsense-config-xml",
 "option-ext",
 "pretty_assertions",
 "reqwest 0.11.27",
 "russh",
@@ -3878,6 +3934,7 @@ checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e"
name = "opnsense-config"
version = "0.1.0"
dependencies = [
 "assertor",
 "async-trait",
 "chrono",
 "env_logger",
@@ -4537,9 +4594,9 @@ dependencies = [

[[package]]
name = "regex"
version = "1.11.2"
version = "1.11.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "23d7fd106d8c02486a8d64e778353d1cffe08ce79ac2e82f540c86d0facf6912"
checksum = "8b5288124840bee7b386bc413c487869b360b2b4ec421ea56425128692f2a82c"
dependencies = [
 "aho-corasick 1.1.3",
 "memchr",
@@ -4549,9 +4606,9 @@ dependencies = [

[[package]]
name = "regex-automata"
version = "0.4.10"
version = "0.4.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6b9458fa0bfeeac22b5ca447c63aaf45f28439a709ccd244698632f9aa6394d6"
checksum = "833eb9ce86d40ef33cb1306d8accf7bc8ec2bfea4355cbdebb3df68b40925cad"
dependencies = [
 "aho-corasick 1.1.3",
 "memchr",
```
							
								
								
									
**Cargo.toml** · 15 changed lines
```diff
@@ -14,7 +14,9 @@ members = [
  "harmony_composer",
  "harmony_inventory_agent",
  "harmony_secret_derive",
  "harmony_secret", "adr/agent_discovery/mdns",
  "harmony_secret",
  "adr/agent_discovery/mdns",
  "brocade",
]

[workspace.package]
@@ -66,5 +68,12 @@ thiserror = "2.0.14"
serde = { version = "1.0.209", features = ["derive", "rc"] }
serde_json = "1.0.127"
askama = "0.14"
sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite" ] }
reqwest = { version = "0.12", features = ["blocking", "stream", "rustls-tls", "http2", "json"], default-features = false }
sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite"] }
reqwest = { version = "0.12", features = [
  "blocking",
  "stream",
  "rustls-tls",
  "http2",
  "json",
], default-features = false }
assertor = "0.0.4"
```
							
								
								
									
**README.md** · 69 changed lines
````diff
@@ -36,48 +36,59 @@ These principles surface as simple, ergonomic Rust APIs that let teams focus on

## 2 · Quick Start

The snippet below spins up a complete **production-grade LAMP stack** with monitoring. Swap it for your own scores to deploy anything from microservices to machine-learning pipelines.
The snippet below spins up a complete **production-grade Rust + Leptos Webapp** with monitoring. Swap it for your own scores to deploy anything from microservices to machine-learning pipelines.

```rust
use harmony::{
    data::Version,
    inventory::Inventory,
    maestro::Maestro,
    modules::{
        lamp::{LAMPConfig, LAMPScore},
        monitoring::monitoring_alerting::MonitoringAlertingStackScore,
        application::{
            ApplicationScore, RustWebFramework, RustWebapp,
            features::{PackagingDeployment, rhob_monitoring::Monitoring},
        },
        monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
    },
    topology::{K8sAnywhereTopology, Url},
    topology::K8sAnywhereTopology,
};
use harmony_macros::hurl;
use std::{path::PathBuf, sync::Arc};

#[tokio::main]
async fn main() {
    // 1. Describe what you want
    let lamp_stack = LAMPScore {
        name: "harmony-lamp-demo".into(),
        domain: Url::Url(url::Url::parse("https://lampdemo.example.com").unwrap()),
        php_version: Version::from("8.3.0").unwrap(),
        config: LAMPConfig {
            project_root: "./php".into(),
            database_size: "4Gi".into(),
            ..Default::default()
        },
    let application = Arc::new(RustWebapp {
        name: "harmony-example-leptos".to_string(),
        project_root: PathBuf::from(".."), // <== Your project root, usually .. if you use the standard `/harmony` folder
        framework: Some(RustWebFramework::Leptos),
        service_port: 8080,
    });

    // Define your Application deployment and the features you want
    let app = ApplicationScore {
        features: vec![
            Box::new(PackagingDeployment {
                application: application.clone(),
            }),
            Box::new(Monitoring {
                application: application.clone(),
                alert_receiver: vec![
                    Box::new(DiscordWebhook {
                        name: "test-discord".to_string(),
                        url: hurl!("https://discord.doesnt.exist.com"), // <== Get your discord webhook url
                    }),
                ],
            }),
        ],
        application,
    };

    // 2. Enhance with extra scores (monitoring, CI/CD, …)
    let mut monitoring = MonitoringAlertingStackScore::new();
    monitoring.namespace = Some(lamp_stack.config.namespace.clone());

    // 3. Run your scores on the desired topology & inventory
    harmony_cli::run(
        Inventory::autoload(),                // auto-detect hardware / kube-config
        K8sAnywhereTopology::from_env(),      // local k3d, CI, staging, prod…
        vec![
          Box::new(lamp_stack),
          Box::new(monitoring)
        ],
        None
    ).await.unwrap();
        Inventory::autoload(),
        K8sAnywhereTopology::from_env(), // <== Deploy to local automatically provisioned local k3d by default or connect to any kubernetes cluster
        vec![Box::new(app)],
        None,
    )
    .await
    .unwrap();
}
```

````
							
								
								
									
**brocade/Cargo.toml** · new file · 18 lines
```toml
@@ -0,0 +1,18 @@
[package]
name = "brocade"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true

[dependencies]
async-trait.workspace = true
harmony_types = { path = "../harmony_types" }
russh.workspace = true
russh-keys.workspace = true
tokio.workspace = true
log.workspace = true
env_logger.workspace = true
regex = "1.11.3"
harmony_secret = { path = "../harmony_secret" }
serde.workspace = true
```
								
								
									
**brocade/examples/main.rs** · new file · 70 lines
```rust
@@ -0,0 +1,70 @@
use std::net::{IpAddr, Ipv4Addr};

use brocade::BrocadeOptions;
use harmony_secret::{Secret, SecretManager};
use harmony_types::switch::PortLocation;
use serde::{Deserialize, Serialize};

#[derive(Secret, Clone, Debug, Serialize, Deserialize)]
struct BrocadeSwitchAuth {
    username: String,
    password: String,
}

#[tokio::main]
async fn main() {
    env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init();

    // let ip = IpAddr::V4(Ipv4Addr::new(10, 0, 0, 250)); // old brocade @ ianlet
    let ip = IpAddr::V4(Ipv4Addr::new(192, 168, 55, 101)); // brocade @ sto1
    // let ip = IpAddr::V4(Ipv4Addr::new(192, 168, 4, 11)); // brocade @ st
    let switch_addresses = vec![ip];

    let config = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
        .await
        .unwrap();

    let brocade = brocade::init(
        &switch_addresses,
        22,
        &config.username,
        &config.password,
        Some(BrocadeOptions {
            dry_run: true,
            ..Default::default()
        }),
    )
    .await
    .expect("Brocade client failed to connect");

    let entries = brocade.get_stack_topology().await.unwrap();
    println!("Stack topology: {entries:#?}");

    let entries = brocade.get_interfaces().await.unwrap();
    println!("Interfaces: {entries:#?}");

    let version = brocade.version().await.unwrap();
    println!("Version: {version:?}");

    println!("--------------");
    let mac_adddresses = brocade.get_mac_address_table().await.unwrap();
    println!("VLAN\tMAC\t\t\tPORT");
    for mac in mac_adddresses {
        println!("{}\t{}\t{}", mac.vlan, mac.mac_address, mac.port);
    }

    println!("--------------");
    let channel_name = "1";
    brocade.clear_port_channel(channel_name).await.unwrap();

    println!("--------------");
    let channel_id = brocade.find_available_channel_id().await.unwrap();

    println!("--------------");
    let channel_name = "HARMONY_LAG";
    let ports = [PortLocation(2, 0, 35)];
    brocade
        .create_port_channel(channel_id, channel_name, &ports)
        .await
        .unwrap();
}
```
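The example connects with `dry_run: true`, presumably so the generated switch commands are logged rather than applied. As a follow-up illustration (not part of this diff), a minimal sketch of driving `configure_interfaces` through the same trait object returned by `brocade::init`; the helper name and the blanket choice of access mode are assumptions made for the example:

```rust
use brocade::{BrocadeClient, Error, PortOperatingMode};

/// Hypothetical helper (not in the diff): force every discovered interface into
/// plain access mode using the `BrocadeClient` trait methods shown above.
async fn reset_to_access(client: &(dyn BrocadeClient + Send + Sync)) -> Result<(), Error> {
    let interfaces = client.get_interfaces().await?;
    let changes = interfaces
        .into_iter()
        .map(|i| (i.name, PortOperatingMode::Access)) // names look like "TenGigabitEthernet 1/0/1"
        .collect();
    client.configure_interfaces(changes).await
}
```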
							
								
								
									
**brocade/src/fast_iron.rs** · new file · 212 lines
```rust
@@ -0,0 +1,212 @@
use super::BrocadeClient;
use crate::{
    BrocadeInfo, Error, ExecutionMode, InterSwitchLink, InterfaceInfo, MacAddressEntry,
    PortChannelId, PortOperatingMode, parse_brocade_mac_address, shell::BrocadeShell,
};

use async_trait::async_trait;
use harmony_types::switch::{PortDeclaration, PortLocation};
use log::{debug, info};
use regex::Regex;
use std::{collections::HashSet, str::FromStr};

#[derive(Debug)]
pub struct FastIronClient {
    shell: BrocadeShell,
    version: BrocadeInfo,
}

impl FastIronClient {
    pub fn init(mut shell: BrocadeShell, version_info: BrocadeInfo) -> Self {
        shell.before_all(vec!["skip-page-display".into()]);
        shell.after_all(vec!["page".into()]);

        Self {
            shell,
            version: version_info,
        }
    }

    fn parse_mac_entry(&self, line: &str) -> Option<Result<MacAddressEntry, Error>> {
        debug!("[Brocade] Parsing mac address entry: {line}");
        let parts: Vec<&str> = line.split_whitespace().collect();
        if parts.len() < 3 {
            return None;
        }

        let (vlan, mac_address, port) = match parts.len() {
            3 => (
                u16::from_str(parts[0]).ok()?,
                parse_brocade_mac_address(parts[1]).ok()?,
                parts[2].to_string(),
            ),
            _ => (
                1,
                parse_brocade_mac_address(parts[0]).ok()?,
                parts[1].to_string(),
            ),
        };

        let port =
            PortDeclaration::parse(&port).map_err(|e| Error::UnexpectedError(format!("{e}")));

        match port {
            Ok(p) => Some(Ok(MacAddressEntry {
                vlan,
                mac_address,
                port: p,
            })),
            Err(e) => Some(Err(e)),
        }
    }

    fn parse_stack_port_entry(&self, line: &str) -> Option<Result<InterSwitchLink, Error>> {
        debug!("[Brocade] Parsing stack port entry: {line}");
        let parts: Vec<&str> = line.split_whitespace().collect();
        if parts.len() < 10 {
            return None;
        }

        let local_port = PortLocation::from_str(parts[0]).ok()?;

        Some(Ok(InterSwitchLink {
            local_port,
            remote_port: None,
        }))
    }

    fn build_port_channel_commands(
        &self,
        channel_id: PortChannelId,
        channel_name: &str,
        ports: &[PortLocation],
    ) -> Vec<String> {
        let mut commands = vec![
            "configure terminal".to_string(),
            format!("lag {channel_name} static id {channel_id}"),
        ];

        for port in ports {
            commands.push(format!("ports ethernet {port}"));
        }

        commands.push(format!("primary-port {}", ports[0]));
        commands.push("deploy".into());
        commands.push("exit".into());
        commands.push("write memory".into());
        commands.push("exit".into());

        commands
    }
}

#[async_trait]
impl BrocadeClient for FastIronClient {
    async fn version(&self) -> Result<BrocadeInfo, Error> {
        Ok(self.version.clone())
    }

    async fn get_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error> {
        info!("[Brocade] Showing MAC address table...");

        let output = self
            .shell
            .run_command("show mac-address", ExecutionMode::Regular)
            .await?;

        output
            .lines()
            .skip(2)
            .filter_map(|line| self.parse_mac_entry(line))
            .collect()
    }

    async fn get_stack_topology(&self) -> Result<Vec<InterSwitchLink>, Error> {
        let output = self
            .shell
            .run_command("show interface stack-ports", crate::ExecutionMode::Regular)
            .await?;

        output
            .lines()
            .skip(1)
            .filter_map(|line| self.parse_stack_port_entry(line))
            .collect()
    }

    async fn get_interfaces(&self) -> Result<Vec<InterfaceInfo>, Error> {
        todo!()
    }

    async fn configure_interfaces(
        &self,
        _interfaces: Vec<(String, PortOperatingMode)>,
    ) -> Result<(), Error> {
        todo!()
    }

    async fn find_available_channel_id(&self) -> Result<PortChannelId, Error> {
        info!("[Brocade] Finding next available channel id...");

        let output = self
            .shell
            .run_command("show lag", ExecutionMode::Regular)
            .await?;
        let re = Regex::new(r"=== LAG .* ID\s+(\d+)").expect("Invalid regex");

        let used_ids: HashSet<u8> = output
            .lines()
            .filter_map(|line| {
                re.captures(line)
                    .and_then(|c| c.get(1))
                    .and_then(|id_match| id_match.as_str().parse().ok())
            })
            .collect();

        let mut next_id: u8 = 1;
        loop {
            if !used_ids.contains(&next_id) {
                break;
            }
            next_id += 1;
        }

        info!("[Brocade] Found channel id: {next_id}");
        Ok(next_id)
    }

    async fn create_port_channel(
        &self,
        channel_id: PortChannelId,
        channel_name: &str,
        ports: &[PortLocation],
    ) -> Result<(), Error> {
        info!(
            "[Brocade] Configuring port-channel '{channel_name} {channel_id}' with ports: {ports:?}"
        );

        let commands = self.build_port_channel_commands(channel_id, channel_name, ports);
        self.shell
            .run_commands(commands, ExecutionMode::Privileged)
            .await?;

        info!("[Brocade] Port-channel '{channel_name}' configured.");
        Ok(())
    }

    async fn clear_port_channel(&self, channel_name: &str) -> Result<(), Error> {
        info!("[Brocade] Clearing port-channel: {channel_name}");

        let commands = vec![
            "configure terminal".to_string(),
            format!("no lag {channel_name}"),
            "write memory".to_string(),
        ];
        self.shell
            .run_commands(commands, ExecutionMode::Privileged)
            .await?;

        info!("[Brocade] Port-channel '{channel_name}' cleared.");
        Ok(())
    }
}
```
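On FastIron, `create_port_channel` drives the `lag <name> static id <id>` configuration flow assembled by `build_port_channel_commands`. As a hedged illustration derived from the code above, and assuming `PortLocation` renders as `stack/slot/port` (e.g. `1/0/1`), this is the sequence expected for a hypothetical LAG `HARMONY_LAG` with id 5 and two member ports:

```rust
/// Illustrative only: the expected return value of
/// build_port_channel_commands(5, "HARMONY_LAG", &[PortLocation(1, 0, 1), PortLocation(2, 0, 1)]).
fn expected_fast_iron_lag_commands() -> Vec<String> {
    [
        "configure terminal",
        "lag HARMONY_LAG static id 5",
        "ports ethernet 1/0/1",
        "ports ethernet 2/0/1",
        "primary-port 1/0/1",
        "deploy",
        "exit",
        "write memory",
        "exit",
    ]
    .iter()
    .map(|s| s.to_string())
    .collect()
}
```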
							
								
								
									
**brocade/src/lib.rs** · new file · 338 lines
```rust
@@ -0,0 +1,338 @@
use std::net::IpAddr;
use std::{
    fmt::{self, Display},
    time::Duration,
};

use crate::network_operating_system::NetworkOperatingSystemClient;
use crate::{
    fast_iron::FastIronClient,
    shell::{BrocadeSession, BrocadeShell},
};

use async_trait::async_trait;
use harmony_types::net::MacAddress;
use harmony_types::switch::{PortDeclaration, PortLocation};
use regex::Regex;

mod fast_iron;
mod network_operating_system;
mod shell;
mod ssh;

#[derive(Default, Clone, Debug)]
pub struct BrocadeOptions {
    pub dry_run: bool,
    pub ssh: ssh::SshOptions,
    pub timeouts: TimeoutConfig,
}

#[derive(Clone, Debug)]
pub struct TimeoutConfig {
    pub shell_ready: Duration,
    pub command_execution: Duration,
    pub command_output: Duration,
    pub cleanup: Duration,
    pub message_wait: Duration,
}

impl Default for TimeoutConfig {
    fn default() -> Self {
        Self {
            shell_ready: Duration::from_secs(10),
            command_execution: Duration::from_secs(60), // Commands like `deploy` (for a LAG) can take a while
            command_output: Duration::from_secs(5), // Delay to start logging "waiting for command output"
            cleanup: Duration::from_secs(10),
            message_wait: Duration::from_millis(500),
        }
    }
}

enum ExecutionMode {
    Regular,
    Privileged,
}

#[derive(Clone, Debug)]
pub struct BrocadeInfo {
    os: BrocadeOs,
    version: String,
}

#[derive(Clone, Debug)]
pub enum BrocadeOs {
    NetworkOperatingSystem,
    FastIron,
    Unknown,
}

#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]
pub struct MacAddressEntry {
    pub vlan: u16,
    pub mac_address: MacAddress,
    pub port: PortDeclaration,
}

pub type PortChannelId = u8;

/// Represents a single physical or logical link connecting two switches within a stack or fabric.
///
/// This structure provides a standardized view of the topology regardless of the
/// underlying Brocade OS configuration (stacking vs. fabric).
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct InterSwitchLink {
    /// The local port on the switch where the topology command was run.
    pub local_port: PortLocation,
    /// The port on the directly connected neighboring switch.
    pub remote_port: Option<PortLocation>,
}

/// Represents the key running configuration status of a single switch interface.
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct InterfaceInfo {
    /// The full configuration name (e.g., "TenGigabitEthernet 1/0/1", "FortyGigabitEthernet 2/0/2").
    pub name: String,
    /// The physical location of the interface.
    pub port_location: PortLocation,
    /// The parsed type and name prefix of the interface.
    pub interface_type: InterfaceType,
    /// The primary configuration mode defining the interface's behavior (L2, L3, Fabric).
    pub operating_mode: Option<PortOperatingMode>,
    /// Indicates the current state of the interface.
    pub status: InterfaceStatus,
}

/// Categorizes the functional type of a switch interface.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum InterfaceType {
    /// Physical or virtual Ethernet interface (e.g., TenGigabitEthernet, FortyGigabitEthernet).
    Ethernet(String),
}

impl fmt::Display for InterfaceType {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            InterfaceType::Ethernet(name) => write!(f, "{name}"),
        }
    }
}

/// Defines the primary configuration mode of a switch interface, representing mutually exclusive roles.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum PortOperatingMode {
    /// The interface is explicitly configured for Brocade fabric roles (ISL or Trunk enabled).
    Fabric,
    /// The interface is configured for standard Layer 2 switching as Trunk port (`switchport mode trunk`).
    Trunk,
    /// The interface is configured for standard Layer 2 switching as Access port (`switchport` without trunk mode).
    Access,
}

/// Defines the possible status of an interface.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum InterfaceStatus {
    /// The interface is connected.
    Connected,
    /// The interface is not connected and is not expected to be.
    NotConnected,
    /// The interface is not connected but is expected to be (configured with `no shutdown`).
    SfpAbsent,
}

pub async fn init(
    ip_addresses: &[IpAddr],
    port: u16,
    username: &str,
    password: &str,
    options: Option<BrocadeOptions>,
) -> Result<Box<dyn BrocadeClient + Send + Sync>, Error> {
    let shell = BrocadeShell::init(ip_addresses, port, username, password, options).await?;

    let version_info = shell
        .with_session(ExecutionMode::Regular, |session| {
            Box::pin(get_brocade_info(session))
        })
        .await?;

    Ok(match version_info.os {
        BrocadeOs::FastIron => Box::new(FastIronClient::init(shell, version_info)),
        BrocadeOs::NetworkOperatingSystem => {
            Box::new(NetworkOperatingSystemClient::init(shell, version_info))
        }
        BrocadeOs::Unknown => todo!(),
    })
}

#[async_trait]
pub trait BrocadeClient: std::fmt::Debug {
    /// Retrieves the operating system and version details from the connected Brocade switch.
    ///
    /// This is typically the first call made after establishing a connection to determine
    /// the switch OS family (e.g., FastIron, NOS) for feature compatibility.
    ///
    /// # Returns
    ///
    /// A `BrocadeInfo` structure containing parsed OS type and version string.
    async fn version(&self) -> Result<BrocadeInfo, Error>;

    /// Retrieves the dynamically learned MAC address table from the switch.
    ///
    /// This is crucial for discovering where specific network endpoints (MAC addresses)
    /// are currently located on the physical ports.
    ///
    /// # Returns
    ///
    /// A vector of `MacAddressEntry`, where each entry typically contains VLAN, MAC address,
    /// and the associated port name/index.
    async fn get_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error>;

    /// Derives the physical connections used to link multiple switches together
    /// to form a single logical entity (stack, fabric, etc.).
    ///
    /// This abstracts the underlying configuration (e.g., stack ports, fabric ports)
    /// to return a standardized view of the topology.
    ///
    /// # Returns
    ///
    /// A vector of `InterSwitchLink` structs detailing which ports are used for stacking/fabric.
    /// If the switch is not stacked, returns an empty vector.
    async fn get_stack_topology(&self) -> Result<Vec<InterSwitchLink>, Error>;

    /// Retrieves the status for all interfaces
    ///
    /// # Returns
    ///
    /// A vector of `InterfaceInfo` structures.
    async fn get_interfaces(&self) -> Result<Vec<InterfaceInfo>, Error>;

    /// Configures a set of interfaces to be operated with a specified mode (access ports, ISL, etc.).
    async fn configure_interfaces(
        &self,
        interfaces: Vec<(String, PortOperatingMode)>,
    ) -> Result<(), Error>;

    /// Scans the existing configuration to find the next available (unused)
    /// Port-Channel ID (`lag` or `trunk`) for assignment.
    ///
    /// # Returns
    ///
    /// The smallest, unassigned `PortChannelId` within the supported range.
    async fn find_available_channel_id(&self) -> Result<PortChannelId, Error>;

    /// Creates and configures a new Port-Channel (Link Aggregation Group or LAG)
    /// using the specified channel ID and ports.
    ///
    /// The resulting configuration must be persistent (saved to startup-config).
    /// Assumes a static LAG configuration mode unless specified otherwise by the implementation.
    ///
    /// # Parameters
    ///
    /// * `channel_id`: The ID (e.g., 1-128) for the logical port channel.
    /// * `channel_name`: A descriptive name for the LAG (used in configuration context).
    /// * `ports`: A slice of `PortLocation` structs defining the physical member ports.
    async fn create_port_channel(
        &self,
        channel_id: PortChannelId,
        channel_name: &str,
        ports: &[PortLocation],
    ) -> Result<(), Error>;

    /// Removes all configuration associated with the specified Port-Channel name.
    ///
    /// This operation should be idempotent; attempting to clear a non-existent
    /// channel should succeed (or return a benign error).
    ///
    /// # Parameters
    ///
    /// * `channel_name`: The name of the Port-Channel (LAG) to delete.
    ///
    async fn clear_port_channel(&self, channel_name: &str) -> Result<(), Error>;
}

async fn get_brocade_info(session: &mut BrocadeSession) -> Result<BrocadeInfo, Error> {
    let output = session.run_command("show version").await?;

    if output.contains("Network Operating System") {
        let re = Regex::new(r"Network Operating System Version:\s*(?P<version>[a-zA-Z0-9.\-]+)")
            .expect("Invalid regex");
        let version = re
            .captures(&output)
            .and_then(|cap| cap.name("version"))
            .map(|m| m.as_str().to_string())
            .unwrap_or_default();

        return Ok(BrocadeInfo {
            os: BrocadeOs::NetworkOperatingSystem,
            version,
        });
    } else if output.contains("ICX") {
        let re = Regex::new(r"(?m)^\s*SW: Version\s*(?P<version>[a-zA-Z0-9.\-]+)")
            .expect("Invalid regex");
        let version = re
            .captures(&output)
            .and_then(|cap| cap.name("version"))
            .map(|m| m.as_str().to_string())
            .unwrap_or_default();

        return Ok(BrocadeInfo {
            os: BrocadeOs::FastIron,
            version,
        });
    }

    Err(Error::UnexpectedError("Unknown Brocade OS version".into()))
}

fn parse_brocade_mac_address(value: &str) -> Result<MacAddress, String> {
    let cleaned_mac = value.replace('.', "");

    if cleaned_mac.len() != 12 {
        return Err(format!("Invalid MAC address: {value}"));
    }

    let mut bytes = [0u8; 6];
    for (i, pair) in cleaned_mac.as_bytes().chunks(2).enumerate() {
        let byte_str = std::str::from_utf8(pair).map_err(|_| "Invalid UTF-8")?;
        bytes[i] =
            u8::from_str_radix(byte_str, 16).map_err(|_| format!("Invalid hex in MAC: {value}"))?;
    }

    Ok(MacAddress(bytes))
}

#[derive(Debug)]
pub enum Error {
    NetworkError(String),
    AuthenticationError(String),
    ConfigurationError(String),
    TimeoutError(String),
    UnexpectedError(String),
    CommandError(String),
}

impl Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Error::NetworkError(msg) => write!(f, "Network error: {msg}"),
            Error::AuthenticationError(msg) => write!(f, "Authentication error: {msg}"),
            Error::ConfigurationError(msg) => write!(f, "Configuration error: {msg}"),
            Error::TimeoutError(msg) => write!(f, "Timeout error: {msg}"),
            Error::UnexpectedError(msg) => write!(f, "Unexpected error: {msg}"),
            Error::CommandError(msg) => write!(f, "{msg}"),
        }
    }
}

impl From<Error> for String {
    fn from(val: Error) -> Self {
        format!("{val}")
    }
}

impl std::error::Error for Error {}

impl From<russh::Error> for Error {
    fn from(value: russh::Error) -> Self {
        Error::NetworkError(format!("Russh client error: {value}"))
    }
}
```
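Brocade CLIs print MAC addresses as three dot-separated groups of four hex digits (e.g. `748e.f8a1.0203`); `parse_brocade_mac_address` strips the dots and decodes six byte pairs. A small sketch of the expected mapping, written as an in-crate unit test since the helper is private (the sample address is made up):

```rust
#[cfg(test)]
mod brocade_mac_tests {
    use super::*;

    #[test]
    fn parses_dotted_brocade_mac() {
        // "748e.f8a1.0203" -> [0x74, 0x8e, 0xf8, 0xa1, 0x02, 0x03]
        let mac = parse_brocade_mac_address("748e.f8a1.0203").unwrap();
        assert_eq!(mac.0, [0x74, 0x8e, 0xf8, 0xa1, 0x02, 0x03]);

        // Anything that does not reduce to 12 hex digits is rejected.
        assert!(parse_brocade_mac_address("748e.f8a1").is_err());
    }
}
```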
							
								
								
									
**brocade/src/network_operating_system.rs** · new file · 333 lines
```rust
@@ -0,0 +1,333 @@
use std::str::FromStr;

use async_trait::async_trait;
use harmony_types::switch::{PortDeclaration, PortLocation};
use log::{debug, info};
use regex::Regex;

use crate::{
    BrocadeClient, BrocadeInfo, Error, ExecutionMode, InterSwitchLink, InterfaceInfo,
    InterfaceStatus, InterfaceType, MacAddressEntry, PortChannelId, PortOperatingMode,
    parse_brocade_mac_address, shell::BrocadeShell,
};

#[derive(Debug)]
pub struct NetworkOperatingSystemClient {
    shell: BrocadeShell,
    version: BrocadeInfo,
}

impl NetworkOperatingSystemClient {
    pub fn init(mut shell: BrocadeShell, version_info: BrocadeInfo) -> Self {
        shell.before_all(vec!["terminal length 0".into()]);

        Self {
            shell,
            version: version_info,
        }
    }

    fn parse_mac_entry(&self, line: &str) -> Option<Result<MacAddressEntry, Error>> {
        debug!("[Brocade] Parsing mac address entry: {line}");
        let parts: Vec<&str> = line.split_whitespace().collect();
        if parts.len() < 5 {
            return None;
        }

        let (vlan, mac_address, port) = match parts.len() {
            5 => (
                u16::from_str(parts[0]).ok()?,
                parse_brocade_mac_address(parts[1]).ok()?,
                parts[4].to_string(),
            ),
            _ => (
                u16::from_str(parts[0]).ok()?,
                parse_brocade_mac_address(parts[1]).ok()?,
                parts[5].to_string(),
            ),
        };

        let port =
            PortDeclaration::parse(&port).map_err(|e| Error::UnexpectedError(format!("{e}")));

        match port {
            Ok(p) => Some(Ok(MacAddressEntry {
                vlan,
                mac_address,
                port: p,
            })),
            Err(e) => Some(Err(e)),
        }
    }

    fn parse_inter_switch_link_entry(&self, line: &str) -> Option<Result<InterSwitchLink, Error>> {
        debug!("[Brocade] Parsing inter switch link entry: {line}");
        let parts: Vec<&str> = line.split_whitespace().collect();
        if parts.len() < 10 {
            return None;
        }

        let local_port = PortLocation::from_str(parts[2]).ok()?;
        let remote_port = PortLocation::from_str(parts[5]).ok()?;

        Some(Ok(InterSwitchLink {
            local_port,
            remote_port: Some(remote_port),
        }))
    }

    fn parse_interface_status_entry(&self, line: &str) -> Option<Result<InterfaceInfo, Error>> {
        debug!("[Brocade] Parsing interface status entry: {line}");
        let parts: Vec<&str> = line.split_whitespace().collect();
        if parts.len() < 6 {
            return None;
        }

        let interface_type = match parts[0] {
            "Fo" => InterfaceType::Ethernet("FortyGigabitEthernet".to_string()),
            "Te" => InterfaceType::Ethernet("TenGigabitEthernet".to_string()),
            _ => return None,
        };
        let port_location = PortLocation::from_str(parts[1]).ok()?;
        let status = match parts[2] {
            "connected" => InterfaceStatus::Connected,
            "notconnected" => InterfaceStatus::NotConnected,
            "sfpAbsent" => InterfaceStatus::SfpAbsent,
            _ => return None,
        };
        let operating_mode = match parts[3] {
            "ISL" => Some(PortOperatingMode::Fabric),
            "Trunk" => Some(PortOperatingMode::Trunk),
            "Access" => Some(PortOperatingMode::Access),
            "--" => None,
            _ => return None,
        };

        Some(Ok(InterfaceInfo {
            name: format!("{interface_type} {port_location}"),
            port_location,
            interface_type,
            operating_mode,
            status,
        }))
    }

    fn map_configure_interfaces_error(&self, err: Error) -> Error {
        debug!("[Brocade] {err}");

        if let Error::CommandError(message) = &err {
            if message.contains("switchport")
                && message.contains("Cannot configure aggregator member")
            {
                let re = Regex::new(r"\(conf-if-([a-zA-Z]+)-([\d/]+)\)#").unwrap();

                if let Some(caps) = re.captures(message) {
                    let interface_type = &caps[1];
                    let port_location = &caps[2];
                    let interface = format!("{interface_type} {port_location}");

                    return Error::CommandError(format!(
                        "Cannot configure interface '{interface}', it is a member of a port-channel (LAG)"
                    ));
                }
            }
        }

        err
    }
}

#[async_trait]
impl BrocadeClient for NetworkOperatingSystemClient {
    async fn version(&self) -> Result<BrocadeInfo, Error> {
        Ok(self.version.clone())
    }

    async fn get_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error> {
        let output = self
            .shell
            .run_command("show mac-address-table", ExecutionMode::Regular)
            .await?;

        output
            .lines()
            .skip(1)
            .filter_map(|line| self.parse_mac_entry(line))
            .collect()
    }

    async fn get_stack_topology(&self) -> Result<Vec<InterSwitchLink>, Error> {
        let output = self
            .shell
            .run_command("show fabric isl", ExecutionMode::Regular)
            .await?;

        output
            .lines()
            .skip(6)
            .filter_map(|line| self.parse_inter_switch_link_entry(line))
            .collect()
    }

    async fn get_interfaces(&self) -> Result<Vec<InterfaceInfo>, Error> {
        let output = self
            .shell
            .run_command(
                "show interface status rbridge-id all",
                ExecutionMode::Regular,
            )
            .await?;

        output
            .lines()
            .skip(2)
            .filter_map(|line| self.parse_interface_status_entry(line))
            .collect()
    }

    async fn configure_interfaces(
        &self,
        interfaces: Vec<(String, PortOperatingMode)>,
    ) -> Result<(), Error> {
        info!("[Brocade] Configuring {} interface(s)...", interfaces.len());

        let mut commands = vec!["configure terminal".to_string()];

        for interface in interfaces {
            commands.push(format!("interface {}", interface.0));

            match interface.1 {
                PortOperatingMode::Fabric => {
                    commands.push("fabric isl enable".into());
                    commands.push("fabric trunk enable".into());
                }
                PortOperatingMode::Trunk => {
                    commands.push("switchport".into());
                    commands.push("switchport mode trunk".into());
                    commands.push("no spanning-tree shutdown".into());
                    commands.push("no fabric isl enable".into());
                    commands.push("no fabric trunk enable".into());
                }
                PortOperatingMode::Access => {
                    commands.push("switchport".into());
                    commands.push("switchport mode access".into());
                    commands.push("switchport access vlan 1".into());
                    commands.push("no spanning-tree shutdown".into());
                    commands.push("no fabric isl enable".into());
                    commands.push("no fabric trunk enable".into());
                }
            }

            commands.push("no shutdown".into());
            commands.push("exit".into());
        }

        self.shell
            .run_commands(commands, ExecutionMode::Regular)
            .await
            .map_err(|err| self.map_configure_interfaces_error(err))?;

        info!("[Brocade] Interfaces configured.");

        Ok(())
    }

    async fn find_available_channel_id(&self) -> Result<PortChannelId, Error> {
        info!("[Brocade] Finding next available channel id...");

        let output = self
            .shell
            .run_command("show port-channel summary", ExecutionMode::Regular)
            .await?;

        let used_ids: Vec<u8> = output
            .lines()
            .skip(6)
            .filter_map(|line| {
                let parts: Vec<&str> = line.split_whitespace().collect();
                if parts.len() < 8 {
                    return None;
                }

                u8::from_str(parts[0]).ok()
            })
            .collect();

        let mut next_id: u8 = 1;
        loop {
            if !used_ids.contains(&next_id) {
                break;
            }
            next_id += 1;
        }

        info!("[Brocade] Found channel id: {next_id}");
        Ok(next_id)
    }

    async fn create_port_channel(
        &self,
        channel_id: PortChannelId,
        channel_name: &str,
        ports: &[PortLocation],
    ) -> Result<(), Error> {
        info!(
            "[Brocade] Configuring port-channel '{channel_id} {channel_name}' with ports: {}",
            ports
                .iter()
                .map(|p| format!("{p}"))
                .collect::<Vec<String>>()
                .join(", ")
        );

        let interfaces = self.get_interfaces().await?;

        let mut commands = vec![
            "configure terminal".into(),
            format!("interface port-channel {}", channel_id),
            "no shutdown".into(),
            "exit".into(),
        ];

        for port in ports {
            let interface = interfaces.iter().find(|i| i.port_location == *port);
            let Some(interface) = interface else {
                continue;
            };

            commands.push(format!("interface {}", interface.name));
            commands.push("no switchport".into());
            commands.push("no ip address".into());
            commands.push("no fabric isl enable".into());
            commands.push("no fabric trunk enable".into());
            commands.push(format!("channel-group {channel_id} mode active"));
            commands.push("no shutdown".into());
            commands.push("exit".into());
        }

        self.shell
            .run_commands(commands, ExecutionMode::Regular)
            .await?;

        info!("[Brocade] Port-channel '{channel_name}' configured.");

        Ok(())
    }

    async fn clear_port_channel(&self, channel_name: &str) -> Result<(), Error> {
        info!("[Brocade] Clearing port-channel: {channel_name}");

        let commands = vec![
            "configure terminal".into(),
            format!("no interface port-channel {}", channel_name),
            "exit".into(),
        ];

        self.shell
            .run_commands(commands, ExecutionMode::Regular)
            .await?;

        info!("[Brocade] Port-channel '{channel_name}' cleared.");
        Ok(())
    }
}
```
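The NOS parsers above are column-positional: `parse_interface_status_entry` expects at least six whitespace-separated fields, with the type abbreviation, port, link state, and mode in the first four. A hedged sketch of the value such a row would map to, assuming a line like `Te 1/0/5 connected Trunk ...` (the real `show interface status rbridge-id all` layout is not shown in this diff):

```rust
use brocade::{InterfaceInfo, InterfaceStatus, InterfaceType, PortOperatingMode};
use harmony_types::switch::PortLocation;

/// Illustrative only: the InterfaceInfo the NOS parser is expected to build for a
/// hypothetical status row "Te 1/0/5 connected Trunk 10G -- --".
fn expected_te_1_0_5() -> InterfaceInfo {
    InterfaceInfo {
        name: "TenGigabitEthernet 1/0/5".to_string(),
        port_location: PortLocation(1, 0, 5),
        interface_type: InterfaceType::Ethernet("TenGigabitEthernet".to_string()),
        operating_mode: Some(PortOperatingMode::Trunk),
        status: InterfaceStatus::Connected,
    }
}
```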
							
								
								
									
**brocade/src/shell.rs** · new file · 370 lines
						| @ -0,0 +1,370 @@ | ||||
| use std::net::IpAddr; | ||||
| use std::time::Duration; | ||||
| use std::time::Instant; | ||||
| 
 | ||||
| use crate::BrocadeOptions; | ||||
| use crate::Error; | ||||
| use crate::ExecutionMode; | ||||
| use crate::TimeoutConfig; | ||||
| use crate::ssh; | ||||
| 
 | ||||
| use log::debug; | ||||
| use log::info; | ||||
| use russh::ChannelMsg; | ||||
| use tokio::time::timeout; | ||||
| 
 | ||||
| #[derive(Debug)] | ||||
| pub struct BrocadeShell { | ||||
|     ip: IpAddr, | ||||
|     port: u16, | ||||
|     username: String, | ||||
|     password: String, | ||||
|     options: BrocadeOptions, | ||||
|     before_all_commands: Vec<String>, | ||||
|     after_all_commands: Vec<String>, | ||||
| } | ||||
| 
 | ||||
| impl BrocadeShell { | ||||
|     pub async fn init( | ||||
|         ip_addresses: &[IpAddr], | ||||
|         port: u16, | ||||
|         username: &str, | ||||
|         password: &str, | ||||
|         options: Option<BrocadeOptions>, | ||||
|     ) -> Result<Self, Error> { | ||||
|         let ip = ip_addresses | ||||
|             .first() | ||||
|             .ok_or_else(|| Error::ConfigurationError("No IP addresses provided".to_string()))?; | ||||
| 
 | ||||
|         let base_options = options.unwrap_or_default(); | ||||
|         let options = ssh::try_init_client(username, password, ip, base_options).await?; | ||||
| 
 | ||||
|         Ok(Self { | ||||
|             ip: *ip, | ||||
|             port, | ||||
|             username: username.to_string(), | ||||
|             password: password.to_string(), | ||||
|             before_all_commands: vec![], | ||||
|             after_all_commands: vec![], | ||||
|             options, | ||||
|         }) | ||||
|     } | ||||
| 
 | ||||
|     pub async fn open_session(&self, mode: ExecutionMode) -> Result<BrocadeSession, Error> { | ||||
|         BrocadeSession::open( | ||||
|             self.ip, | ||||
|             self.port, | ||||
|             &self.username, | ||||
|             &self.password, | ||||
|             self.options.clone(), | ||||
|             mode, | ||||
|         ) | ||||
|         .await | ||||
|     } | ||||
| 
 | ||||
|     pub async fn with_session<F, R>(&self, mode: ExecutionMode, callback: F) -> Result<R, Error> | ||||
|     where | ||||
|         F: FnOnce( | ||||
|             &mut BrocadeSession, | ||||
|         ) -> std::pin::Pin< | ||||
|             Box<dyn std::future::Future<Output = Result<R, Error>> + Send + '_>, | ||||
|         >, | ||||
|     { | ||||
|         let mut session = self.open_session(mode).await?; | ||||
| 
 | ||||
|         let _ = session.run_commands(self.before_all_commands.clone()).await; | ||||
|         let result = callback(&mut session).await; | ||||
|         let _ = session.run_commands(self.after_all_commands.clone()).await; | ||||
| 
 | ||||
|         session.close().await?; | ||||
|         result | ||||
|     } | ||||
| 
 | ||||
|     pub async fn run_command(&self, command: &str, mode: ExecutionMode) -> Result<String, Error> { | ||||
|         let mut session = self.open_session(mode).await?; | ||||
| 
 | ||||
|         let _ = session.run_commands(self.before_all_commands.clone()).await; | ||||
|         let result = session.run_command(command).await; | ||||
|         let _ = session.run_commands(self.after_all_commands.clone()).await; | ||||
| 
 | ||||
|         session.close().await?; | ||||
|         result | ||||
|     } | ||||
| 
 | ||||
|     pub async fn run_commands( | ||||
|         &self, | ||||
|         commands: Vec<String>, | ||||
|         mode: ExecutionMode, | ||||
|     ) -> Result<(), Error> { | ||||
|         let mut session = self.open_session(mode).await?; | ||||
| 
 | ||||
|         let _ = session.run_commands(self.before_all_commands.clone()).await; | ||||
|         let result = session.run_commands(commands).await; | ||||
|         let _ = session.run_commands(self.after_all_commands.clone()).await; | ||||
| 
 | ||||
|         session.close().await?; | ||||
|         result | ||||
|     } | ||||
| 
 | ||||
|     pub fn before_all(&mut self, commands: Vec<String>) { | ||||
|         self.before_all_commands = commands; | ||||
|     } | ||||
| 
 | ||||
|     pub fn after_all(&mut self, commands: Vec<String>) { | ||||
|         self.after_all_commands = commands; | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| /// An interactive shell session on the switch, opened over an SSH channel and | ||||
| /// optionally elevated to privileged (enable) mode. | ||||
| pub struct BrocadeSession { | ||||
|     pub channel: russh::Channel<russh::client::Msg>, | ||||
|     pub mode: ExecutionMode, | ||||
|     pub options: BrocadeOptions, | ||||
| } | ||||
| 
 | ||||
| impl BrocadeSession { | ||||
|     pub async fn open( | ||||
|         ip: IpAddr, | ||||
|         port: u16, | ||||
|         username: &str, | ||||
|         password: &str, | ||||
|         options: BrocadeOptions, | ||||
|         mode: ExecutionMode, | ||||
|     ) -> Result<Self, Error> { | ||||
|         let client = ssh::create_client(ip, port, username, password, &options).await?; | ||||
|         let mut channel = client.channel_open_session().await?; | ||||
| 
 | ||||
|         channel | ||||
|             .request_pty(false, "vt100", 80, 24, 0, 0, &[]) | ||||
|             .await?; | ||||
|         channel.request_shell(false).await?; | ||||
| 
 | ||||
|         wait_for_shell_ready(&mut channel, &options.timeouts).await?; | ||||
| 
 | ||||
|         if let ExecutionMode::Privileged = mode { | ||||
|             try_elevate_session(&mut channel, username, password, &options.timeouts).await?; | ||||
|         } | ||||
| 
 | ||||
|         Ok(Self { | ||||
|             channel, | ||||
|             mode, | ||||
|             options, | ||||
|         }) | ||||
|     } | ||||
| 
 | ||||
|     pub async fn close(&mut self) -> Result<(), Error> { | ||||
|         debug!("[Brocade] Closing session..."); | ||||
| 
 | ||||
|         self.channel.data(&b"exit\n"[..]).await?; | ||||
|         if let ExecutionMode::Privileged = self.mode { | ||||
|             self.channel.data(&b"exit\n"[..]).await?; | ||||
|         } | ||||
| 
 | ||||
|         let start = Instant::now(); | ||||
|         while start.elapsed() < self.options.timeouts.cleanup { | ||||
|             match timeout(self.options.timeouts.message_wait, self.channel.wait()).await { | ||||
|                 Ok(Some(ChannelMsg::Close)) => break, | ||||
|                 Ok(Some(_)) => continue, | ||||
|                 Ok(None) | Err(_) => break, | ||||
|             } | ||||
|         } | ||||
| 
 | ||||
|         debug!("[Brocade] Session closed."); | ||||
|         Ok(()) | ||||
|     } | ||||
| 
 | ||||
|     pub async fn run_command(&mut self, command: &str) -> Result<String, Error> { | ||||
|         if self.should_skip_command(command) { | ||||
|             return Ok(String::new()); | ||||
|         } | ||||
| 
 | ||||
|         debug!("[Brocade] Running command: '{command}'..."); | ||||
| 
 | ||||
|         self.channel | ||||
|             .data(format!("{}\n", command).as_bytes()) | ||||
|             .await?; | ||||
|         tokio::time::sleep(Duration::from_millis(100)).await; | ||||
| 
 | ||||
|         let output = self.collect_command_output().await?; | ||||
|         let output = String::from_utf8(output) | ||||
|             .map_err(|_| Error::UnexpectedError("Invalid UTF-8 in command output".to_string()))?; | ||||
| 
 | ||||
|         self.check_for_command_errors(&output, command)?; | ||||
|         Ok(output) | ||||
|     } | ||||
| 
 | ||||
|     pub async fn run_commands(&mut self, commands: Vec<String>) -> Result<(), Error> { | ||||
|         for command in commands { | ||||
|             self.run_command(&command).await?; | ||||
|         } | ||||
|         Ok(()) | ||||
|     } | ||||
| 
 | ||||
|     fn should_skip_command(&self, command: &str) -> bool { | ||||
|         if (command.starts_with("write") || command.starts_with("deploy")) && self.options.dry_run { | ||||
|             info!("[Brocade] Dry-run mode enabled, skipping command: {command}"); | ||||
|             return true; | ||||
|         } | ||||
|         false | ||||
|     } | ||||
| 
 | ||||
|     /// Reads channel data until a CLI prompt ('>' or '#') shows up in the output, | ||||
|     /// the channel closes, or the command-execution timeout is reached. | ||||
|     async fn collect_command_output(&mut self) -> Result<Vec<u8>, Error> { | ||||
|         let mut output = Vec::new(); | ||||
|         let start = Instant::now(); | ||||
|         let read_timeout = Duration::from_millis(500); | ||||
|         let log_interval = Duration::from_secs(5); | ||||
|         let mut last_log = Instant::now(); | ||||
| 
 | ||||
|         loop { | ||||
|             if start.elapsed() > self.options.timeouts.command_execution { | ||||
|                 return Err(Error::TimeoutError( | ||||
|                     "Timeout waiting for command completion.".into(), | ||||
|                 )); | ||||
|             } | ||||
| 
 | ||||
|             if start.elapsed() > self.options.timeouts.command_output | ||||
|                 && last_log.elapsed() > log_interval | ||||
|             { | ||||
|                 info!("[Brocade] Waiting for command output..."); | ||||
|                 last_log = Instant::now(); | ||||
|             } | ||||
| 
 | ||||
|             match timeout(read_timeout, self.channel.wait()).await { | ||||
|                 Ok(Some(ChannelMsg::Data { data } | ChannelMsg::ExtendedData { data, .. })) => { | ||||
|                     output.extend_from_slice(&data); | ||||
|                     let current_output = String::from_utf8_lossy(&output); | ||||
|                     if current_output.contains('>') || current_output.contains('#') { | ||||
|                         return Ok(output); | ||||
|                     } | ||||
|                 } | ||||
|                 Ok(Some(ChannelMsg::Eof | ChannelMsg::Close)) => return Ok(output), | ||||
|                 Ok(Some(ChannelMsg::ExitStatus { exit_status })) => { | ||||
|                     debug!("[Brocade] Command exit status: {exit_status}"); | ||||
|                 } | ||||
|                 Ok(Some(_)) => continue, | ||||
|                 Ok(None) | Err(_) => { | ||||
|                     if output.is_empty() { | ||||
|                         if let Ok(None) = timeout(read_timeout, self.channel.wait()).await { | ||||
|                             break; | ||||
|                         } | ||||
|                         continue; | ||||
|                     } | ||||
| 
 | ||||
|                     tokio::time::sleep(Duration::from_millis(100)).await; | ||||
|                     let current_output = String::from_utf8_lossy(&output); | ||||
|                     if current_output.contains('>') || current_output.contains('#') { | ||||
|                         return Ok(output); | ||||
|                     } | ||||
|                 } | ||||
|             } | ||||
|         } | ||||
| 
 | ||||
|         Ok(output) | ||||
|     } | ||||
| 
 | ||||
|     fn check_for_command_errors(&self, output: &str, command: &str) -> Result<(), Error> { | ||||
|         const ERROR_PATTERNS: &[&str] = &[ | ||||
|             "invalid input", | ||||
|             "syntax error", | ||||
|             "command not found", | ||||
|             "unknown command", | ||||
|             "permission denied", | ||||
|             "access denied", | ||||
|             "authentication failed", | ||||
|             "configuration error", | ||||
|             "failed to", | ||||
|             "error:", | ||||
|         ]; | ||||
| 
 | ||||
|         let output_lower = output.to_lowercase(); | ||||
|         if ERROR_PATTERNS.iter().any(|&p| output_lower.contains(p)) { | ||||
|             return Err(Error::CommandError(format!( | ||||
|                 "Command error: {}", | ||||
|                 output.trim() | ||||
|             ))); | ||||
|         } | ||||
| 
 | ||||
|         if !command.starts_with("show") && output.trim().is_empty() { | ||||
|             return Err(Error::CommandError(format!( | ||||
|                 "Command '{command}' produced no output" | ||||
|             ))); | ||||
|         } | ||||
| 
 | ||||
|         Ok(()) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| /// Drains the login banner until the shell prompt ('>' or '#') is seen or the | ||||
| /// shell-ready timeout elapses. | ||||
| async fn wait_for_shell_ready( | ||||
|     channel: &mut russh::Channel<russh::client::Msg>, | ||||
|     timeouts: &TimeoutConfig, | ||||
| ) -> Result<(), Error> { | ||||
|     let mut buffer = Vec::new(); | ||||
|     let start = Instant::now(); | ||||
| 
 | ||||
|     while start.elapsed() < timeouts.shell_ready { | ||||
|         match timeout(timeouts.message_wait, channel.wait()).await { | ||||
|             Ok(Some(ChannelMsg::Data { data })) => { | ||||
|                 buffer.extend_from_slice(&data); | ||||
|                 let output = String::from_utf8_lossy(&buffer); | ||||
|                 let output = output.trim(); | ||||
|                 if output.ends_with('>') || output.ends_with('#') { | ||||
|                     debug!("[Brocade] Shell ready"); | ||||
|                     return Ok(()); | ||||
|                 } | ||||
|             } | ||||
|             Ok(Some(_)) => continue, | ||||
|             Ok(None) => break, | ||||
|             Err(_) => continue, | ||||
|         } | ||||
|     } | ||||
|     Ok(()) | ||||
| } | ||||
| 
 | ||||
| /// Sends `enable` and answers the username/password prompts until a privileged | ||||
| /// prompt ('#') is seen; fails if the switch falls back to an unprivileged prompt. | ||||
| async fn try_elevate_session( | ||||
|     channel: &mut russh::Channel<russh::client::Msg>, | ||||
|     username: &str, | ||||
|     password: &str, | ||||
|     timeouts: &TimeoutConfig, | ||||
| ) -> Result<(), Error> { | ||||
|     channel.data(&b"enable\n"[..]).await?; | ||||
|     let start = Instant::now(); | ||||
|     let mut buffer = Vec::new(); | ||||
| 
 | ||||
|     while start.elapsed() < timeouts.shell_ready { | ||||
|         match timeout(timeouts.message_wait, channel.wait()).await { | ||||
|             Ok(Some(ChannelMsg::Data { data })) => { | ||||
|                 buffer.extend_from_slice(&data); | ||||
|                 let output = String::from_utf8_lossy(&buffer); | ||||
| 
 | ||||
|                 if output.ends_with('#') { | ||||
|                     debug!("[Brocade] Privileged mode established"); | ||||
|                     return Ok(()); | ||||
|                 } | ||||
| 
 | ||||
|                 if output.contains("User Name:") { | ||||
|                     channel.data(format!("{}\n", username).as_bytes()).await?; | ||||
|                     buffer.clear(); | ||||
|                 } else if output.contains("Password:") { | ||||
|                     channel.data(format!("{}\n", password).as_bytes()).await?; | ||||
|                     buffer.clear(); | ||||
|                 } else if output.contains('>') { | ||||
|                     return Err(Error::AuthenticationError( | ||||
|                         "Enable authentication failed".into(), | ||||
|                     )); | ||||
|                 } | ||||
|             } | ||||
|             Ok(Some(_)) => continue, | ||||
|             Ok(None) => break, | ||||
|             Err(_) => continue, | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     let output = String::from_utf8_lossy(&buffer); | ||||
|     if output.ends_with('#') { | ||||
|         debug!("[Brocade] Privileged mode established"); | ||||
|         Ok(()) | ||||
|     } else { | ||||
|         Err(Error::AuthenticationError(format!( | ||||
|             "Enable failed. Output:\n{output}" | ||||
|         ))) | ||||
|     } | ||||
| } | ||||
							
								
								
									
113    brocade/src/ssh.rs    Normal file
						| @ -0,0 +1,113 @@ | ||||
| use std::borrow::Cow; | ||||
| use std::sync::Arc; | ||||
| 
 | ||||
| use async_trait::async_trait; | ||||
| use russh::client::Handler; | ||||
| use russh::kex::DH_G1_SHA1; | ||||
| use russh::kex::ECDH_SHA2_NISTP256; | ||||
| use russh_keys::key::SSH_RSA; | ||||
| 
 | ||||
| use super::BrocadeOptions; | ||||
| use super::Error; | ||||
| 
 | ||||
| #[derive(Default, Clone, Debug)] | ||||
| pub struct SshOptions { | ||||
|     pub preferred_algorithms: russh::Preferred, | ||||
| } | ||||
| 
 | ||||
| impl SshOptions { | ||||
|     fn ecdhsa_sha2_nistp256() -> Self { | ||||
|         Self { | ||||
|             preferred_algorithms: russh::Preferred { | ||||
|                 kex: Cow::Borrowed(&[ECDH_SHA2_NISTP256]), | ||||
|                 key: Cow::Borrowed(&[SSH_RSA]), | ||||
|                 ..Default::default() | ||||
|             }, | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     fn legacy() -> Self { | ||||
|         Self { | ||||
|             preferred_algorithms: russh::Preferred { | ||||
|                 kex: Cow::Borrowed(&[DH_G1_SHA1]), | ||||
|                 key: Cow::Borrowed(&[SSH_RSA]), | ||||
|                 ..Default::default() | ||||
|             }, | ||||
|         } | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| pub struct Client; | ||||
| 
 | ||||
| #[async_trait] | ||||
| impl Handler for Client { | ||||
|     type Error = Error; | ||||
| 
 | ||||
|     async fn check_server_key( | ||||
|         &mut self, | ||||
|         _server_public_key: &russh_keys::key::PublicKey, | ||||
|     ) -> Result<bool, Self::Error> { | ||||
|         Ok(true) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| /// Probes the switch with a sequence of key-exchange configurations (defaults | ||||
| /// first, then ECDH/RSA, then legacy DH group1) and returns the first | ||||
| /// `BrocadeOptions` that connects successfully. | ||||
| pub async fn try_init_client( | ||||
|     username: &str, | ||||
|     password: &str, | ||||
|     ip: &std::net::IpAddr, | ||||
|     base_options: BrocadeOptions, | ||||
| ) -> Result<BrocadeOptions, Error> { | ||||
|     let ssh_options = vec![ | ||||
|         SshOptions::default(), | ||||
|         SshOptions::ecdhsa_sha2_nistp256(), | ||||
|         SshOptions::legacy(), | ||||
|     ]; | ||||
| 
 | ||||
|     for ssh in ssh_options { | ||||
|         let opts = BrocadeOptions { | ||||
|             ssh, | ||||
|             ..base_options.clone() | ||||
|         }; | ||||
|         let client = create_client(*ip, 22, username, password, &opts).await; | ||||
| 
 | ||||
|         match client { | ||||
|             Ok(_) => { | ||||
|                 return Ok(opts); | ||||
|             } | ||||
|             Err(e) => match e { | ||||
|                 Error::NetworkError(e) => { | ||||
|                     if e.contains("No common key exchange algorithm") { | ||||
|                         continue; | ||||
|                     } else { | ||||
|                         return Err(Error::NetworkError(e)); | ||||
|                     } | ||||
|                 } | ||||
|                 _ => return Err(e), | ||||
|             }, | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     Err(Error::NetworkError( | ||||
|         "Could not establish ssh connection: wrong key exchange algorithm)".to_string(), | ||||
|     )) | ||||
| } | ||||
| 
 | ||||
| pub async fn create_client( | ||||
|     ip: std::net::IpAddr, | ||||
|     port: u16, | ||||
|     username: &str, | ||||
|     password: &str, | ||||
|     options: &BrocadeOptions, | ||||
| ) -> Result<russh::client::Handle<Client>, Error> { | ||||
|     let config = russh::client::Config { | ||||
|         preferred: options.ssh.preferred_algorithms.clone(), | ||||
|         ..Default::default() | ||||
|     }; | ||||
|     let mut client = russh::client::connect(Arc::new(config), (ip, port), Client {}).await?; | ||||
|     if !client.authenticate_password(username, password).await? { | ||||
|         return Err(Error::AuthenticationError( | ||||
|             "ssh authentication failed".to_string(), | ||||
|         )); | ||||
|     } | ||||
|     Ok(client) | ||||
| } | ||||
							
								
								
									
3    demos/cncf-k8s-quebec-meetup-september-2025/.gitignore    vendored    Normal file
						| @ -0,0 +1,3 @@ | ||||
| .terraform | ||||
| *.tfstate | ||||
| venv | ||||
							
								
								
									
										
BIN    demos/cncf-k8s-quebec-meetup-september-2025/75_years_later.jpg    Normal file
						| After Width: | Height: | Size: 72 KiB | 
							
								
								
									
										
BIN    demos/cncf-k8s-quebec-meetup-september-2025/Happy_swimmer.jpg    Normal file
						| After Width: | Height: | Size: 38 KiB | 
| After Width: | Height: | Size: 38 KiB | 
| After Width: | Height: | Size: 52 KiB | 
| After Width: | Height: | Size: 62 KiB | 
| After Width: | Height: | Size: 64 KiB | 
| After Width: | Height: | Size: 100 KiB | 
							
								
								
									
5    demos/cncf-k8s-quebec-meetup-september-2025/README.md    Normal file
						| @ -0,0 +1,5 @@ | ||||
| To build: | ||||
| 
 | ||||
| ```bash | ||||
| npx @marp-team/marp-cli@latest -w slides.md | ||||
| ``` | ||||
							
								
								
									
										
BIN    demos/cncf-k8s-quebec-meetup-september-2025/ansible.jpg    Normal file
						| After Width: | Height: | Size: 11 KiB | 
| @ -0,0 +1,9 @@ | ||||
| To run this: | ||||
| 
 | ||||
| ```bash | ||||
| virtualenv venv | ||||
| source venv/bin/activate | ||||
| pip install ansible ansible-dev-tools | ||||
| ansible-lint download.yml | ||||
| ansible-playbook -i localhost download.yml | ||||
| ``` | ||||
| @ -0,0 +1,8 @@ | ||||
| - name: Test Ansible URL Validation | ||||
|   hosts: localhost | ||||
|   tasks: | ||||
|     - name: Download a file | ||||
|       ansible.builtin.get_url: | ||||
|         url: "http:/wikipedia.org/" | ||||
|         dest: "/tmp/ansible-test/wikipedia.html" | ||||
|         mode: '0900' | ||||
| After Width: | Height: | Size: 22 KiB | 
							
								
								
									
										
BIN    demos/cncf-k8s-quebec-meetup-september-2025/ansible_fail.jpg    Normal file
						| After Width: | Height: | Size: 275 KiB | 
| After Width: | Height: | Size: 212 KiB | 
| After Width: | Height: | Size: 384 KiB | 
| After Width: | Height: | Size: 8.3 KiB | 
							
								
								
									
195    demos/cncf-k8s-quebec-meetup-september-2025/slides.html    Normal file
241    demos/cncf-k8s-quebec-meetup-september-2025/slides.md    Normal file
						| @ -0,0 +1,241 @@ | ||||
| --- | ||||
| theme: uncover | ||||
| --- | ||||
| 
 | ||||
| # Here is the story of Petit Poisson | ||||
| 
 | ||||
| --- | ||||
| 
 | ||||
| <img src="./Happy_swimmer.jpg" width="600"/> | ||||
| 
 | ||||
| --- | ||||
| 
 | ||||
| <img src="./happy_landscape_swimmer.jpg" width="1000"/> | ||||
| 
 | ||||
| --- | ||||
| 
 | ||||
| <img src="./Happy_swimmer.jpg" width="200"/> | ||||
| 
 | ||||
| <img src="./tryrust.org.png" width="600"/> | ||||
| 
 | ||||
| [https://tryrust.org](https://tryrust.org) | ||||
| 
 | ||||
| --- | ||||
| 
 | ||||
| <img src="./texto_deploy_prod_1.png" width="600"/> | ||||
| 
 | ||||
| --- | ||||
| 
 | ||||
| <img src="./texto_deploy_prod_2.png" width="600"/> | ||||
| 
 | ||||
| --- | ||||
| 
 | ||||
| <img src="./texto_deploy_prod_3.png" width="600"/> | ||||
| 
 | ||||
| --- | ||||
| 
 | ||||
| <img src="./texto_deploy_prod_4.png" width="600"/> | ||||
| 
 | ||||
| --- | ||||
| 
 | ||||
| ## Demo time | ||||
| 
 | ||||
| --- | ||||
| 
 | ||||
| <img src="./Happy_swimmer_sunglasses.jpg" width="1000"/> | ||||
| 
 | ||||
| --- | ||||
| 
 | ||||
| <img src="./texto_download_wikipedia.png" width="600"/> | ||||
| 
 | ||||
| --- | ||||
| 
 | ||||
| <img src="./ansible.jpg" width="200"/> | ||||
| 
 | ||||
| ## Ansible❓ | ||||
| 
 | ||||
| --- | ||||
| 
 | ||||
| <img src="./Happy_swimmer.jpg" width="200"/> | ||||
| 
 | ||||
| ```yaml | ||||
| - name: Download wikipedia | ||||
|   hosts: localhost | ||||
|   tasks: | ||||
|     - name: Download a file | ||||
|       ansible.builtin.get_url: | ||||
|         url: "https:/wikipedia.org/" | ||||
|         dest: "/tmp/ansible-test/wikipedia.html" | ||||
|         mode: '0900' | ||||
| ``` | ||||
| 
 | ||||
| --- | ||||
| 
 | ||||
| <img src="./Happy_swimmer.jpg" width="200"/> | ||||
| 
 | ||||
| ``` | ||||
| ansible-lint download.yml | ||||
| 
 | ||||
| Passed: 0 failure(s), 0 warning(s) on 1 files. Last profile that met the validation criteria was 'production'. | ||||
| ``` | ||||
| 
 | ||||
| --- | ||||
| 
 | ||||
| ``` | ||||
| git push | ||||
| ``` | ||||
| 
 | ||||
| --- | ||||
| 
 | ||||
| <img src="./75_years_later.jpg" width="1100"/> | ||||
| 
 | ||||
| --- | ||||
| 
 | ||||
| <img src="./texto_download_wikipedia_fail.png" width="600"/> | ||||
| 
 | ||||
| --- | ||||
| 
 | ||||
| <img src="./Happy_swimmer_reversed.jpg" width="600"/> | ||||
| 
 | ||||
| --- | ||||
| 
 | ||||
| <img src="./ansible_output_fail.jpg" width="1100"/> | ||||
| 
 | ||||
| --- | ||||
| 
 | ||||
| <img src="./Happy_swimmer_reversed_1hit.jpg" width="600"/> | ||||
| 
 | ||||
| --- | ||||
| 
 | ||||
| <img src="./ansible_crossed_out.jpg" width="400"/> | ||||
| 
 | ||||
| --- | ||||
| 
 | ||||
| 
 | ||||
| <img src="./terraform.jpg" width="400"/> | ||||
| 
 | ||||
| ## Terraform❓❗ | ||||
| 
 | ||||
| --- | ||||
| 
 | ||||
| <img src="./Happy_swimmer_reversed_1hit.jpg" width="200"/> | ||||
| <img src="./terraform.jpg" width="200"/> | ||||
| 
 | ||||
| ```tf | ||||
| provider "docker" {} | ||||
| 
 | ||||
| resource "docker_network" "invalid_network" { | ||||
|   name = "my-invalid-network" | ||||
| 
 | ||||
|   ipam_config { | ||||
|     subnet = "172.17.0.0/33" | ||||
|   } | ||||
| } | ||||
| ``` | ||||
| 
 | ||||
| --- | ||||
| 
 | ||||
| <img src="./Happy_swimmer_reversed_1hit.jpg" width="100"/> | ||||
| <img src="./terraform.jpg" width="200"/> | ||||
| 
 | ||||
| ``` | ||||
| terraform plan | ||||
| 
 | ||||
| Terraform used the selected providers to generate the following execution plan. | ||||
| Resource actions are indicated with the following symbols: | ||||
|   + create | ||||
| 
 | ||||
| Terraform will perform the following actions: | ||||
| 
 | ||||
|   # docker_network.invalid_network will be created | ||||
|   + resource "docker_network" "invalid_network" { | ||||
|       + driver      = (known after apply) | ||||
|       + id          = (known after apply) | ||||
|       + internal    = (known after apply) | ||||
|       + ipam_driver = "default" | ||||
|       + name        = "my-invalid-network" | ||||
|       + options     = (known after apply) | ||||
|       + scope       = (known after apply) | ||||
| 
 | ||||
|       + ipam_config { | ||||
|           + subnet   = "172.17.0.0/33" | ||||
|             # (2 unchanged attributes hidden) | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
| Plan: 1 to add, 0 to change, 0 to destroy. | ||||
| ``` | ||||
| 
 | ||||
| --- | ||||
| 
 | ||||
| ✅ | ||||
| 
 | ||||
| --- | ||||
| 
 | ||||
| ``` | ||||
| terraform apply | ||||
| ``` | ||||
| 
 | ||||
| --- | ||||
| 
 | ||||
| ``` | ||||
| Plan: 1 to add, 0 to change, 0 to destroy. | ||||
| 
 | ||||
| Do you want to perform these actions? | ||||
|   Terraform will perform the actions described above. | ||||
|   Only 'yes' will be accepted to approve. | ||||
| 
 | ||||
|   Enter a value: yes | ||||
| ``` | ||||
| 
 | ||||
| --- | ||||
| 
 | ||||
| ``` | ||||
| docker_network.invalid_network: Creating... | ||||
| ╷ | ||||
| │ Error: Unable to create network: Error response from daemon: invalid network config: | ||||
| │ invalid subnet 172.17.0.0/33: invalid CIDR block notation | ||||
| │ | ||||
| │   with docker_network.invalid_network, | ||||
| │   on main.tf line 11, in resource "docker_network" "invalid_network": | ||||
| │   11: resource "docker_network" "invalid_network" { | ||||
| │ | ||||
| ╵ | ||||
| ``` | ||||
| 
 | ||||
| --- | ||||
| 
 | ||||
| 
 | ||||
| <img src="./Happy_swimmer_reversed_fullhit.jpg" width="1100"/> | ||||
| 
 | ||||
| --- | ||||
| 
 | ||||
| <img src="./ansible_crossed_out.jpg" width="300"/> | ||||
| <img src="./terraform_crossed_out.jpg" width="400"/> | ||||
| <img src="./Happy_swimmer_reversed_fullhit.jpg" width="300"/> | ||||
| 
 | ||||
| --- | ||||
| 
 | ||||
| ## Harmony❓❗ | ||||
| 
 | ||||
| --- | ||||
| 
 | ||||
| Demo time | ||||
| 
 | ||||
| --- | ||||
| 
 | ||||
| <img src="./Happy_swimmer.jpg" width="300"/> | ||||
| 
 | ||||
| --- | ||||
| 
 | ||||
| # 🎼  | ||||
| 
 | ||||
| Harmony: [https://git.nationtech.io/nationtech/harmony](https://git.nationtech.io/nationtech/harmony) | ||||
| 
 | ||||
| 
 | ||||
|  <img src="./qrcode_gitea_nationtech.png" width="120"/> | ||||
| 
 | ||||
| 
 | ||||
| LinkedIn: [https://www.linkedin.com/in/jean-gabriel-gill-couture/](https://www.linkedin.com/in/jean-gabriel-gill-couture/) | ||||
| 
 | ||||
| Email: [jg@nationtech.io](mailto:jg@nationtech.io) | ||||
							
								
								
									
										
BIN    demos/cncf-k8s-quebec-meetup-september-2025/terraform.jpg    Normal file
						| After Width: | Height: | Size: 11 KiB | 
| @ -0,0 +1,40 @@ | ||||
| # This file is maintained automatically by "terraform init". | ||||
| # Manual edits may be lost in future updates. | ||||
| 
 | ||||
| provider "registry.terraform.io/hashicorp/http" { | ||||
|   version = "3.5.0" | ||||
|   hashes = [ | ||||
|     "h1:8bUoPwS4hahOvzCBj6b04ObLVFXCEmEN8T/5eOHmWOM=", | ||||
|     "zh:047c5b4920751b13425efe0d011b3a23a3be97d02d9c0e3c60985521c9c456b7", | ||||
|     "zh:157866f700470207561f6d032d344916b82268ecd0cf8174fb11c0674c8d0736", | ||||
|     "zh:1973eb9383b0d83dd4fd5e662f0f16de837d072b64a6b7cd703410d730499476", | ||||
|     "zh:212f833a4e6d020840672f6f88273d62a564f44acb0c857b5961cdb3bbc14c90", | ||||
|     "zh:2c8034bc039fffaa1d4965ca02a8c6d57301e5fa9fff4773e684b46e3f78e76a", | ||||
|     "zh:5df353fc5b2dd31577def9cc1a4ebf0c9a9c2699d223c6b02087a3089c74a1c6", | ||||
|     "zh:672083810d4185076c81b16ad13d1224b9e6ea7f4850951d2ab8d30fa6e41f08", | ||||
|     "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", | ||||
|     "zh:7b4200f18abdbe39904b03537e1a78f21ebafe60f1c861a44387d314fda69da6", | ||||
|     "zh:843feacacd86baed820f81a6c9f7bd32cf302db3d7a0f39e87976ebc7a7cc2ee", | ||||
|     "zh:a9ea5096ab91aab260b22e4251c05f08dad2ed77e43e5e4fadcdfd87f2c78926", | ||||
|     "zh:d02b288922811739059e90184c7f76d45d07d3a77cc48d0b15fd3db14e928623", | ||||
|   ] | ||||
| } | ||||
| 
 | ||||
| provider "registry.terraform.io/hashicorp/local" { | ||||
|   version = "2.5.3" | ||||
|   hashes = [ | ||||
|     "h1:1Nkh16jQJMp0EuDmvP/96f5Unnir0z12WyDuoR6HjMo=", | ||||
|     "zh:284d4b5b572eacd456e605e94372f740f6de27b71b4e1fd49b63745d8ecd4927", | ||||
|     "zh:40d9dfc9c549e406b5aab73c023aa485633c1b6b730c933d7bcc2fa67fd1ae6e", | ||||
|     "zh:6243509bb208656eb9dc17d3c525c89acdd27f08def427a0dce22d5db90a4c8b", | ||||
|     "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", | ||||
|     "zh:885d85869f927853b6fe330e235cd03c337ac3b933b0d9ae827ec32fa1fdcdbf", | ||||
|     "zh:bab66af51039bdfcccf85b25fe562cbba2f54f6b3812202f4873ade834ec201d", | ||||
|     "zh:c505ff1bf9442a889ac7dca3ac05a8ee6f852e0118dd9a61796a2f6ff4837f09", | ||||
|     "zh:d36c0b5770841ddb6eaf0499ba3de48e5d4fc99f4829b6ab66b0fab59b1aaf4f", | ||||
|     "zh:ddb6a407c7f3ec63efb4dad5f948b54f7f4434ee1a2607a49680d494b1776fe1", | ||||
|     "zh:e0dafdd4500bec23d3ff221e3a9b60621c5273e5df867bc59ef6b7e41f5c91f6", | ||||
|     "zh:ece8742fd2882a8fc9d6efd20e2590010d43db386b920b2a9c220cfecc18de47", | ||||
|     "zh:f4c6b3eb8f39105004cf720e202f04f57e3578441cfb76ca27611139bc116a82", | ||||
|   ] | ||||
| } | ||||
| @ -0,0 +1,10 @@ | ||||
| provider "http" {} | ||||
| 
 | ||||
| data "http" "remote_file" { | ||||
|   url = "http:/example.com/file.txt" | ||||
| } | ||||
| 
 | ||||
| resource "local_file" "downloaded_file" { | ||||
|   content  = data.http.remote_file.body | ||||
|   filename = "${path.module}/downloaded_file.txt" | ||||
| } | ||||
| @ -0,0 +1,24 @@ | ||||
| # This file is maintained automatically by "terraform init". | ||||
| # Manual edits may be lost in future updates. | ||||
| 
 | ||||
| provider "registry.terraform.io/kreuzwerker/docker" { | ||||
|   version     = "3.0.2" | ||||
|   constraints = "~> 3.0.1" | ||||
|   hashes = [ | ||||
|     "h1:cT2ccWOtlfKYBUE60/v2/4Q6Stk1KYTNnhxSck+VPlU=", | ||||
|     "zh:15b0a2b2b563d8d40f62f83057d91acb02cd0096f207488d8b4298a59203d64f", | ||||
|     "zh:23d919de139f7cd5ebfd2ff1b94e6d9913f0977fcfc2ca02e1573be53e269f95", | ||||
|     "zh:38081b3fe317c7e9555b2aaad325ad3fa516a886d2dfa8605ae6a809c1072138", | ||||
|     "zh:4a9c5065b178082f79ad8160243369c185214d874ff5048556d48d3edd03c4da", | ||||
|     "zh:5438ef6afe057945f28bce43d76c4401254073de01a774760169ac1058830ac2", | ||||
|     "zh:60b7fadc287166e5c9873dfe53a7976d98244979e0ab66428ea0dea1ebf33e06", | ||||
|     "zh:61c5ec1cb94e4c4a4fb1e4a24576d5f39a955f09afb17dab982de62b70a9bdd1", | ||||
|     "zh:a38fe9016ace5f911ab00c88e64b156ebbbbfb72a51a44da3c13d442cd214710", | ||||
|     "zh:c2c4d2b1fd9ebb291c57f524b3bf9d0994ff3e815c0cd9c9bcb87166dc687005", | ||||
|     "zh:d567bb8ce483ab2cf0602e07eae57027a1a53994aba470fa76095912a505533d", | ||||
|     "zh:e83bf05ab6a19dd8c43547ce9a8a511f8c331a124d11ac64687c764ab9d5a792", | ||||
|     "zh:e90c934b5cd65516fbcc454c89a150bfa726e7cf1fe749790c7480bbeb19d387", | ||||
|     "zh:f05f167d2eaf913045d8e7b88c13757e3cf595dd5cd333057fdafc7c4b7fed62", | ||||
|     "zh:fcc9c1cea5ce85e8bcb593862e699a881bd36dffd29e2e367f82d15368659c3d", | ||||
|   ] | ||||
| } | ||||
| @ -0,0 +1,17 @@ | ||||
| terraform { | ||||
|   required_providers { | ||||
|     docker = { | ||||
|       source  = "kreuzwerker/docker" | ||||
|       version = "~> 3.0.1" # Adjust version as needed | ||||
|     } | ||||
|   } | ||||
| } | ||||
| provider "docker" {} | ||||
| 
 | ||||
| resource "docker_network" "invalid_network" { | ||||
|   name = "my-invalid-network" | ||||
| 
 | ||||
|   ipam_config { | ||||
|     subnet = "172.17.0.0/33" | ||||
|   } | ||||
| } | ||||
| After Width: | Height: | Size: 14 KiB | 
							
								
								
									
										
BIN    demos/cncf-k8s-quebec-meetup-september-2025/terraform_fail.jpg    Normal file
						| After Width: | Height: | Size: 144 KiB | 
| After Width: | Height: | Size: 58 KiB | 
| After Width: | Height: | Size: 56 KiB | 
| After Width: | Height: | Size: 71 KiB | 
| After Width: | Height: | Size: 81 KiB | 
| After Width: | Height: | Size: 87 KiB | 
| After Width: | Height: | Size: 88 KiB | 
| After Width: | Height: | Size: 48 KiB | 
							
								
								
									
										
BIN    demos/cncf-k8s-quebec-meetup-september-2025/tryrust.org.png    Normal file
						| After Width: | Height: | Size: 325 KiB | 
| @ -27,7 +27,6 @@ async fn main() { | ||||
|     }; | ||||
|     let application = Arc::new(RustWebapp { | ||||
|         name: "example-monitoring".to_string(), | ||||
|         domain: Url::Url(url::Url::parse("https://rustapp.harmony.example.com").unwrap()), | ||||
|         project_root: PathBuf::from("./examples/rust/webapp"), | ||||
|         framework: Some(RustWebFramework::Leptos), | ||||
|         service_port: 3000, | ||||
|  | ||||
| @ -17,3 +17,5 @@ harmony_secret = { path = "../../harmony_secret" } | ||||
| log = { workspace = true } | ||||
| env_logger = { workspace = true } | ||||
| url = { workspace = true } | ||||
| serde = { workspace = true } | ||||
| brocade = { path = "../../brocade" } | ||||
|  | ||||
| @ -3,12 +3,13 @@ use std::{ | ||||
|     sync::Arc, | ||||
| }; | ||||
| 
 | ||||
| use brocade::BrocadeOptions; | ||||
| use cidr::Ipv4Cidr; | ||||
| use harmony::{ | ||||
|     config::secret::SshKeyPair, | ||||
|     data::{FileContent, FilePath}, | ||||
|     hardware::{HostCategory, Location, PhysicalHost, SwitchGroup}, | ||||
|     infra::opnsense::OPNSenseManagementInterface, | ||||
|     infra::{brocade::BrocadeSwitchClient, opnsense::OPNSenseManagementInterface}, | ||||
|     inventory::Inventory, | ||||
|     modules::{ | ||||
|         http::StaticFilesHttpScore, | ||||
| @ -22,8 +23,9 @@ use harmony::{ | ||||
|     topology::{LogicalHost, UnmanagedRouter}, | ||||
| }; | ||||
| use harmony_macros::{ip, mac_address}; | ||||
| use harmony_secret::SecretManager; | ||||
| use harmony_secret::{Secret, SecretManager}; | ||||
| use harmony_types::net::Url; | ||||
| use serde::{Deserialize, Serialize}; | ||||
| 
 | ||||
| #[tokio::main] | ||||
| async fn main() { | ||||
| @ -32,6 +34,26 @@ async fn main() { | ||||
|         name: String::from("fw0"), | ||||
|     }; | ||||
| 
 | ||||
|     let switch_auth = SecretManager::get_or_prompt::<BrocadeSwitchAuth>() | ||||
|         .await | ||||
|         .expect("Failed to get credentials"); | ||||
| 
 | ||||
|     let switches: Vec<IpAddr> = vec![ip!("192.168.33.101")]; | ||||
|     let brocade_options = Some(BrocadeOptions { | ||||
|         dry_run: *harmony::config::DRY_RUN, | ||||
|         ..Default::default() | ||||
|     }); | ||||
|     let switch_client = BrocadeSwitchClient::init( | ||||
|         &switches, | ||||
|         &switch_auth.username, | ||||
|         &switch_auth.password, | ||||
|         brocade_options, | ||||
|     ) | ||||
|     .await | ||||
|     .expect("Failed to connect to switch"); | ||||
| 
 | ||||
|     let switch_client = Arc::new(switch_client); | ||||
| 
 | ||||
|     let opnsense = Arc::new( | ||||
|         harmony::infra::opnsense::OPNSenseFirewall::new(firewall, None, "root", "opnsense").await, | ||||
|     ); | ||||
| @ -39,6 +61,7 @@ async fn main() { | ||||
|     let gateway_ipv4 = Ipv4Addr::new(192, 168, 33, 1); | ||||
|     let gateway_ip = IpAddr::V4(gateway_ipv4); | ||||
|     let topology = harmony::topology::HAClusterTopology { | ||||
|         kubeconfig: None, | ||||
|         domain_name: "ncd0.harmony.mcd".to_string(), // TODO this must be set manually correctly
 | ||||
|         // when setting up the opnsense firewall
 | ||||
|         router: Arc::new(UnmanagedRouter::new( | ||||
| @ -83,7 +106,7 @@ async fn main() { | ||||
|                 name: "wk2".to_string(), | ||||
|             }, | ||||
|         ], | ||||
|         switch: vec![], | ||||
|         switch_client: switch_client.clone(), | ||||
|     }; | ||||
| 
 | ||||
|     let inventory = Inventory { | ||||
| @ -166,3 +189,9 @@ async fn main() { | ||||
|     .await | ||||
|     .unwrap(); | ||||
| } | ||||
| 
 | ||||
| #[derive(Secret, Serialize, Deserialize, Debug)] | ||||
| pub struct BrocadeSwitchAuth { | ||||
|     pub username: String, | ||||
|     pub password: String, | ||||
| } | ||||
|  | ||||
| @ -19,3 +19,4 @@ log = { workspace = true } | ||||
| env_logger = { workspace = true } | ||||
| url = { workspace = true } | ||||
| serde.workspace = true | ||||
| brocade = { path = "../../brocade" } | ||||
|  | ||||
| @ -1,7 +1,8 @@ | ||||
| use brocade::BrocadeOptions; | ||||
| use cidr::Ipv4Cidr; | ||||
| use harmony::{ | ||||
|     hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup}, | ||||
|     infra::opnsense::OPNSenseManagementInterface, | ||||
|     hardware::{Location, SwitchGroup}, | ||||
|     infra::{brocade::BrocadeSwitchClient, opnsense::OPNSenseManagementInterface}, | ||||
|     inventory::Inventory, | ||||
|     topology::{HAClusterTopology, LogicalHost, UnmanagedRouter}, | ||||
| }; | ||||
| @ -22,6 +23,26 @@ pub async fn get_topology() -> HAClusterTopology { | ||||
|         name: String::from("opnsense-1"), | ||||
|     }; | ||||
| 
 | ||||
|     let switch_auth = SecretManager::get_or_prompt::<BrocadeSwitchAuth>() | ||||
|         .await | ||||
|         .expect("Failed to get credentials"); | ||||
| 
 | ||||
|     let switches: Vec<IpAddr> = vec![ip!("192.168.1.101")]; // TODO: Adjust me
 | ||||
|     let brocade_options = Some(BrocadeOptions { | ||||
|         dry_run: *harmony::config::DRY_RUN, | ||||
|         ..Default::default() | ||||
|     }); | ||||
|     let switch_client = BrocadeSwitchClient::init( | ||||
|         &switches, | ||||
|         &switch_auth.username, | ||||
|         &switch_auth.password, | ||||
|         brocade_options, | ||||
|     ) | ||||
|     .await | ||||
|     .expect("Failed to connect to switch"); | ||||
| 
 | ||||
|     let switch_client = Arc::new(switch_client); | ||||
| 
 | ||||
|     let config = SecretManager::get_or_prompt::<OPNSenseFirewallConfig>().await; | ||||
|     let config = config.unwrap(); | ||||
| 
 | ||||
| @ -38,6 +59,7 @@ pub async fn get_topology() -> HAClusterTopology { | ||||
|     let gateway_ipv4 = ipv4!("192.168.1.1"); | ||||
|     let gateway_ip = IpAddr::V4(gateway_ipv4); | ||||
|     harmony::topology::HAClusterTopology { | ||||
|         kubeconfig: None, | ||||
|         domain_name: "demo.harmony.mcd".to_string(), | ||||
|         router: Arc::new(UnmanagedRouter::new( | ||||
|             gateway_ip, | ||||
| @ -58,7 +80,7 @@ pub async fn get_topology() -> HAClusterTopology { | ||||
|             name: "bootstrap".to_string(), | ||||
|         }, | ||||
|         workers: vec![], | ||||
|         switch: vec![], | ||||
|         switch_client: switch_client.clone(), | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| @ -75,3 +97,9 @@ pub fn get_inventory() -> Inventory { | ||||
|         control_plane_host: vec![], | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[derive(Secret, Serialize, Deserialize, Debug)] | ||||
| pub struct BrocadeSwitchAuth { | ||||
|     pub username: String, | ||||
|     pub password: String, | ||||
| } | ||||
|  | ||||
| @ -19,3 +19,4 @@ log = { workspace = true } | ||||
| env_logger = { workspace = true } | ||||
| url = { workspace = true } | ||||
| serde.workspace = true | ||||
| brocade = { path = "../../brocade" } | ||||
|  | ||||
| @ -1,13 +1,15 @@ | ||||
| use brocade::BrocadeOptions; | ||||
| use cidr::Ipv4Cidr; | ||||
| use harmony::{ | ||||
|     config::secret::OPNSenseFirewallCredentials, | ||||
|     hardware::{Location, SwitchGroup}, | ||||
|     infra::opnsense::OPNSenseManagementInterface, | ||||
|     infra::{brocade::BrocadeSwitchClient, opnsense::OPNSenseManagementInterface}, | ||||
|     inventory::Inventory, | ||||
|     topology::{HAClusterTopology, LogicalHost, UnmanagedRouter}, | ||||
| }; | ||||
| use harmony_macros::{ip, ipv4}; | ||||
| use harmony_secret::SecretManager; | ||||
| use harmony_secret::{Secret, SecretManager}; | ||||
| use serde::{Deserialize, Serialize}; | ||||
| use std::{net::IpAddr, sync::Arc}; | ||||
| 
 | ||||
| pub async fn get_topology() -> HAClusterTopology { | ||||
| @ -16,6 +18,26 @@ pub async fn get_topology() -> HAClusterTopology { | ||||
|         name: String::from("opnsense-1"), | ||||
|     }; | ||||
| 
 | ||||
|     let switch_auth = SecretManager::get_or_prompt::<BrocadeSwitchAuth>() | ||||
|         .await | ||||
|         .expect("Failed to get credentials"); | ||||
| 
 | ||||
|     let switches: Vec<IpAddr> = vec![ip!("192.168.1.101")]; // TODO: Adjust me
 | ||||
|     let brocade_options = Some(BrocadeOptions { | ||||
|         dry_run: *harmony::config::DRY_RUN, | ||||
|         ..Default::default() | ||||
|     }); | ||||
|     let switch_client = BrocadeSwitchClient::init( | ||||
|         &switches, | ||||
|         &switch_auth.username, | ||||
|         &switch_auth.password, | ||||
|         brocade_options, | ||||
|     ) | ||||
|     .await | ||||
|     .expect("Failed to connect to switch"); | ||||
| 
 | ||||
|     let switch_client = Arc::new(switch_client); | ||||
| 
 | ||||
|     let config = SecretManager::get_or_prompt::<OPNSenseFirewallCredentials>().await; | ||||
|     let config = config.unwrap(); | ||||
| 
 | ||||
| @ -32,6 +54,7 @@ pub async fn get_topology() -> HAClusterTopology { | ||||
|     let gateway_ipv4 = ipv4!("192.168.1.1"); | ||||
|     let gateway_ip = IpAddr::V4(gateway_ipv4); | ||||
|     harmony::topology::HAClusterTopology { | ||||
|         kubeconfig: None, | ||||
|         domain_name: "demo.harmony.mcd".to_string(), | ||||
|         router: Arc::new(UnmanagedRouter::new( | ||||
|             gateway_ip, | ||||
| @ -52,7 +75,7 @@ pub async fn get_topology() -> HAClusterTopology { | ||||
|             name: "cp0".to_string(), | ||||
|         }, | ||||
|         workers: vec![], | ||||
|         switch: vec![], | ||||
|         switch_client: switch_client.clone(), | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| @ -69,3 +92,9 @@ pub fn get_inventory() -> Inventory { | ||||
|         control_plane_host: vec![], | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[derive(Secret, Serialize, Deserialize, Debug)] | ||||
| pub struct BrocadeSwitchAuth { | ||||
|     pub username: String, | ||||
|     pub password: String, | ||||
| } | ||||
|  | ||||
							
								
								
									
14    examples/openbao/Cargo.toml    Normal file
						| @ -0,0 +1,14 @@ | ||||
| [package] | ||||
| name = "example-openbao" | ||||
| edition = "2024" | ||||
| version.workspace = true | ||||
| readme.workspace = true | ||||
| license.workspace = true | ||||
| 
 | ||||
| [dependencies] | ||||
| harmony = { path = "../../harmony" } | ||||
| harmony_cli = { path = "../../harmony_cli" } | ||||
| harmony_macros = { path = "../../harmony_macros" } | ||||
| harmony_types = { path = "../../harmony_types" } | ||||
| tokio.workspace = true | ||||
| url.workspace = true | ||||
							
								
								
									
7    examples/openbao/README.md    Normal file
						| @ -0,0 +1,7 @@ | ||||
| To install an OpenBao instance with Harmony, simply run `cargo run -p example-openbao`. | ||||
|  | ||||
| Depending on your environment configuration, it will either provision a local k3d cluster and deploy onto it, or deploy to a remote cluster. | ||||
|  | ||||
| Then follow the OpenBao documentation to initialize and unseal the instance; this makes OpenBao usable: | ||||
| 
 | ||||
| https://openbao.org/docs/platform/k8s/helm/run/ | ||||
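|  | ||||
| For reference, here is a minimal sketch of that initialize/unseal flow, assuming the chart's default pod name `openbao-0` and the `openbao` namespace used by this example (adjust for your deployment): | ||||
|  | ||||
| ```bash | ||||
| # Initialize OpenBao; this prints the unseal key shares and the initial root token. | ||||
| kubectl exec -ti openbao-0 -n openbao -- bao operator init | ||||
|  | ||||
| # Unseal using the key shares (repeat until the unseal threshold is reached). | ||||
| kubectl exec -ti openbao-0 -n openbao -- bao operator unseal | ||||
| ``` | ||||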
							
								
								
									
67    examples/openbao/src/main.rs    Normal file
						| @ -0,0 +1,67 @@ | ||||
| use std::{collections::HashMap, str::FromStr}; | ||||
| 
 | ||||
| use harmony::{ | ||||
|     inventory::Inventory, | ||||
|     modules::helm::chart::{HelmChartScore, HelmRepository, NonBlankString}, | ||||
|     topology::K8sAnywhereTopology, | ||||
| }; | ||||
| use harmony_macros::hurl; | ||||
| 
 | ||||
| #[tokio::main] | ||||
| async fn main() { | ||||
|     let values_yaml = Some( | ||||
|         r#"server:
 | ||||
|   standalone: | ||||
|     enabled: true | ||||
|     config: | | ||||
|       listener "tcp" { | ||||
|         tls_disable = true | ||||
|         address = "[::]:8200" | ||||
|         cluster_address = "[::]:8201" | ||||
|       } | ||||
| 
 | ||||
|       storage "file" { | ||||
|         path = "/openbao/data" | ||||
|       } | ||||
| 
 | ||||
|   service: | ||||
|     enabled: true | ||||
| 
 | ||||
|   dataStorage: | ||||
|     enabled: true | ||||
|     size: 10Gi | ||||
|     storageClass: null | ||||
|     accessMode: ReadWriteOnce | ||||
| 
 | ||||
|   auditStorage: | ||||
|     enabled: true | ||||
|     size: 10Gi | ||||
|     storageClass: null | ||||
|     accessMode: ReadWriteOnce"#
 | ||||
|             .to_string(), | ||||
|     ); | ||||
|     let openbao = HelmChartScore { | ||||
|         namespace: Some(NonBlankString::from_str("openbao").unwrap()), | ||||
|         release_name: NonBlankString::from_str("openbao").unwrap(), | ||||
|         chart_name: NonBlankString::from_str("openbao/openbao").unwrap(), | ||||
|         chart_version: None, | ||||
|         values_overrides: None, | ||||
|         values_yaml, | ||||
|         create_namespace: true, | ||||
|         install_only: true, | ||||
|         repository: Some(HelmRepository::new( | ||||
|             "openbao".to_string(), | ||||
|             hurl!("https://openbao.github.io/openbao-helm"), | ||||
|             true, | ||||
|         )), | ||||
|     }; | ||||
| 
 | ||||
|     harmony_cli::run( | ||||
|         Inventory::autoload(), | ||||
|         K8sAnywhereTopology::from_env(), | ||||
|         vec![Box::new(openbao)], | ||||
|         None, | ||||
|     ) | ||||
|     .await | ||||
|     .unwrap(); | ||||
| } | ||||
| @ -16,3 +16,6 @@ harmony_macros = { path = "../../harmony_macros" } | ||||
| log = { workspace = true } | ||||
| env_logger = { workspace = true } | ||||
| url = { workspace = true } | ||||
| harmony_secret = { path = "../../harmony_secret" } | ||||
| brocade = { path = "../../brocade" } | ||||
| serde = { workspace = true } | ||||
|  | ||||
| @ -3,10 +3,11 @@ use std::{ | ||||
|     sync::Arc, | ||||
| }; | ||||
| 
 | ||||
| use brocade::BrocadeOptions; | ||||
| use cidr::Ipv4Cidr; | ||||
| use harmony::{ | ||||
|     hardware::{HostCategory, Location, PhysicalHost, SwitchGroup}, | ||||
|     infra::opnsense::OPNSenseManagementInterface, | ||||
|     infra::{brocade::BrocadeSwitchClient, opnsense::OPNSenseManagementInterface}, | ||||
|     inventory::Inventory, | ||||
|     modules::{ | ||||
|         dummy::{ErrorScore, PanicScore, SuccessScore}, | ||||
| @ -18,7 +19,9 @@ use harmony::{ | ||||
|     topology::{LogicalHost, UnmanagedRouter}, | ||||
| }; | ||||
| use harmony_macros::{ip, mac_address}; | ||||
| use harmony_secret::{Secret, SecretManager}; | ||||
| use harmony_types::net::Url; | ||||
| use serde::{Deserialize, Serialize}; | ||||
| 
 | ||||
| #[tokio::main] | ||||
| async fn main() { | ||||
| @ -27,6 +30,26 @@ async fn main() { | ||||
|         name: String::from("opnsense-1"), | ||||
|     }; | ||||
| 
 | ||||
|     let switch_auth = SecretManager::get_or_prompt::<BrocadeSwitchAuth>() | ||||
|         .await | ||||
|         .expect("Failed to get credentials"); | ||||
| 
 | ||||
|     let switches: Vec<IpAddr> = vec![ip!("192.168.5.101")]; // TODO: Adjust me
 | ||||
|     let brocade_options = Some(BrocadeOptions { | ||||
|         dry_run: *harmony::config::DRY_RUN, | ||||
|         ..Default::default() | ||||
|     }); | ||||
|     let switch_client = BrocadeSwitchClient::init( | ||||
|         &switches, | ||||
|         &switch_auth.username, | ||||
|         &switch_auth.password, | ||||
|         brocade_options, | ||||
|     ) | ||||
|     .await | ||||
|     .expect("Failed to connect to switch"); | ||||
| 
 | ||||
|     let switch_client = Arc::new(switch_client); | ||||
| 
 | ||||
|     let opnsense = Arc::new( | ||||
|         harmony::infra::opnsense::OPNSenseFirewall::new(firewall, None, "root", "opnsense").await, | ||||
|     ); | ||||
| @ -34,6 +57,7 @@ async fn main() { | ||||
|     let gateway_ipv4 = Ipv4Addr::new(10, 100, 8, 1); | ||||
|     let gateway_ip = IpAddr::V4(gateway_ipv4); | ||||
|     let topology = harmony::topology::HAClusterTopology { | ||||
|         kubeconfig: None, | ||||
|         domain_name: "demo.harmony.mcd".to_string(), | ||||
|         router: Arc::new(UnmanagedRouter::new( | ||||
|             gateway_ip, | ||||
| @ -54,7 +78,7 @@ async fn main() { | ||||
|             name: "cp0".to_string(), | ||||
|         }, | ||||
|         workers: vec![], | ||||
|         switch: vec![], | ||||
|         switch_client: switch_client.clone(), | ||||
|     }; | ||||
| 
 | ||||
|     let inventory = Inventory { | ||||
| @ -109,3 +133,9 @@ async fn main() { | ||||
|     .await | ||||
|     .unwrap(); | ||||
| } | ||||
| 
 | ||||
| #[derive(Secret, Serialize, Deserialize, Debug)] | ||||
| pub struct BrocadeSwitchAuth { | ||||
|     pub username: String, | ||||
|     pub password: String, | ||||
| } | ||||
|  | ||||
							
								
								
									
11    examples/remove_rook_osd/Cargo.toml    Normal file
						| @ -0,0 +1,11 @@ | ||||
| [package] | ||||
| name = "example-remove-rook-osd" | ||||
| edition = "2024" | ||||
| version.workspace = true | ||||
| readme.workspace = true | ||||
| license.workspace = true | ||||
| 
 | ||||
| [dependencies] | ||||
| harmony = { version = "0.1.0", path = "../../harmony" } | ||||
| harmony_cli = { version = "0.1.0", path = "../../harmony_cli" } | ||||
| tokio.workspace = true | ||||
							
								
								
									
18    examples/remove_rook_osd/src/main.rs    Normal file
						| @ -0,0 +1,18 @@ | ||||
| use harmony::{ | ||||
|     inventory::Inventory, modules::storage::ceph::ceph_remove_osd_score::CephRemoveOsd, | ||||
|     topology::K8sAnywhereTopology, | ||||
| }; | ||||
| 
 | ||||
| #[tokio::main] | ||||
| async fn main() { | ||||
|     let ceph_score = CephRemoveOsd { | ||||
|         osd_deployment_name: "rook-ceph-osd-2".to_string(), | ||||
|         rook_ceph_namespace: "rook-ceph".to_string(), | ||||
|     }; | ||||
| 
 | ||||
|     let topology = K8sAnywhereTopology::from_env(); | ||||
|     let inventory = Inventory::autoload(); | ||||
|     harmony_cli::run(inventory, topology, vec![Box::new(ceph_score)], None) | ||||
|         .await | ||||
|         .unwrap(); | ||||
| } | ||||
| @ -4,8 +4,7 @@ use harmony::{ | ||||
|     inventory::Inventory, | ||||
|     modules::{ | ||||
|         application::{ | ||||
|             ApplicationScore, RustWebFramework, RustWebapp, | ||||
|             features::rhob_monitoring::RHOBMonitoring, | ||||
|             ApplicationScore, RustWebFramework, RustWebapp, features::rhob_monitoring::Monitoring, | ||||
|         }, | ||||
|         monitoring::alert_channel::discord_alert_channel::DiscordWebhook, | ||||
|     }, | ||||
| @ -17,7 +16,6 @@ use harmony_types::net::Url; | ||||
| async fn main() { | ||||
|     let application = Arc::new(RustWebapp { | ||||
|         name: "test-rhob-monitoring".to_string(), | ||||
|         domain: Url::Url(url::Url::parse("htps://some-fake-url").unwrap()), | ||||
|         project_root: PathBuf::from("./webapp"), // Relative from 'harmony-path' param
 | ||||
|         framework: Some(RustWebFramework::Leptos), | ||||
|         service_port: 3000, | ||||
| @ -30,7 +28,7 @@ async fn main() { | ||||
| 
 | ||||
|     let app = ApplicationScore { | ||||
|         features: vec![ | ||||
|             Box::new(RHOBMonitoring { | ||||
|             Box::new(Monitoring { | ||||
|                 application: application.clone(), | ||||
|                 alert_receiver: vec![Box::new(discord_receiver)], | ||||
|             }), | ||||
|  | ||||
| @ -5,7 +5,7 @@ use harmony::{ | ||||
|     modules::{ | ||||
|         application::{ | ||||
|             ApplicationScore, RustWebFramework, RustWebapp, | ||||
|             features::{ContinuousDelivery, Monitoring}, | ||||
|             features::{Monitoring, PackagingDeployment}, | ||||
|         }, | ||||
|         monitoring::alert_channel::{ | ||||
|             discord_alert_channel::DiscordWebhook, webhook_receiver::WebhookReceiver, | ||||
| @ -19,7 +19,6 @@ use harmony_macros::hurl; | ||||
| async fn main() { | ||||
|     let application = Arc::new(RustWebapp { | ||||
|         name: "harmony-example-rust-webapp".to_string(), | ||||
|         domain: hurl!("https://rustapp.harmony.example.com"), | ||||
|         project_root: PathBuf::from("./webapp"), | ||||
|         framework: Some(RustWebFramework::Leptos), | ||||
|         service_port: 3000, | ||||
| @ -37,7 +36,7 @@ async fn main() { | ||||
| 
 | ||||
|     let app = ApplicationScore { | ||||
|         features: vec![ | ||||
|             Box::new(ContinuousDelivery { | ||||
|             Box::new(PackagingDeployment { | ||||
|                 application: application.clone(), | ||||
|             }), | ||||
|             Box::new(Monitoring { | ||||
|  | ||||
							
								
								
									
1    examples/try_rust_webapp/files_to_add/.dockerignore    Normal file
						| @ -0,0 +1 @@ | ||||
| harmony | ||||
							
								
								
									
20    examples/try_rust_webapp/files_to_add/Cargo.toml.to_add    Normal file
						| @ -0,0 +1,20 @@ | ||||
| [package] | ||||
| name = "harmony-tryrust" | ||||
| edition = "2024" | ||||
| version = "0.1.0" | ||||
| 
 | ||||
| [dependencies] | ||||
| harmony = { path = "../../../nationtech/harmony/harmony" } | ||||
| harmony_cli = { path = "../../../nationtech/harmony/harmony_cli" } | ||||
| harmony_types = { path = "../../../nationtech/harmony/harmony_types" } | ||||
| harmony_macros = { path = "../../../nationtech/harmony/harmony_macros" } | ||||
| tokio = { version = "1.40", features = [ | ||||
|   "io-std", | ||||
|   "fs", | ||||
|   "macros", | ||||
|   "rt-multi-thread", | ||||
| ] } | ||||
| log = { version = "0.4", features = ["kv"] } | ||||
| env_logger = "0.11" | ||||
| url = "2.5" | ||||
| base64 = "0.22.1" | ||||
							
								
								
									
50    examples/try_rust_webapp/files_to_add/main.rs    Normal file
						| @ -0,0 +1,50 @@ | ||||
| use harmony::{ | ||||
|     inventory::Inventory, | ||||
|     modules::{ | ||||
|         application::{ | ||||
|             ApplicationScore, RustWebFramework, RustWebapp, | ||||
|             features::{PackagingDeployment, rhob_monitoring::Monitoring}, | ||||
|         }, | ||||
|         monitoring::alert_channel::discord_alert_channel::DiscordWebhook, | ||||
|     }, | ||||
|     topology::K8sAnywhereTopology, | ||||
| }; | ||||
| use harmony_macros::hurl; | ||||
| use std::{path::PathBuf, sync::Arc}; | ||||
| 
 | ||||
| #[tokio::main] | ||||
| async fn main() { | ||||
|     let application = Arc::new(RustWebapp { | ||||
|         name: "tryrust".to_string(), | ||||
|         project_root: PathBuf::from(".."), | ||||
|         framework: Some(RustWebFramework::Leptos), | ||||
|         service_port: 8080, | ||||
|     }); | ||||
| 
 | ||||
|     let discord_webhook = DiscordWebhook { | ||||
|         name: "harmony_demo".to_string(), | ||||
|         url: hurl!("http://not_a_url.com"), | ||||
|     }; | ||||
| 
 | ||||
|     let app = ApplicationScore { | ||||
|         features: vec![ | ||||
|             Box::new(PackagingDeployment { | ||||
|                 application: application.clone(), | ||||
|             }), | ||||
|             Box::new(Monitoring { | ||||
|                 application: application.clone(), | ||||
|                 alert_receiver: vec![Box::new(discord_webhook)], | ||||
|             }), | ||||
|         ], | ||||
|         application, | ||||
|     }; | ||||
| 
 | ||||
|     harmony_cli::run( | ||||
|         Inventory::autoload(), | ||||
|         K8sAnywhereTopology::from_env(), | ||||
|         vec![Box::new(app)], | ||||
|         None, | ||||
|     ) | ||||
|     .await | ||||
|     .unwrap(); | ||||
| } | ||||
| @ -1,41 +1,39 @@ | ||||
| use std::{path::PathBuf, sync::Arc}; | ||||
| 
 | ||||
| use harmony::{ | ||||
|     inventory::Inventory, | ||||
|     modules::{ | ||||
|         application::{ | ||||
|             ApplicationScore, RustWebFramework, RustWebapp, | ||||
|             features::{ContinuousDelivery, Monitoring}, | ||||
|             features::{Monitoring, PackagingDeployment}, | ||||
|         }, | ||||
|         monitoring::alert_channel::discord_alert_channel::DiscordWebhook, | ||||
|     }, | ||||
|     topology::K8sAnywhereTopology, | ||||
| }; | ||||
| use harmony_types::net::Url; | ||||
| use harmony_macros::hurl; | ||||
| use std::{path::PathBuf, sync::Arc}; | ||||
| 
 | ||||
| #[tokio::main] | ||||
| async fn main() { | ||||
|     let application = Arc::new(RustWebapp { | ||||
|         name: "harmony-example-tryrust".to_string(), | ||||
|         domain: Url::Url(url::Url::parse("https://tryrust.harmony.example.com").unwrap()), | ||||
|         project_root: PathBuf::from("./tryrust.org"), | ||||
|         project_root: PathBuf::from("./tryrust.org"), // <== Project root, in this case it is a submodule | ||||
|         framework: Some(RustWebFramework::Leptos), | ||||
|         service_port: 8080, | ||||
|     }); | ||||
| 
 | ||||
|     let discord_receiver = DiscordWebhook { | ||||
|         name: "test-discord".to_string(), | ||||
|         url: Url::Url(url::Url::parse("https://discord.doesnt.exist.com").unwrap()), | ||||
|     }; | ||||
| 
 | ||||
|     // Define your Application deployment and the features you want
 | ||||
|     let app = ApplicationScore { | ||||
|         features: vec![ | ||||
|             Box::new(ContinuousDelivery { | ||||
|             Box::new(PackagingDeployment { | ||||
|                 application: application.clone(), | ||||
|             }), | ||||
|             Box::new(Monitoring { | ||||
|                 application: application.clone(), | ||||
|                 alert_receiver: vec![Box::new(discord_receiver)], | ||||
|                 alert_receiver: vec![Box::new(DiscordWebhook { | ||||
|                     name: "test-discord".to_string(), | ||||
|                     url: hurl!("https://discord.doesnt.exist.com"), | ||||
|                 })], | ||||
|             }), | ||||
|         ], | ||||
|         application, | ||||
| @ -43,7 +41,7 @@ async fn main() { | ||||
| 
 | ||||
|     harmony_cli::run( | ||||
|         Inventory::autoload(), | ||||
|         K8sAnywhereTopology::from_env(), | ||||
|         K8sAnywhereTopology::from_env(), // <== Deploy to local automatically provisioned k3d by default or connect to any kubernetes cluster
 | ||||
|         vec![Box::new(app)], | ||||
|         None, | ||||
|     ) | ||||
|  | ||||
| @ -10,7 +10,11 @@ testing = [] | ||||
| 
 | ||||
| [dependencies] | ||||
| hex = "0.4" | ||||
| reqwest = { version = "0.11", features = ["blocking", "json", "rustls-tls"], default-features = false } | ||||
| reqwest = { version = "0.11", features = [ | ||||
|   "blocking", | ||||
|   "json", | ||||
|   "rustls-tls", | ||||
| ], default-features = false } | ||||
| russh = "0.45.0" | ||||
| rust-ipmi = "0.1.1" | ||||
| semver = "1.0.23" | ||||
| @ -73,6 +77,9 @@ harmony_secret = { path = "../harmony_secret" } | ||||
| askama.workspace = true | ||||
| sqlx.workspace = true | ||||
| inquire.workspace = true | ||||
| brocade = { path = "../brocade" } | ||||
| option-ext = "0.2.0" | ||||
| 
 | ||||
| [dev-dependencies] | ||||
| pretty_assertions.workspace = true | ||||
| assertor.workspace = true | ||||
|  | ||||
| @ -30,10 +30,12 @@ pub enum InterpretName { | ||||
|     Lamp, | ||||
|     ApplicationMonitoring, | ||||
|     K8sPrometheusCrdAlerting, | ||||
|     CephRemoveOsd, | ||||
|     DiscoverInventoryAgent, | ||||
|     CephClusterHealth, | ||||
|     Custom(&'static str), | ||||
|     RHOBAlerting, | ||||
|     K8sIngress, | ||||
| } | ||||
| 
 | ||||
| impl std::fmt::Display for InterpretName { | ||||
| @ -60,10 +62,12 @@ impl std::fmt::Display for InterpretName { | ||||
|             InterpretName::Lamp => f.write_str("LAMP"), | ||||
|             InterpretName::ApplicationMonitoring => f.write_str("ApplicationMonitoring"), | ||||
|             InterpretName::K8sPrometheusCrdAlerting => f.write_str("K8sPrometheusCrdAlerting"), | ||||
|             InterpretName::CephRemoveOsd => f.write_str("CephRemoveOsd"), | ||||
|             InterpretName::DiscoverInventoryAgent => f.write_str("DiscoverInventoryAgent"), | ||||
|             InterpretName::CephClusterHealth => f.write_str("CephClusterHealth"), | ||||
|             InterpretName::Custom(name) => f.write_str(name), | ||||
|             InterpretName::RHOBAlerting => f.write_str("RHOBAlerting"), | ||||
|             InterpretName::K8sIngress => f.write_str("K8sIngress"), | ||||
|         } | ||||
|     } | ||||
| } | ||||
| @ -82,13 +86,15 @@ pub trait Interpret<T>: std::fmt::Debug + Send { | ||||
| pub struct Outcome { | ||||
|     pub status: InterpretStatus, | ||||
|     pub message: String, | ||||
|     pub details: Vec<String>, | ||||
| } | ||||
| 
 | ||||
| impl Outcome { | ||||
|     pub fn noop() -> Self { | ||||
|     pub fn noop(message: String) -> Self { | ||||
|         Self { | ||||
|             status: InterpretStatus::NOOP, | ||||
|             message: String::new(), | ||||
|             message, | ||||
|             details: vec![], | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
| @ -96,6 +102,23 @@ impl Outcome { | ||||
|         Self { | ||||
|             status: InterpretStatus::SUCCESS, | ||||
|             message, | ||||
|             details: vec![], | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     pub fn success_with_details(message: String, details: Vec<String>) -> Self { | ||||
|         Self { | ||||
|             status: InterpretStatus::SUCCESS, | ||||
|             message, | ||||
|             details, | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     pub fn running(message: String) -> Self { | ||||
|         Self { | ||||
|             status: InterpretStatus::RUNNING, | ||||
|             message, | ||||
|             details: vec![], | ||||
|         } | ||||
|     } | ||||
| } | ||||
|  | ||||
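For orientation, a minimal sketch of call sites for the reworked `Outcome` constructors above (`noop` now takes a message, and `success_with_details` / `running` are new). Only the signatures come from this diff; the import path is an assumption.

    // Hypothetical call sites; the `harmony::interpret` module path is assumed, not shown in this diff.
    use harmony::interpret::Outcome;

    fn sample_outcomes() -> Vec<Outcome> {
        vec![
            // NOOP outcomes now carry an explanatory message instead of an empty string.
            Outcome::noop("Switch already configured, nothing to do".to_string()),
            // Successful outcomes can surface per-step details to the caller.
            Outcome::success_with_details(
                "Configured host network".to_string(),
                vec!["created bond42".to_string(), "configured port-channel".to_string()],
            ),
            // Long-running interprets can report an in-progress status.
            Outcome::running("Waiting for NMState operator rollout".to_string()),
        ]
    }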
| @ -1,33 +1,28 @@ | ||||
| use async_trait::async_trait; | ||||
| use harmony_macros::ip; | ||||
| use harmony_types::net::MacAddress; | ||||
| use harmony_types::net::Url; | ||||
| use harmony_types::{ | ||||
|     net::{MacAddress, Url}, | ||||
|     switch::PortLocation, | ||||
| }; | ||||
| use kube::api::ObjectMeta; | ||||
| use log::debug; | ||||
| use log::info; | ||||
| 
 | ||||
| use crate::data::FileContent; | ||||
| use crate::executors::ExecutorError; | ||||
| use crate::modules::okd::crd::nmstate::{self, NodeNetworkConfigurationPolicy}; | ||||
| use crate::topology::PxeOptions; | ||||
| use crate::{data::FileContent, modules::okd::crd::nmstate::NMState}; | ||||
| use crate::{ | ||||
|     executors::ExecutorError, modules::okd::crd::nmstate::NodeNetworkConfigurationPolicySpec, | ||||
| }; | ||||
| 
 | ||||
| use super::DHCPStaticEntry; | ||||
| use super::DhcpServer; | ||||
| use super::DnsRecord; | ||||
| use super::DnsRecordType; | ||||
| use super::DnsServer; | ||||
| use super::Firewall; | ||||
| use super::HttpServer; | ||||
| use super::IpAddress; | ||||
| use super::K8sclient; | ||||
| use super::LoadBalancer; | ||||
| use super::LoadBalancerService; | ||||
| use super::LogicalHost; | ||||
| use super::PreparationError; | ||||
| use super::PreparationOutcome; | ||||
| use super::Router; | ||||
| use super::TftpServer; | ||||
| use super::{ | ||||
|     DHCPStaticEntry, DhcpServer, DnsRecord, DnsRecordType, DnsServer, Firewall, HostNetworkConfig, | ||||
|     HttpServer, IpAddress, K8sclient, LoadBalancer, LoadBalancerService, LogicalHost, | ||||
|     PreparationError, PreparationOutcome, Router, Switch, SwitchClient, SwitchError, TftpServer, | ||||
|     Topology, k8s::K8sClient, | ||||
| }; | ||||
| 
 | ||||
| use super::Topology; | ||||
| use super::k8s::K8sClient; | ||||
| use std::collections::BTreeMap; | ||||
| use std::sync::Arc; | ||||
| 
 | ||||
| #[derive(Debug, Clone)] | ||||
| @ -40,10 +35,11 @@ pub struct HAClusterTopology { | ||||
|     pub tftp_server: Arc<dyn TftpServer>, | ||||
|     pub http_server: Arc<dyn HttpServer>, | ||||
|     pub dns_server: Arc<dyn DnsServer>, | ||||
|     pub switch_client: Arc<dyn SwitchClient>, | ||||
|     pub bootstrap_host: LogicalHost, | ||||
|     pub control_plane: Vec<LogicalHost>, | ||||
|     pub workers: Vec<LogicalHost>, | ||||
|     pub switch: Vec<LogicalHost>, | ||||
|     pub kubeconfig: Option<String>, | ||||
| } | ||||
| 
 | ||||
| #[async_trait] | ||||
| @ -62,9 +58,17 @@ impl Topology for HAClusterTopology { | ||||
| #[async_trait] | ||||
| impl K8sclient for HAClusterTopology { | ||||
|     async fn k8s_client(&self) -> Result<Arc<K8sClient>, String> { | ||||
|         Ok(Arc::new( | ||||
|             K8sClient::try_default().await.map_err(|e| e.to_string())?, | ||||
|         )) | ||||
|         match &self.kubeconfig { | ||||
|             None => Ok(Arc::new( | ||||
|                 K8sClient::try_default().await.map_err(|e| e.to_string())?, | ||||
|             )), | ||||
|             Some(kubeconfig) => { | ||||
|                 let Some(client) = K8sClient::from_kubeconfig(&kubeconfig).await else { | ||||
|                     return Err("Failed to create k8s client".to_string()); | ||||
|                 }; | ||||
|                 Ok(Arc::new(client)) | ||||
|             } | ||||
|         } | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| @ -89,6 +93,193 @@ impl HAClusterTopology { | ||||
|             .to_string() | ||||
|     } | ||||
| 
 | ||||
|     async fn ensure_nmstate_operator_installed(&self) -> Result<(), String> { | ||||
|         let k8s_client = self.k8s_client().await?; | ||||
| 
 | ||||
|         debug!("Installing NMState controller..."); | ||||
|         k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/nmstate.io_nmstates.yaml
 | ||||
| ").unwrap(), Some("nmstate"))
 | ||||
|             .await | ||||
|             .map_err(|e| e.to_string())?; | ||||
| 
 | ||||
|         debug!("Creating NMState namespace..."); | ||||
|         k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/namespace.yaml
 | ||||
| ").unwrap(), Some("nmstate"))
 | ||||
|             .await | ||||
|             .map_err(|e| e.to_string())?; | ||||
| 
 | ||||
|         debug!("Creating NMState service account..."); | ||||
|         k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/service_account.yaml
 | ||||
| ").unwrap(), Some("nmstate"))
 | ||||
|             .await | ||||
|             .map_err(|e| e.to_string())?; | ||||
| 
 | ||||
|         debug!("Creating NMState role..."); | ||||
|         k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/role.yaml
 | ||||
| ").unwrap(), Some("nmstate"))
 | ||||
|             .await | ||||
|             .map_err(|e| e.to_string())?; | ||||
| 
 | ||||
|         debug!("Creating NMState role binding..."); | ||||
|         k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/role_binding.yaml
 | ||||
| ").unwrap(), Some("nmstate"))
 | ||||
|             .await | ||||
|             .map_err(|e| e.to_string())?; | ||||
| 
 | ||||
|         debug!("Creating NMState operator..."); | ||||
|         k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/operator.yaml
 | ||||
| ").unwrap(), Some("nmstate"))
 | ||||
|             .await | ||||
|             .map_err(|e| e.to_string())?; | ||||
| 
 | ||||
|         k8s_client | ||||
|             .wait_until_deployment_ready("nmstate-operator", Some("nmstate"), None) | ||||
|             .await?; | ||||
| 
 | ||||
|         let nmstate = NMState { | ||||
|             metadata: ObjectMeta { | ||||
|                 name: Some("nmstate".to_string()), | ||||
|                 ..Default::default() | ||||
|             }, | ||||
|             ..Default::default() | ||||
|         }; | ||||
|         debug!("Creating NMState: {nmstate:#?}"); | ||||
|         k8s_client | ||||
|             .apply(&nmstate, None) | ||||
|             .await | ||||
|             .map_err(|e| e.to_string())?; | ||||
| 
 | ||||
|         Ok(()) | ||||
|     } | ||||
| 
 | ||||
|     fn get_next_bond_id(&self) -> u8 { | ||||
|         42 // FIXME: Find a better way to declare the bond id
 | ||||
|     } | ||||
| 
 | ||||
|     async fn configure_bond(&self, config: &HostNetworkConfig) -> Result<(), SwitchError> { | ||||
|         self.ensure_nmstate_operator_installed() | ||||
|             .await | ||||
|             .map_err(|e| { | ||||
|                 SwitchError::new(format!( | ||||
|                     "Can't configure bond, NMState operator not available: {e}" | ||||
|                 )) | ||||
|             })?; | ||||
| 
 | ||||
|         let bond_config = self.create_bond_configuration(config); | ||||
|         debug!( | ||||
|             "Applying NMState bond config for host {}: {bond_config:#?}", | ||||
|             config.host_id | ||||
|         ); | ||||
|         self.k8s_client() | ||||
|             .await | ||||
|             .unwrap() | ||||
|             .apply(&bond_config, None) | ||||
|             .await | ||||
|             .map_err(|e| SwitchError::new(format!("Failed to configure bond: {e}")))?; | ||||
| 
 | ||||
|         Ok(()) | ||||
|     } | ||||
| 
 | ||||
|     fn create_bond_configuration( | ||||
|         &self, | ||||
|         config: &HostNetworkConfig, | ||||
|     ) -> NodeNetworkConfigurationPolicy { | ||||
|         let host_name = &config.host_id; | ||||
|         let bond_id = self.get_next_bond_id(); | ||||
|         let bond_name = format!("bond{bond_id}"); | ||||
| 
 | ||||
|         info!("Configuring bond '{bond_name}' for host '{host_name}'..."); | ||||
| 
 | ||||
|         let mut bond_mtu: Option<u32> = None; | ||||
|         let mut copy_mac_from: Option<String> = None; | ||||
|         let mut bond_ports = Vec::new(); | ||||
|         let mut interfaces: Vec<nmstate::InterfaceSpec> = Vec::new(); | ||||
| 
 | ||||
|         for switch_port in &config.switch_ports { | ||||
|             let interface_name = switch_port.interface.name.clone(); | ||||
| 
 | ||||
|             interfaces.push(nmstate::InterfaceSpec { | ||||
|                 name: interface_name.clone(), | ||||
|                 description: Some(format!("Member of bond {bond_name}")), | ||||
|                 r#type: "ethernet".to_string(), | ||||
|                 state: "up".to_string(), | ||||
|                 mtu: Some(switch_port.interface.mtu), | ||||
|                 mac_address: Some(switch_port.interface.mac_address.to_string()), | ||||
|                 ipv4: Some(nmstate::IpStackSpec { | ||||
|                     enabled: Some(false), | ||||
|                     ..Default::default() | ||||
|                 }), | ||||
|                 ipv6: Some(nmstate::IpStackSpec { | ||||
|                     enabled: Some(false), | ||||
|                     ..Default::default() | ||||
|                 }), | ||||
|                 link_aggregation: None, | ||||
|                 ..Default::default() | ||||
|             }); | ||||
| 
 | ||||
|             bond_ports.push(interface_name.clone()); | ||||
| 
 | ||||
|             // Use the first port's details for the bond mtu and mac address
 | ||||
|             if bond_mtu.is_none() { | ||||
|                 bond_mtu = Some(switch_port.interface.mtu); | ||||
|             } | ||||
|             if copy_mac_from.is_none() { | ||||
|                 copy_mac_from = Some(interface_name); | ||||
|             } | ||||
|         } | ||||
| 
 | ||||
|         interfaces.push(nmstate::InterfaceSpec { | ||||
|             name: bond_name.clone(), | ||||
|             description: Some(format!("Network bond for host {host_name}")), | ||||
|             r#type: "bond".to_string(), | ||||
|             state: "up".to_string(), | ||||
|             copy_mac_from, | ||||
|             ipv4: Some(nmstate::IpStackSpec { | ||||
|                 dhcp: Some(true), | ||||
|                 enabled: Some(true), | ||||
|                 ..Default::default() | ||||
|             }), | ||||
|             ipv6: Some(nmstate::IpStackSpec { | ||||
|                 dhcp: Some(true), | ||||
|                 autoconf: Some(true), | ||||
|                 enabled: Some(true), | ||||
|                 ..Default::default() | ||||
|             }), | ||||
|             link_aggregation: Some(nmstate::BondSpec { | ||||
|                 mode: "802.3ad".to_string(), | ||||
|                 ports: bond_ports, | ||||
|                 ..Default::default() | ||||
|             }), | ||||
|             ..Default::default() | ||||
|         }); | ||||
| 
 | ||||
|         NodeNetworkConfigurationPolicy { | ||||
|             metadata: ObjectMeta { | ||||
|                 name: Some(format!("{host_name}-bond-config")), | ||||
|                 ..Default::default() | ||||
|             }, | ||||
|             spec: NodeNetworkConfigurationPolicySpec { | ||||
|                 node_selector: Some(BTreeMap::from([( | ||||
|                     "kubernetes.io/hostname".to_string(), | ||||
|                     host_name.to_string(), | ||||
|                 )])), | ||||
|                 desired_state: nmstate::DesiredStateSpec { interfaces }, | ||||
|             }, | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     async fn configure_port_channel(&self, config: &HostNetworkConfig) -> Result<(), SwitchError> { | ||||
|         debug!("Configuring port channel: {config:#?}"); | ||||
|         let switch_ports = config.switch_ports.iter().map(|s| s.port.clone()).collect(); | ||||
| 
 | ||||
|         self.switch_client | ||||
|             .configure_port_channel(&format!("Harmony_{}", config.host_id), switch_ports) | ||||
|             .await | ||||
|             .map_err(|e| SwitchError::new(format!("Failed to configure switch: {e}")))?; | ||||
| 
 | ||||
|         Ok(()) | ||||
|     } | ||||
| 
 | ||||
|     pub fn autoload() -> Self { | ||||
|         let dummy_infra = Arc::new(DummyInfra {}); | ||||
|         let dummy_host = LogicalHost { | ||||
| @ -97,6 +288,7 @@ impl HAClusterTopology { | ||||
|         }; | ||||
| 
 | ||||
|         Self { | ||||
|             kubeconfig: None, | ||||
|             domain_name: "DummyTopology".to_string(), | ||||
|             router: dummy_infra.clone(), | ||||
|             load_balancer: dummy_infra.clone(), | ||||
| @ -105,10 +297,10 @@ impl HAClusterTopology { | ||||
|             tftp_server: dummy_infra.clone(), | ||||
|             http_server: dummy_infra.clone(), | ||||
|             dns_server: dummy_infra.clone(), | ||||
|             switch_client: dummy_infra.clone(), | ||||
|             bootstrap_host: dummy_host, | ||||
|             control_plane: vec![], | ||||
|             workers: vec![], | ||||
|             switch: vec![], | ||||
|         } | ||||
|     } | ||||
| } | ||||
| @ -263,6 +455,27 @@ impl HttpServer for HAClusterTopology { | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[async_trait] | ||||
| impl Switch for HAClusterTopology { | ||||
|     async fn setup_switch(&self) -> Result<(), SwitchError> { | ||||
|         self.switch_client.setup().await?; | ||||
|         Ok(()) | ||||
|     } | ||||
| 
 | ||||
|     async fn get_port_for_mac_address( | ||||
|         &self, | ||||
|         mac_address: &MacAddress, | ||||
|     ) -> Result<Option<PortLocation>, SwitchError> { | ||||
|         let port = self.switch_client.find_port(mac_address).await?; | ||||
|         Ok(port) | ||||
|     } | ||||
| 
 | ||||
|     async fn configure_host_network(&self, config: &HostNetworkConfig) -> Result<(), SwitchError> { | ||||
|         self.configure_bond(config).await?; | ||||
|         self.configure_port_channel(config).await | ||||
|     } | ||||
| } | ||||
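A minimal sketch of how the `Switch` implementation above might be driven. The types and method signatures come from this diff; the helper function itself and its calling context are illustrative only.

    // Illustrative helper; assumes a fully constructed HAClusterTopology and the crate paths used above.
    use harmony_types::{net::MacAddress, switch::PortLocation};

    async fn locate_host_port(
        topology: &HAClusterTopology,
        mac: &MacAddress,
    ) -> Result<Option<PortLocation>, SwitchError> {
        // Ensure the switch is prepared, then look the host up by its MAC address.
        topology.setup_switch().await?;
        topology.get_port_for_mac_address(mac).await
    }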
| 
 | ||||
| #[derive(Debug)] | ||||
| pub struct DummyInfra; | ||||
| 
 | ||||
| @ -332,8 +545,8 @@ impl DhcpServer for DummyInfra { | ||||
|     } | ||||
|     async fn set_dhcp_range( | ||||
|         &self, | ||||
|         start: &IpAddress, | ||||
|         end: &IpAddress, | ||||
|         _start: &IpAddress, | ||||
|         _end: &IpAddress, | ||||
|     ) -> Result<(), ExecutorError> { | ||||
|         unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) | ||||
|     } | ||||
| @ -449,3 +662,25 @@ impl DnsServer for DummyInfra { | ||||
|         unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[async_trait] | ||||
| impl SwitchClient for DummyInfra { | ||||
|     async fn setup(&self) -> Result<(), SwitchError> { | ||||
|         unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) | ||||
|     } | ||||
| 
 | ||||
|     async fn find_port( | ||||
|         &self, | ||||
|         _mac_address: &MacAddress, | ||||
|     ) -> Result<Option<PortLocation>, SwitchError> { | ||||
|         unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) | ||||
|     } | ||||
| 
 | ||||
|     async fn configure_port_channel( | ||||
|         &self, | ||||
|         _channel_name: &str, | ||||
|         _switch_ports: Vec<PortLocation>, | ||||
|     ) -> Result<u8, SwitchError> { | ||||
|         unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) | ||||
|     } | ||||
| } | ||||
|  | ||||
harmony/src/domain/topology/ingress.rs (new file, +7)
						| @ -0,0 +1,7 @@ | ||||
| use crate::topology::PreparationError; | ||||
| use async_trait::async_trait; | ||||
| 
 | ||||
| #[async_trait] | ||||
| pub trait Ingress { | ||||
|     async fn get_domain(&self, service: &str) -> Result<String, PreparationError>; | ||||
| } | ||||
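The new `Ingress` trait exposes a single async method. A hedged implementation sketch, using a made-up `StaticIngress` type and paths mirroring this file's imports:

    // `StaticIngress` is hypothetical; only the trait signature comes from this diff.
    use async_trait::async_trait;
    use crate::topology::{ingress::Ingress, PreparationError};

    pub struct StaticIngress {
        pub base_domain: String,
    }

    #[async_trait]
    impl Ingress for StaticIngress {
        async fn get_domain(&self, service: &str) -> Result<String, PreparationError> {
            // e.g. "grafana-grafana" becomes "grafana-grafana.apps.example.com"
            Ok(format!("{service}.{}", self.base_domain))
        }
    }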
| @ -1,13 +1,21 @@ | ||||
| use std::time::Duration; | ||||
| 
 | ||||
| use derive_new::new; | ||||
| use k8s_openapi::{ | ||||
|     ClusterResourceScope, NamespaceResourceScope, | ||||
|     api::{apps::v1::Deployment, core::v1::Pod}, | ||||
|     api::{ | ||||
|         apps::v1::Deployment, | ||||
|         core::v1::{Pod, ServiceAccount}, | ||||
|     }, | ||||
|     apimachinery::pkg::version::Info, | ||||
| }; | ||||
| use kube::{ | ||||
|     Client, Config, Error, Resource, | ||||
|     Client, Config, Discovery, Error, Resource, | ||||
|     api::{Api, AttachParams, DeleteParams, ListParams, Patch, PatchParams, ResourceExt}, | ||||
|     config::{KubeConfigOptions, Kubeconfig}, | ||||
|     core::ErrorResponse, | ||||
|     discovery::{ApiCapabilities, Scope}, | ||||
|     error::DiscoveryError, | ||||
|     runtime::reflector::Lookup, | ||||
| }; | ||||
| use kube::{api::DynamicObject, runtime::conditions}; | ||||
| @ -15,11 +23,12 @@ use kube::{ | ||||
|     api::{ApiResource, GroupVersionKind}, | ||||
|     runtime::wait::await_condition, | ||||
| }; | ||||
| use log::{debug, error, trace}; | ||||
| use log::{debug, error, info, trace, warn}; | ||||
| use serde::{Serialize, de::DeserializeOwned}; | ||||
| use serde_json::{Value, json}; | ||||
| use serde_json::json; | ||||
| use similar::TextDiff; | ||||
| use tokio::io::AsyncReadExt; | ||||
| use tokio::{io::AsyncReadExt, time::sleep}; | ||||
| use url::Url; | ||||
| 
 | ||||
| #[derive(new, Clone)] | ||||
| pub struct K8sClient { | ||||
| @ -53,6 +62,22 @@ impl K8sClient { | ||||
|         }) | ||||
|     } | ||||
| 
 | ||||
|     pub async fn service_account_api(&self, namespace: &str) -> Api<ServiceAccount> { | ||||
|         let api: Api<ServiceAccount> = Api::namespaced(self.client.clone(), namespace); | ||||
|         api | ||||
|     } | ||||
| 
 | ||||
|     pub async fn get_apiserver_version(&self) -> Result<Info, Error> { | ||||
|         let client: Client = self.client.clone(); | ||||
|         let version_info: Info = client.apiserver_version().await?; | ||||
|         Ok(version_info) | ||||
|     } | ||||
| 
 | ||||
|     pub async fn discovery(&self) -> Result<Discovery, Error> { | ||||
|         let discovery: Discovery = Discovery::new(self.client.clone()).run().await?; | ||||
|         Ok(discovery) | ||||
|     } | ||||
| 
 | ||||
|     pub async fn get_resource_json_value( | ||||
|         &self, | ||||
|         name: &str, | ||||
| @ -65,7 +90,8 @@ impl K8sClient { | ||||
|         } else { | ||||
|             Api::default_namespaced_with(self.client.clone(), &gvk) | ||||
|         }; | ||||
|         Ok(resource.get(name).await?) | ||||
| 
 | ||||
|         resource.get(name).await | ||||
|     } | ||||
| 
 | ||||
|     pub async fn get_deployment( | ||||
| @ -74,11 +100,15 @@ impl K8sClient { | ||||
|         namespace: Option<&str>, | ||||
|     ) -> Result<Option<Deployment>, Error> { | ||||
|         let deps: Api<Deployment> = if let Some(ns) = namespace { | ||||
|             debug!("getting namespaced deployment"); | ||||
|             Api::namespaced(self.client.clone(), ns) | ||||
|         } else { | ||||
|             debug!("getting default namespace deployment"); | ||||
|             Api::default_namespaced(self.client.clone()) | ||||
|         }; | ||||
|         Ok(deps.get_opt(name).await?) | ||||
| 
 | ||||
|         debug!("getting deployment {} in ns {}", name, namespace.unwrap()); | ||||
|         deps.get_opt(name).await | ||||
|     } | ||||
| 
 | ||||
|     pub async fn get_pod(&self, name: &str, namespace: Option<&str>) -> Result<Option<Pod>, Error> { | ||||
| @ -87,7 +117,8 @@ impl K8sClient { | ||||
|         } else { | ||||
|             Api::default_namespaced(self.client.clone()) | ||||
|         }; | ||||
|         Ok(pods.get_opt(name).await?) | ||||
| 
 | ||||
|         pods.get_opt(name).await | ||||
|     } | ||||
| 
 | ||||
|     pub async fn scale_deployment( | ||||
| @ -108,7 +139,7 @@ impl K8sClient { | ||||
|             } | ||||
|         }); | ||||
|         let pp = PatchParams::default(); | ||||
|         let scale = Patch::Apply(&patch); | ||||
|         let scale = Patch::Merge(&patch); | ||||
|         deployments.patch_scale(name, &pp, &scale).await?; | ||||
|         Ok(()) | ||||
|     } | ||||
| @ -130,9 +161,9 @@ impl K8sClient { | ||||
| 
 | ||||
|     pub async fn wait_until_deployment_ready( | ||||
|         &self, | ||||
|         name: String, | ||||
|         name: &str, | ||||
|         namespace: Option<&str>, | ||||
|         timeout: Option<u64>, | ||||
|         timeout: Option<Duration>, | ||||
|     ) -> Result<(), String> { | ||||
|         let api: Api<Deployment>; | ||||
| 
 | ||||
| @ -142,9 +173,9 @@ impl K8sClient { | ||||
|             api = Api::default_namespaced(self.client.clone()); | ||||
|         } | ||||
| 
 | ||||
|         let establish = await_condition(api, name.as_str(), conditions::is_deployment_completed()); | ||||
|         let t = timeout.unwrap_or(300); | ||||
|         let res = tokio::time::timeout(std::time::Duration::from_secs(t), establish).await; | ||||
|         let establish = await_condition(api, name, conditions::is_deployment_completed()); | ||||
|         let timeout = timeout.unwrap_or(Duration::from_secs(120)); | ||||
|         let res = tokio::time::timeout(timeout, establish).await; | ||||
| 
 | ||||
|         if res.is_ok() { | ||||
|             Ok(()) | ||||
| @ -153,6 +184,41 @@ impl K8sClient { | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     pub async fn wait_for_pod_ready( | ||||
|         &self, | ||||
|         pod_name: &str, | ||||
|         namespace: Option<&str>, | ||||
|     ) -> Result<(), Error> { | ||||
|         let mut elapsed = 0; | ||||
|         let interval = 5; // seconds between checks
 | ||||
|         let timeout_secs = 120; | ||||
|         loop { | ||||
|             let pod = self.get_pod(pod_name, namespace).await?; | ||||
| 
 | ||||
|             if let Some(p) = pod { | ||||
|                 if let Some(status) = p.status { | ||||
|                     if let Some(phase) = status.phase { | ||||
|                         if phase.to_lowercase() == "running" { | ||||
|                             return Ok(()); | ||||
|                         } | ||||
|                     } | ||||
|                 } | ||||
|             } | ||||
| 
 | ||||
|             if elapsed >= timeout_secs { | ||||
|                 return Err(Error::Discovery(DiscoveryError::MissingResource(format!( | ||||
|                     "'{}' in ns '{}' did not become ready within {}s", | ||||
|                     pod_name, | ||||
|                     namespace.unwrap_or("default"), | ||||
|                     timeout_secs | ||||
|                 )))); | ||||
|             } | ||||
| 
 | ||||
|             sleep(Duration::from_secs(interval)).await; | ||||
|             elapsed += interval; | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     /// Will execute a command in the first pod found that matches the specified label
 | ||||
|     /// '{label}={name}'
 | ||||
|     pub async fn exec_app_capture_output( | ||||
| @ -199,7 +265,7 @@ impl K8sClient { | ||||
| 
 | ||||
|                 if let Some(s) = status.status { | ||||
|                     let mut stdout_buf = String::new(); | ||||
|                     if let Some(mut stdout) = process.stdout().take() { | ||||
|                     if let Some(mut stdout) = process.stdout() { | ||||
|                         stdout | ||||
|                             .read_to_string(&mut stdout_buf) | ||||
|                             .await | ||||
| @ -305,14 +371,14 @@ impl K8sClient { | ||||
|                 Ok(current) => { | ||||
|                     trace!("Received current value {current:#?}"); | ||||
|                     // The resource exists, so we calculate and display a diff.
 | ||||
|                     println!("\nPerforming dry-run for resource: '{}'", name); | ||||
|                     println!("\nPerforming dry-run for resource: '{name}'"); | ||||
|                     let mut current_yaml = serde_yaml::to_value(¤t).unwrap_or_else(|_| { | ||||
|                         panic!("Could not serialize current value : {current:#?}") | ||||
|                     }); | ||||
|                     if current_yaml.is_mapping() && current_yaml.get("status").is_some() { | ||||
|                         let map = current_yaml.as_mapping_mut().unwrap(); | ||||
|                         let removed = map.remove_entry("status"); | ||||
|                         trace!("Removed status {:?}", removed); | ||||
|                         trace!("Removed status {removed:?}"); | ||||
|                     } else { | ||||
|                         trace!( | ||||
|                             "Did not find status entry for current object {}/{}", | ||||
| @ -341,14 +407,14 @@ impl K8sClient { | ||||
|                             similar::ChangeTag::Insert => "+", | ||||
|                             similar::ChangeTag::Equal => " ", | ||||
|                         }; | ||||
|                         print!("{}{}", sign, change); | ||||
|                         print!("{sign}{change}"); | ||||
|                     } | ||||
|                     // In a dry run, we return the new resource state that would have been applied.
 | ||||
|                     Ok(resource.clone()) | ||||
|                 } | ||||
|                 Err(Error::Api(ErrorResponse { code: 404, .. })) => { | ||||
|                     // The resource does not exist, so the "diff" is the entire new resource.
 | ||||
|                     println!("\nPerforming dry-run for new resource: '{}'", name); | ||||
|                     println!("\nPerforming dry-run for new resource: '{name}'"); | ||||
|                     println!( | ||||
|                         "Resource does not exist. It would be created with the following content:" | ||||
|                     ); | ||||
| @ -357,14 +423,14 @@ impl K8sClient { | ||||
| 
 | ||||
|                     // Print each line of the new resource with a '+' prefix.
 | ||||
|                     for line in new_yaml.lines() { | ||||
|                         println!("+{}", line); | ||||
|                         println!("+{line}"); | ||||
|                     } | ||||
|                     // In a dry run, we return the new resource state that would have been created.
 | ||||
|                     Ok(resource.clone()) | ||||
|                 } | ||||
|                 Err(e) => { | ||||
|                     // Another API error occurred.
 | ||||
|                     error!("Failed to get resource '{}': {}", name, e); | ||||
|                     error!("Failed to get resource '{name}': {e}"); | ||||
|                     Err(e) | ||||
|                 } | ||||
|             } | ||||
| @ -379,7 +445,7 @@ impl K8sClient { | ||||
|     where | ||||
|         K: Resource + Clone + std::fmt::Debug + DeserializeOwned + serde::Serialize, | ||||
|         <K as Resource>::Scope: ApplyStrategy<K>, | ||||
|         <K as kube::Resource>::DynamicType: Default, | ||||
|         <K as Resource>::DynamicType: Default, | ||||
|     { | ||||
|         let mut result = Vec::new(); | ||||
|         for r in resource.iter() { | ||||
| @ -419,9 +485,12 @@ impl K8sClient { | ||||
|             .as_str() | ||||
|             .expect("couldn't get kind as str"); | ||||
| 
 | ||||
|         let split: Vec<&str> = api_version.splitn(2, "/").collect(); | ||||
|         let g = split[0]; | ||||
|         let v = split[1]; | ||||
|         let mut it = api_version.splitn(2, '/'); | ||||
|         let first = it.next().unwrap(); | ||||
|         let (g, v) = match it.next() { | ||||
|             Some(second) => (first, second), | ||||
|             None => ("", first), | ||||
|         }; | ||||
| 
 | ||||
|         let gvk = GroupVersionKind::gvk(g, v, kind); | ||||
|         let api_resource = ApiResource::from_gvk(&gvk); | ||||
| @ -441,10 +510,7 @@ impl K8sClient { | ||||
| 
 | ||||
|         // 6. Apply the object to the cluster using Server-Side Apply.
 | ||||
|         //    This will create the resource if it doesn't exist, or update it if it does.
 | ||||
|         println!( | ||||
|             "Applying Argo Application '{}' in namespace '{}'...", | ||||
|             name, namespace | ||||
|         ); | ||||
|         println!("Applying '{name}' in namespace '{namespace}'...",); | ||||
|         let patch_params = PatchParams::apply("harmony"); // Use a unique field manager name
 | ||||
|         let result = api.patch(name, &patch_params, &Patch::Apply(&obj)).await?; | ||||
| 
 | ||||
| @ -453,6 +519,51 @@ impl K8sClient { | ||||
|         Ok(()) | ||||
|     } | ||||
| 
 | ||||
|     /// Apply a resource from a URL
 | ||||
|     ///
 | ||||
|     /// It is the equivalent of `kubectl apply -f <url>`
 | ||||
|     pub async fn apply_url(&self, url: Url, ns: Option<&str>) -> Result<(), Error> { | ||||
|         let patch_params = PatchParams::apply("harmony"); | ||||
|         let discovery = kube::Discovery::new(self.client.clone()).run().await?; | ||||
| 
 | ||||
|         let yaml = reqwest::get(url) | ||||
|             .await | ||||
|             .expect("Could not get URL") | ||||
|             .text() | ||||
|             .await | ||||
|             .expect("Could not get content from URL"); | ||||
| 
 | ||||
|         for doc in multidoc_deserialize(&yaml).expect("failed to parse YAML from file") { | ||||
|             let obj: DynamicObject = | ||||
|                 serde_yaml::from_value(doc).expect("cannot apply without valid YAML"); | ||||
|             let namespace = obj.metadata.namespace.as_deref().or(ns); | ||||
|             let type_meta = obj | ||||
|                 .types | ||||
|                 .as_ref() | ||||
|                 .expect("cannot apply object without valid TypeMeta"); | ||||
|             let gvk = GroupVersionKind::try_from(type_meta) | ||||
|                 .expect("cannot apply object without valid GroupVersionKind"); | ||||
|             let name = obj.name_any(); | ||||
| 
 | ||||
|             if let Some((ar, caps)) = discovery.resolve_gvk(&gvk) { | ||||
|                 let api = get_dynamic_api(ar, caps, self.client.clone(), namespace, false); | ||||
|                 trace!( | ||||
|                     "Applying {}: \n{}", | ||||
|                     gvk.kind, | ||||
|                     serde_yaml::to_string(&obj).expect("Failed to serialize YAML") | ||||
|                 ); | ||||
|                 let data: serde_json::Value = | ||||
|                     serde_json::to_value(&obj).expect("Failed to serialize JSON"); | ||||
|                 let _r = api.patch(&name, &patch_params, &Patch::Apply(data)).await?; | ||||
|                 debug!("applied {} {}", gvk.kind, name); | ||||
|             } else { | ||||
|                 warn!("Cannot apply document for unknown {gvk:?}"); | ||||
|             } | ||||
|         } | ||||
| 
 | ||||
|         Ok(()) | ||||
|     } | ||||
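As the doc comment above notes, `apply_url` is the equivalent of `kubectl apply -f <url>`. A short, hedged usage sketch (the manifest URL is a placeholder and errors are collapsed into a boxed error):

    // Sketch only: assumes K8sClient is in scope; the URL below is not a real manifest.
    async fn apply_remote_manifest() -> Result<(), Box<dyn std::error::Error>> {
        let client = K8sClient::try_default().await?;
        client
            .apply_url(
                url::Url::parse("https://example.com/manifests/operator.yaml")?,
                Some("nmstate"), // fallback namespace for objects that do not set one
            )
            .await?;
        Ok(())
    }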
| 
 | ||||
|     pub(crate) async fn from_kubeconfig(path: &str) -> Option<K8sClient> { | ||||
|         let k = match Kubeconfig::read_from(path) { | ||||
|             Ok(k) => k, | ||||
| @ -472,6 +583,31 @@ impl K8sClient { | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| fn get_dynamic_api( | ||||
|     resource: ApiResource, | ||||
|     capabilities: ApiCapabilities, | ||||
|     client: Client, | ||||
|     ns: Option<&str>, | ||||
|     all: bool, | ||||
| ) -> Api<DynamicObject> { | ||||
|     if capabilities.scope == Scope::Cluster || all { | ||||
|         Api::all_with(client, &resource) | ||||
|     } else if let Some(namespace) = ns { | ||||
|         Api::namespaced_with(client, namespace, &resource) | ||||
|     } else { | ||||
|         Api::default_namespaced_with(client, &resource) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| fn multidoc_deserialize(data: &str) -> Result<Vec<serde_yaml::Value>, serde_yaml::Error> { | ||||
|     use serde::Deserialize; | ||||
|     let mut docs = vec![]; | ||||
|     for de in serde_yaml::Deserializer::from_str(data) { | ||||
|         docs.push(serde_yaml::Value::deserialize(de)?); | ||||
|     } | ||||
|     Ok(docs) | ||||
| } | ||||
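`multidoc_deserialize` simply splits a multi-document YAML stream into one `serde_yaml::Value` per document. A small standalone illustration of the same pattern (assumes `serde` and `serde_yaml` as dependencies):

    use serde::Deserialize;

    fn main() -> Result<(), serde_yaml::Error> {
        let data = "kind: Namespace\n---\nkind: ServiceAccount\n";
        let mut docs = Vec::new();
        for de in serde_yaml::Deserializer::from_str(data) {
            // Each `---`-separated document becomes its own Value.
            docs.push(serde_yaml::Value::deserialize(de)?);
        }
        assert_eq!(docs.len(), 2);
        Ok(())
    }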
| 
 | ||||
| pub trait ApplyStrategy<K: Resource> { | ||||
|     fn get_api(client: &Client, ns: Option<&str>) -> Api<K>; | ||||
| } | ||||
|  | ||||
| @ -1,6 +1,12 @@ | ||||
| use std::{process::Command, sync::Arc}; | ||||
| use std::{collections::BTreeMap, process::Command, sync::Arc, time::Duration}; | ||||
| 
 | ||||
| use async_trait::async_trait; | ||||
| use base64::{Engine, engine::general_purpose}; | ||||
| use k8s_openapi::api::{ | ||||
|     core::v1::Secret, | ||||
|     rbac::v1::{ClusterRoleBinding, RoleRef, Subject}, | ||||
| }; | ||||
| use kube::api::{DynamicObject, GroupVersionKind, ObjectMeta}; | ||||
| use log::{debug, info, warn}; | ||||
| use serde::Serialize; | ||||
| use tokio::sync::OnceCell; | ||||
| @ -11,17 +17,30 @@ use crate::{ | ||||
|     inventory::Inventory, | ||||
|     modules::{ | ||||
|         k3d::K3DInstallationScore, | ||||
|         monitoring::kube_prometheus::crd::{ | ||||
|             crd_alertmanager_config::CRDPrometheus, | ||||
|             prometheus_operator::prometheus_operator_helm_chart_score, | ||||
|             rhob_alertmanager_config::RHOBObservability, | ||||
|         k8s::ingress::{K8sIngressScore, PathType}, | ||||
|         monitoring::{ | ||||
|             grafana::{grafana::Grafana, helm::helm_grafana::grafana_helm_chart_score}, | ||||
|             kube_prometheus::crd::{ | ||||
|                 crd_alertmanager_config::CRDPrometheus, | ||||
|                 crd_grafana::{ | ||||
|                     Grafana as GrafanaCRD, GrafanaCom, GrafanaDashboard, | ||||
|                     GrafanaDashboardDatasource, GrafanaDashboardSpec, GrafanaDatasource, | ||||
|                     GrafanaDatasourceConfig, GrafanaDatasourceJsonData, | ||||
|                     GrafanaDatasourceSecureJsonData, GrafanaDatasourceSpec, GrafanaSpec, | ||||
|                 }, | ||||
|                 crd_prometheuses::LabelSelector, | ||||
|                 prometheus_operator::prometheus_operator_helm_chart_score, | ||||
|                 rhob_alertmanager_config::RHOBObservability, | ||||
|                 service_monitor::ServiceMonitor, | ||||
|             }, | ||||
|         }, | ||||
|         prometheus::{ | ||||
|             k8s_prometheus_alerting_score::K8sPrometheusCRDAlertingScore, | ||||
|             prometheus::PrometheusApplicationMonitoring, rhob_alerting_score::RHOBAlertingScore, | ||||
|             prometheus::PrometheusMonitoring, rhob_alerting_score::RHOBAlertingScore, | ||||
|         }, | ||||
|     }, | ||||
|     score::Score, | ||||
|     topology::ingress::Ingress, | ||||
| }; | ||||
| 
 | ||||
| use super::{ | ||||
| @ -45,6 +64,13 @@ struct K8sState { | ||||
|     message: String, | ||||
| } | ||||
| 
 | ||||
| #[derive(Debug, Clone)] | ||||
| pub enum KubernetesDistribution { | ||||
|     OpenshiftFamily, | ||||
|     K3sFamily, | ||||
|     Default, | ||||
| } | ||||
| 
 | ||||
| #[derive(Debug, Clone)] | ||||
| enum K8sSource { | ||||
|     LocalK3d, | ||||
| @ -55,6 +81,7 @@ enum K8sSource { | ||||
| pub struct K8sAnywhereTopology { | ||||
|     k8s_state: Arc<OnceCell<Option<K8sState>>>, | ||||
|     tenant_manager: Arc<OnceCell<K8sTenantManager>>, | ||||
|     k8s_distribution: Arc<OnceCell<KubernetesDistribution>>, | ||||
|     config: Arc<K8sAnywhereConfig>, | ||||
| } | ||||
| 
 | ||||
| @ -76,41 +103,172 @@ impl K8sclient for K8sAnywhereTopology { | ||||
| } | ||||
| 
 | ||||
| #[async_trait] | ||||
| impl PrometheusApplicationMonitoring<CRDPrometheus> for K8sAnywhereTopology { | ||||
| impl Grafana for K8sAnywhereTopology { | ||||
|     async fn ensure_grafana_operator( | ||||
|         &self, | ||||
|         inventory: &Inventory, | ||||
|     ) -> Result<PreparationOutcome, PreparationError> { | ||||
|         debug!("ensure grafana operator"); | ||||
|         let client = self.k8s_client().await.unwrap(); | ||||
|         let grafana_gvk = GroupVersionKind { | ||||
|             group: "grafana.integreatly.org".to_string(), | ||||
|             version: "v1beta1".to_string(), | ||||
|             kind: "Grafana".to_string(), | ||||
|         }; | ||||
|         let name = "grafanas.grafana.integreatly.org"; | ||||
|         let ns = "grafana"; | ||||
| 
 | ||||
|         let grafana_crd = client | ||||
|             .get_resource_json_value(name, Some(ns), &grafana_gvk) | ||||
|             .await; | ||||
|         match grafana_crd { | ||||
|             Ok(_) => { | ||||
|                 return Ok(PreparationOutcome::Success { | ||||
|                     details: "Found grafana CRDs in cluster".to_string(), | ||||
|                 }); | ||||
|             } | ||||
| 
 | ||||
|             Err(_) => { | ||||
|                 return self | ||||
|                     .install_grafana_operator(inventory, Some("grafana")) | ||||
|                     .await; | ||||
|             } | ||||
|         }; | ||||
|     } | ||||
|     async fn install_grafana(&self) -> Result<PreparationOutcome, PreparationError> { | ||||
|         let ns = "grafana"; | ||||
| 
 | ||||
|         let mut label = BTreeMap::new(); | ||||
| 
 | ||||
|         label.insert("dashboards".to_string(), "grafana".to_string()); | ||||
| 
 | ||||
|         let label_selector = LabelSelector { | ||||
|             match_labels: label.clone(), | ||||
|             match_expressions: vec![], | ||||
|         }; | ||||
| 
 | ||||
|         let client = self.k8s_client().await?; | ||||
| 
 | ||||
|         let grafana = self.build_grafana(ns, &label); | ||||
| 
 | ||||
|         client.apply(&grafana, Some(ns)).await?; | ||||
|         // TODO: replace this timeout with a proper readiness check
 | ||||
|         client | ||||
|             .wait_until_deployment_ready( | ||||
|                 "grafana-grafana-deployment", | ||||
|                 Some("grafana"), | ||||
|                 Some(Duration::from_secs(30)), | ||||
|             ) | ||||
|             .await?; | ||||
| 
 | ||||
|         let sa_name = "grafana-grafana-sa"; | ||||
|         let token_secret_name = "grafana-sa-token-secret"; | ||||
| 
 | ||||
|         let sa_token_secret = self.build_sa_token_secret(token_secret_name, sa_name, ns); | ||||
| 
 | ||||
|         client.apply(&sa_token_secret, Some(ns)).await?; | ||||
|         let secret_gvk = GroupVersionKind { | ||||
|             group: "".to_string(), | ||||
|             version: "v1".to_string(), | ||||
|             kind: "Secret".to_string(), | ||||
|         }; | ||||
| 
 | ||||
|         let secret = client | ||||
|             .get_resource_json_value(token_secret_name, Some(ns), &secret_gvk) | ||||
|             .await?; | ||||
| 
 | ||||
|         let token = format!( | ||||
|             "Bearer {}", | ||||
|             self.extract_and_normalize_token(&secret).unwrap() | ||||
|         ); | ||||
| 
 | ||||
|         debug!("creating grafana clusterrole binding"); | ||||
| 
 | ||||
|         let clusterrolebinding = | ||||
|             self.build_cluster_rolebinding(sa_name, "cluster-monitoring-view", ns); | ||||
| 
 | ||||
|         client.apply(&clusterrolebinding, Some(ns)).await?; | ||||
| 
 | ||||
|         debug!("creating grafana datasource crd"); | ||||
| 
 | ||||
|         let thanos_url = format!( | ||||
|             "https://{}", | ||||
|             self.get_domain("thanos-querier-openshift-monitoring") | ||||
|                 .await | ||||
|                 .unwrap() | ||||
|         ); | ||||
| 
 | ||||
|         let thanos_openshift_datasource = self.build_grafana_datasource( | ||||
|             "thanos-openshift-monitoring", | ||||
|             ns, | ||||
|             &label_selector, | ||||
|             &thanos_url, | ||||
|             &token, | ||||
|         ); | ||||
| 
 | ||||
|         client.apply(&thanos_openshift_datasource, Some(ns)).await?; | ||||
| 
 | ||||
|         debug!("creating grafana dashboard crd"); | ||||
|         let dashboard = self.build_grafana_dashboard(ns, &label_selector); | ||||
| 
 | ||||
|         client.apply(&dashboard, Some(ns)).await?; | ||||
|         debug!("creating grafana ingress"); | ||||
|         let grafana_ingress = self.build_grafana_ingress(ns).await; | ||||
| 
 | ||||
|         grafana_ingress | ||||
|             .interpret(&Inventory::empty(), self) | ||||
|             .await | ||||
|             .map_err(|e| PreparationError::new(e.to_string()))?; | ||||
| 
 | ||||
|         Ok(PreparationOutcome::Success { | ||||
|             details: "Installed grafana composants".to_string(), | ||||
|         }) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[async_trait] | ||||
| impl PrometheusMonitoring<CRDPrometheus> for K8sAnywhereTopology { | ||||
|     async fn install_prometheus( | ||||
|         &self, | ||||
|         sender: &CRDPrometheus, | ||||
|         inventory: &Inventory, | ||||
|         receivers: Option<Vec<Box<dyn AlertReceiver<CRDPrometheus>>>>, | ||||
|         _inventory: &Inventory, | ||||
|         _receivers: Option<Vec<Box<dyn AlertReceiver<CRDPrometheus>>>>, | ||||
|     ) -> Result<PreparationOutcome, PreparationError> { | ||||
|         let client = self.k8s_client().await?; | ||||
| 
 | ||||
|         for monitor in sender.service_monitor.iter() { | ||||
|             client | ||||
|                 .apply(monitor, Some(&sender.namespace)) | ||||
|                 .await | ||||
|                 .map_err(|e| PreparationError::new(e.to_string()))?; | ||||
|         } | ||||
|         Ok(PreparationOutcome::Success { | ||||
|             details: "successfuly installed prometheus components".to_string(), | ||||
|         }) | ||||
|     } | ||||
| 
 | ||||
|     async fn ensure_prometheus_operator( | ||||
|         &self, | ||||
|         sender: &CRDPrometheus, | ||||
|         _inventory: &Inventory, | ||||
|     ) -> Result<PreparationOutcome, PreparationError> { | ||||
|         let po_result = self.ensure_prometheus_operator(sender).await?; | ||||
| 
 | ||||
|         if po_result == PreparationOutcome::Noop { | ||||
|             debug!("Skipping Prometheus CR installation due to missing operator."); | ||||
|             return Ok(po_result); | ||||
|         } | ||||
| 
 | ||||
|         let result = self | ||||
|             .get_k8s_prometheus_application_score(sender.clone(), receivers) | ||||
|             .await | ||||
|             .interpret(inventory, self) | ||||
|             .await; | ||||
| 
 | ||||
|         match result { | ||||
|             Ok(outcome) => match outcome.status { | ||||
|                 InterpretStatus::SUCCESS => Ok(PreparationOutcome::Success { | ||||
|                     details: outcome.message, | ||||
|                 }), | ||||
|                 InterpretStatus::NOOP => Ok(PreparationOutcome::Noop), | ||||
|                 _ => Err(PreparationError::new(outcome.message)), | ||||
|             }, | ||||
|             Err(err) => Err(PreparationError::new(err.to_string())), | ||||
|         match po_result { | ||||
|             PreparationOutcome::Success { details: _ } => { | ||||
|                 debug!("Detected prometheus crds operator present in cluster."); | ||||
|                 return Ok(po_result); | ||||
|             } | ||||
|             PreparationOutcome::Noop => { | ||||
|                 debug!("Skipping Prometheus CR installation due to missing operator."); | ||||
|                 return Ok(po_result); | ||||
|             } | ||||
|         } | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[async_trait] | ||||
| impl PrometheusApplicationMonitoring<RHOBObservability> for K8sAnywhereTopology { | ||||
| impl PrometheusMonitoring<RHOBObservability> for K8sAnywhereTopology { | ||||
|     async fn install_prometheus( | ||||
|         &self, | ||||
|         sender: &RHOBObservability, | ||||
| @ -144,6 +302,14 @@ impl PrometheusApplicationMonitoring<RHOBObservability> for K8sAnywhereTopology | ||||
|             Err(err) => Err(PreparationError::new(err.to_string())), | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     async fn ensure_prometheus_operator( | ||||
|         &self, | ||||
|         _sender: &RHOBObservability, | ||||
|         _inventory: &Inventory, | ||||
|     ) -> Result<PreparationOutcome, PreparationError> { | ||||
|         todo!() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl Serialize for K8sAnywhereTopology { | ||||
| @ -160,6 +326,7 @@ impl K8sAnywhereTopology { | ||||
|         Self { | ||||
|             k8s_state: Arc::new(OnceCell::new()), | ||||
|             tenant_manager: Arc::new(OnceCell::new()), | ||||
|             k8s_distribution: Arc::new(OnceCell::new()), | ||||
|             config: Arc::new(K8sAnywhereConfig::from_env()), | ||||
|         } | ||||
|     } | ||||
| @ -168,10 +335,216 @@ impl K8sAnywhereTopology { | ||||
|         Self { | ||||
|             k8s_state: Arc::new(OnceCell::new()), | ||||
|             tenant_manager: Arc::new(OnceCell::new()), | ||||
|             k8s_distribution: Arc::new(OnceCell::new()), | ||||
|             config: Arc::new(config), | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     pub async fn get_k8s_distribution(&self) -> Result<&KubernetesDistribution, PreparationError> { | ||||
|         self.k8s_distribution | ||||
|             .get_or_try_init(async || { | ||||
|                 let client = self.k8s_client().await.unwrap(); | ||||
| 
 | ||||
|                 let discovery = client.discovery().await.map_err(|e| { | ||||
|                     PreparationError::new(format!("Could not discover API groups: {}", e)) | ||||
|                 })?; | ||||
| 
 | ||||
|                 let version = client.get_apiserver_version().await.map_err(|e| { | ||||
|                     PreparationError::new(format!("Could not get server version: {}", e)) | ||||
|                 })?; | ||||
| 
 | ||||
|                 // OpenShift / OKD
 | ||||
|                 if discovery | ||||
|                     .groups() | ||||
|                     .any(|g| g.name() == "project.openshift.io") | ||||
|                 { | ||||
|                     return Ok(KubernetesDistribution::OpenshiftFamily); | ||||
|                 } | ||||
| 
 | ||||
|                 // K3d / K3s
 | ||||
|                 if version.git_version.contains("k3s") { | ||||
|                     return Ok(KubernetesDistribution::K3sFamily); | ||||
|                 } | ||||
| 
 | ||||
|                 return Ok(KubernetesDistribution::Default); | ||||
|             }) | ||||
|             .await | ||||
|     } | ||||
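A hedged sketch of how a caller might branch on `get_k8s_distribution`. The ingress class names are illustrative, although "openshift-default" matches the value used later in this file.

    // Illustrative only: assumes K8sAnywhereTopology, KubernetesDistribution and
    // PreparationError are in scope as defined above.
    async fn pick_ingress_class(
        topology: &K8sAnywhereTopology,
    ) -> Result<&'static str, PreparationError> {
        Ok(match topology.get_k8s_distribution().await? {
            KubernetesDistribution::OpenshiftFamily => "openshift-default",
            KubernetesDistribution::K3sFamily | KubernetesDistribution::Default => "traefik",
        })
    }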
| 
 | ||||
|     fn extract_and_normalize_token(&self, secret: &DynamicObject) -> Option<String> { | ||||
|         let token_b64 = secret | ||||
|             .data | ||||
|             .get("token") | ||||
|             .or_else(|| secret.data.get("data").and_then(|d| d.get("token"))) | ||||
|             .and_then(|v| v.as_str())?; | ||||
| 
 | ||||
|         let bytes = general_purpose::STANDARD.decode(token_b64).ok()?; | ||||
| 
 | ||||
|         let s = String::from_utf8(bytes).ok()?; | ||||
| 
 | ||||
|         let cleaned = s | ||||
|             .trim_matches(|c: char| c.is_whitespace() || c == '\0') | ||||
|             .to_string(); | ||||
|         Some(cleaned) | ||||
|     } | ||||
| 
 | ||||
|     pub fn build_cluster_rolebinding( | ||||
|         &self, | ||||
|         service_account_name: &str, | ||||
|         clusterrole_name: &str, | ||||
|         ns: &str, | ||||
|     ) -> ClusterRoleBinding { | ||||
|         ClusterRoleBinding { | ||||
|             metadata: ObjectMeta { | ||||
|                 name: Some(format!("{}-view-binding", service_account_name)), | ||||
|                 ..Default::default() | ||||
|             }, | ||||
|             role_ref: RoleRef { | ||||
|                 api_group: "rbac.authorization.k8s.io".into(), | ||||
|                 kind: "ClusterRole".into(), | ||||
|                 name: clusterrole_name.into(), | ||||
|             }, | ||||
|             subjects: Some(vec![Subject { | ||||
|                 kind: "ServiceAccount".into(), | ||||
|                 name: service_account_name.into(), | ||||
|                 namespace: Some(ns.into()), | ||||
|                 ..Default::default() | ||||
|             }]), | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     pub fn build_sa_token_secret( | ||||
|         &self, | ||||
|         secret_name: &str, | ||||
|         service_account_name: &str, | ||||
|         ns: &str, | ||||
|     ) -> Secret { | ||||
|         let mut annotations = BTreeMap::new(); | ||||
|         annotations.insert( | ||||
|             "kubernetes.io/service-account.name".to_string(), | ||||
|             service_account_name.to_string(), | ||||
|         ); | ||||
| 
 | ||||
|         Secret { | ||||
|             metadata: ObjectMeta { | ||||
|                 name: Some(secret_name.into()), | ||||
|                 namespace: Some(ns.into()), | ||||
|                 annotations: Some(annotations), | ||||
|                 ..Default::default() | ||||
|             }, | ||||
|             type_: Some("kubernetes.io/service-account-token".to_string()), | ||||
|             ..Default::default() | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     fn build_grafana_datasource( | ||||
|         &self, | ||||
|         name: &str, | ||||
|         ns: &str, | ||||
|         label_selector: &LabelSelector, | ||||
|         url: &str, | ||||
|         token: &str, | ||||
|     ) -> GrafanaDatasource { | ||||
|         let mut json_data = BTreeMap::new(); | ||||
|         json_data.insert("timeInterval".to_string(), "5s".to_string()); | ||||
| 
 | ||||
|         GrafanaDatasource { | ||||
|             metadata: ObjectMeta { | ||||
|                 name: Some(name.to_string()), | ||||
|                 namespace: Some(ns.to_string()), | ||||
|                 ..Default::default() | ||||
|             }, | ||||
|             spec: GrafanaDatasourceSpec { | ||||
|                 instance_selector: label_selector.clone(), | ||||
|                 allow_cross_namespace_import: Some(true), | ||||
|                 values_from: None, | ||||
|                 datasource: GrafanaDatasourceConfig { | ||||
|                     access: "proxy".to_string(), | ||||
|                     name: name.to_string(), | ||||
|                     r#type: "prometheus".to_string(), | ||||
|                     url: url.to_string(), | ||||
|                     database: None, | ||||
|                     json_data: Some(GrafanaDatasourceJsonData { | ||||
|                         time_interval: Some("60s".to_string()), | ||||
|                         http_header_name1: Some("Authorization".to_string()), | ||||
|                         tls_skip_verify: Some(true), | ||||
|                         oauth_pass_thru: Some(true), | ||||
|                     }), | ||||
|                     secure_json_data: Some(GrafanaDatasourceSecureJsonData { | ||||
|                         http_header_value1: Some(format!("Bearer {token}")), | ||||
|                     }), | ||||
|                     is_default: Some(false), | ||||
|                     editable: Some(true), | ||||
|                 }, | ||||
|             }, | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     fn build_grafana_dashboard( | ||||
|         &self, | ||||
|         ns: &str, | ||||
|         label_selector: &LabelSelector, | ||||
|     ) -> GrafanaDashboard { | ||||
|         GrafanaDashboard { | ||||
|             metadata: ObjectMeta { | ||||
|                 name: Some(format!("grafana-dashboard-{}", ns)), | ||||
|                 namespace: Some(ns.to_string()), | ||||
|                 ..Default::default() | ||||
|             }, | ||||
|             spec: GrafanaDashboardSpec { | ||||
|                 resync_period: Some("30s".to_string()), | ||||
|                 instance_selector: label_selector.clone(), | ||||
|                 datasources: Some(vec![GrafanaDashboardDatasource { | ||||
|                     input_name: "DS_PROMETHEUS".to_string(), | ||||
|                     datasource_name: "thanos-openshift-monitoring".to_string(), | ||||
|                 }]), | ||||
|                 json: None, | ||||
|                 grafana_com: Some(GrafanaCom { | ||||
|                     id: 17406, | ||||
|                     revision: None, | ||||
|                 }), | ||||
|             }, | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     fn build_grafana(&self, ns: &str, labels: &BTreeMap<String, String>) -> GrafanaCRD { | ||||
|         GrafanaCRD { | ||||
|             metadata: ObjectMeta { | ||||
|                 name: Some(format!("grafana-{}", ns)), | ||||
|                 namespace: Some(ns.to_string()), | ||||
|                 labels: Some(labels.clone()), | ||||
|                 ..Default::default() | ||||
|             }, | ||||
|             spec: GrafanaSpec { | ||||
|                 config: None, | ||||
|                 admin_user: None, | ||||
|                 admin_password: None, | ||||
|                 ingress: None, | ||||
|                 persistence: None, | ||||
|                 resources: None, | ||||
|             }, | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     async fn build_grafana_ingress(&self, ns: &str) -> K8sIngressScore { | ||||
|         let domain = self.get_domain(&format!("grafana-{}", ns)).await.unwrap(); | ||||
|         let name = format!("{}-grafana", ns); | ||||
|         let backend_service = format!("grafana-{}-service", ns); | ||||
| 
 | ||||
|         K8sIngressScore { | ||||
|             name: fqdn::fqdn!(&name), | ||||
|             host: fqdn::fqdn!(&domain), | ||||
|             backend_service: fqdn::fqdn!(&backend_service), | ||||
|             port: 3000, | ||||
|             path: Some("/".to_string()), | ||||
|             path_type: Some(PathType::Prefix), | ||||
|             namespace: Some(fqdn::fqdn!(&ns)), | ||||
|             ingress_class_name: Some("openshift-default".to_string()), | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     async fn get_cluster_observability_operator_prometheus_application_score( | ||||
|         &self, | ||||
|         sender: RHOBObservability, | ||||
| @ -189,12 +562,33 @@ impl K8sAnywhereTopology { | ||||
|         &self, | ||||
|         sender: CRDPrometheus, | ||||
|         receivers: Option<Vec<Box<dyn AlertReceiver<CRDPrometheus>>>>, | ||||
|         service_monitors: Option<Vec<ServiceMonitor>>, | ||||
|     ) -> K8sPrometheusCRDAlertingScore { | ||||
|         K8sPrometheusCRDAlertingScore { | ||||
|         return K8sPrometheusCRDAlertingScore { | ||||
|             sender, | ||||
|             receivers: receivers.unwrap_or_default(), | ||||
|             service_monitors: vec![], | ||||
|             service_monitors: service_monitors.unwrap_or_default(), | ||||
|             prometheus_rules: vec![], | ||||
|         }; | ||||
|     } | ||||
| 
 | ||||
|     async fn openshift_ingress_operator_available(&self) -> Result<(), PreparationError> { | ||||
|         let client = self.k8s_client().await?; | ||||
|         let gvk = GroupVersionKind { | ||||
|             group: "operator.openshift.io".into(), | ||||
|             version: "v1".into(), | ||||
|             kind: "IngressController".into(), | ||||
|         }; | ||||
|         let ic = client | ||||
|             .get_resource_json_value("default", Some("openshift-ingress-operator"), &gvk) | ||||
|             .await?; | ||||
|         let ready_replicas = ic.data["status"]["availableReplicas"].as_i64().unwrap_or(0); | ||||
|         if ready_replicas >= 1 { | ||||
|             Ok(()) | ||||
|         } else { | ||||
|             Err(PreparationError::new( | ||||
|                 "openshift-ingress-operator not available".to_string(), | ||||
|             )) | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
| @ -350,6 +744,10 @@ impl K8sAnywhereTopology { | ||||
|             if let Some(Some(k8s_state)) = self.k8s_state.get() { | ||||
|                 match k8s_state.source { | ||||
|                     K8sSource::LocalK3d => { | ||||
|                         warn!( | ||||
|                             "Installing observability operator is not supported on LocalK3d source" | ||||
|                         ); | ||||
|                         return Ok(PreparationOutcome::Noop); | ||||
|                         debug!("installing cluster observability operator"); | ||||
|                         todo!(); | ||||
|                         let op_score = | ||||
| @ -439,6 +837,30 @@ impl K8sAnywhereTopology { | ||||
|             details: "prometheus operator present in cluster".into(), | ||||
|         }) | ||||
|     } | ||||
| 
 | ||||
|     async fn install_grafana_operator( | ||||
|         &self, | ||||
|         inventory: &Inventory, | ||||
|         ns: Option<&str>, | ||||
|     ) -> Result<PreparationOutcome, PreparationError> { | ||||
|         let namespace = ns.unwrap_or("grafana"); | ||||
|         info!("installing grafana operator in ns {namespace}"); | ||||
|         let tenant = self.get_k8s_tenant_manager()?.get_tenant_config().await; | ||||
|         let namespace_scope = tenant.is_some(); | ||||
|         grafana_helm_chart_score(namespace, namespace_scope) | ||||
|             .interpret(inventory, self) | ||||
|             .await | ||||
|             .map_err(|e| PreparationError::new(e.to_string()))?; | ||||
|         Ok(PreparationOutcome::Success { | ||||
|             details: format!("Successfully installed grafana operator in ns {namespace}"), | ||||
|         }) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[derive(Clone, Debug)] | ||||
| @ -528,7 +950,7 @@ impl MultiTargetTopology for K8sAnywhereTopology { | ||||
|         match self.config.harmony_profile.to_lowercase().as_str() { | ||||
|             "staging" => DeploymentTarget::Staging, | ||||
|             "production" => DeploymentTarget::Production, | ||||
|             _ => todo!("HARMONY_PROFILE must be set when use_local_k3d is not set"), | ||||
|             _ => todo!("HARMONY_PROFILE must be set when use_local_k3d is false"), | ||||
|         } | ||||
|     } | ||||
| } | ||||
| @ -550,3 +972,45 @@ impl TenantManager for K8sAnywhereTopology { | ||||
|             .await | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[async_trait] | ||||
| impl Ingress for K8sAnywhereTopology { | ||||
|     // TODO: this is specific to OpenShift/OKD, which violates the K8sAnywhere abstraction
 | ||||
|     async fn get_domain(&self, service: &str) -> Result<String, PreparationError> { | ||||
|         let client = self.k8s_client().await?; | ||||
| 
 | ||||
|         if let Some(Some(k8s_state)) = self.k8s_state.get() { | ||||
|             match k8s_state.source { | ||||
|                 K8sSource::LocalK3d => Ok(format!("{service}.local.k3d")), | ||||
|                 K8sSource::Kubeconfig => { | ||||
|                     self.openshift_ingress_operator_available().await?; | ||||
| 
 | ||||
|                     let gvk = GroupVersionKind { | ||||
|                         group: "operator.openshift.io".into(), | ||||
|                         version: "v1".into(), | ||||
|                         kind: "IngressController".into(), | ||||
|                     }; | ||||
|                     let ic = client | ||||
|                         .get_resource_json_value( | ||||
|                             "default", | ||||
|                             Some("openshift-ingress-operator"), | ||||
|                             &gvk, | ||||
|                         ) | ||||
|                         .await | ||||
|                         .map_err(|_| { | ||||
|                             PreparationError::new("Failed to fetch IngressController".to_string()) | ||||
|                         })?; | ||||
| 
 | ||||
|                     match ic.data["status"]["domain"].as_str() { | ||||
|                         Some(domain) => Ok(format!("{service}.{domain}")), | ||||
|                         None => Err(PreparationError::new("Could not find domain".to_string())), | ||||
|                     } | ||||
|                 } | ||||
|             } | ||||
|         } else { | ||||
|             Err(PreparationError::new( | ||||
|                 "Cannot get domain: unable to detect K8s state".to_string(), | ||||
|             )) | ||||
|         } | ||||
|     } | ||||
| } | ||||
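| 
| // A minimal usage sketch of the `Ingress` capability implemented above. The `expose_grafana` | ||||
| // helper and the "grafana-demo" service name are hypothetical, not part of the Harmony API. | ||||
| async fn expose_grafana(topology: &K8sAnywhereTopology) -> Result<String, PreparationError> { | ||||
|     // On LocalK3d this resolves to "grafana-demo.local.k3d"; on OpenShift/OKD it appends the | ||||
|     // IngressController's status.domain, e.g. "grafana-demo.apps.cluster.example.com". | ||||
|     topology.get_domain("grafana-demo").await | ||||
| } | ||||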
|  | ||||
| @ -28,13 +28,7 @@ pub trait LoadBalancer: Send + Sync { | ||||
|         &self, | ||||
|         service: &LoadBalancerService, | ||||
|     ) -> Result<(), ExecutorError> { | ||||
|         debug!( | ||||
|             "Listing LoadBalancer services {:?}", | ||||
|             self.list_services().await | ||||
|         ); | ||||
|         if !self.list_services().await.contains(service) { | ||||
|             self.add_service(service).await?; | ||||
|         } | ||||
|         self.add_service(service).await?; | ||||
|         Ok(()) | ||||
|     } | ||||
| } | ||||
|  | ||||
| @ -1,9 +1,10 @@ | ||||
| use async_trait::async_trait; | ||||
| use derive_new::new; | ||||
| use serde::{Deserialize, Serialize}; | ||||
| 
 | ||||
| use super::{HelmCommand, PreparationError, PreparationOutcome, Topology}; | ||||
| 
 | ||||
| #[derive(new)] | ||||
| #[derive(new, Clone, Debug, Serialize, Deserialize)] | ||||
| pub struct LocalhostTopology; | ||||
| 
 | ||||
| #[async_trait] | ||||
|  | ||||
| @ -1,4 +1,5 @@ | ||||
| mod ha_cluster; | ||||
| pub mod ingress; | ||||
| use harmony_types::net::IpAddress; | ||||
| mod host_binding; | ||||
| mod http; | ||||
|  | ||||
| @ -1,10 +1,21 @@ | ||||
| use std::{net::Ipv4Addr, str::FromStr, sync::Arc}; | ||||
| use std::{ | ||||
|     error::Error, | ||||
|     fmt::{self, Debug}, | ||||
|     net::Ipv4Addr, | ||||
|     str::FromStr, | ||||
|     sync::Arc, | ||||
| }; | ||||
| 
 | ||||
| use async_trait::async_trait; | ||||
| use harmony_types::net::{IpAddress, MacAddress}; | ||||
| use derive_new::new; | ||||
| use harmony_types::{ | ||||
|     id::Id, | ||||
|     net::{IpAddress, MacAddress}, | ||||
|     switch::PortLocation, | ||||
| }; | ||||
| use serde::Serialize; | ||||
| 
 | ||||
| use crate::executors::ExecutorError; | ||||
| use crate::{executors::ExecutorError, hardware::PhysicalHost}; | ||||
| 
 | ||||
| use super::{LogicalHost, k8s::K8sClient}; | ||||
| 
 | ||||
| @ -15,8 +26,8 @@ pub struct DHCPStaticEntry { | ||||
|     pub ip: Ipv4Addr, | ||||
| } | ||||
| 
 | ||||
| impl std::fmt::Display for DHCPStaticEntry { | ||||
|     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { | ||||
| impl fmt::Display for DHCPStaticEntry { | ||||
|     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { | ||||
|         let mac = self | ||||
|             .mac | ||||
|             .iter() | ||||
| @ -38,8 +49,8 @@ pub trait Firewall: Send + Sync { | ||||
|     fn get_host(&self) -> LogicalHost; | ||||
| } | ||||
| 
 | ||||
| impl std::fmt::Debug for dyn Firewall { | ||||
|     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { | ||||
| impl Debug for dyn Firewall { | ||||
|     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { | ||||
|         f.write_fmt(format_args!("Firewall {}", self.get_ip())) | ||||
|     } | ||||
| } | ||||
| @ -61,7 +72,7 @@ pub struct PxeOptions { | ||||
| } | ||||
| 
 | ||||
| #[async_trait] | ||||
| pub trait DhcpServer: Send + Sync + std::fmt::Debug { | ||||
| pub trait DhcpServer: Send + Sync + Debug { | ||||
|     async fn add_static_mapping(&self, entry: &DHCPStaticEntry) -> Result<(), ExecutorError>; | ||||
|     async fn remove_static_mapping(&self, mac: &MacAddress) -> Result<(), ExecutorError>; | ||||
|     async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)>; | ||||
| @ -100,8 +111,8 @@ pub trait DnsServer: Send + Sync { | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl std::fmt::Debug for dyn DnsServer { | ||||
|     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { | ||||
| impl Debug for dyn DnsServer { | ||||
|     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { | ||||
|         f.write_fmt(format_args!("DnsServer {}", self.get_ip())) | ||||
|     } | ||||
| } | ||||
| @ -137,8 +148,8 @@ pub enum DnsRecordType { | ||||
|     TXT, | ||||
| } | ||||
| 
 | ||||
| impl std::fmt::Display for DnsRecordType { | ||||
|     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { | ||||
| impl fmt::Display for DnsRecordType { | ||||
|     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { | ||||
|         match self { | ||||
|             DnsRecordType::A => write!(f, "A"), | ||||
|             DnsRecordType::AAAA => write!(f, "AAAA"), | ||||
| @ -172,6 +183,77 @@ impl FromStr for DnsRecordType { | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[async_trait] | ||||
| pub trait Switch: Send + Sync { | ||||
|     async fn setup_switch(&self) -> Result<(), SwitchError>; | ||||
| 
 | ||||
|     async fn get_port_for_mac_address( | ||||
|         &self, | ||||
|         mac_address: &MacAddress, | ||||
|     ) -> Result<Option<PortLocation>, SwitchError>; | ||||
| 
 | ||||
|     async fn configure_host_network(&self, config: &HostNetworkConfig) -> Result<(), SwitchError>; | ||||
| } | ||||
| 
 | ||||
| #[derive(Clone, Debug, PartialEq)] | ||||
| pub struct HostNetworkConfig { | ||||
|     pub host_id: Id, | ||||
|     pub switch_ports: Vec<SwitchPort>, | ||||
| } | ||||
| 
 | ||||
| #[derive(Clone, Debug, PartialEq)] | ||||
| pub struct SwitchPort { | ||||
|     pub interface: NetworkInterface, | ||||
|     pub port: PortLocation, | ||||
| } | ||||
| 
 | ||||
| #[derive(Clone, Debug, PartialEq)] | ||||
| pub struct NetworkInterface { | ||||
|     pub name: String, | ||||
|     pub mac_address: MacAddress, | ||||
|     pub speed_mbps: Option<u32>, | ||||
|     pub mtu: u32, | ||||
| } | ||||
| 
 | ||||
| #[derive(Debug, Clone, new)] | ||||
| pub struct SwitchError { | ||||
|     msg: String, | ||||
| } | ||||
| 
 | ||||
| impl fmt::Display for SwitchError { | ||||
|     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { | ||||
|         f.write_str(&self.msg) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl Error for SwitchError {} | ||||
| 
 | ||||
| #[async_trait] | ||||
| pub trait SwitchClient: Debug + Send + Sync { | ||||
|     /// Executes essential, idempotent, one-time initial configuration steps.
 | ||||
|     ///
 | ||||
|     /// This is an opinionated procedure that sets up a switch to provide high-availability
 | ||||
|     /// capabilities as decided by the NationTech team.
 | ||||
|     ///
 | ||||
|     /// This includes tasks like enabling switchport for all interfaces
 | ||||
|     /// except the ones intended for Fabric Networking, etc.
 | ||||
|     ///
 | ||||
|     /// The implementation must ensure the operation is **idempotent** (safe to run multiple times)
 | ||||
|     /// and that it doesn't break existing configurations.
 | ||||
|     async fn setup(&self) -> Result<(), SwitchError>; | ||||
| 
|         let domain = topology.get_domain("ntfy").await.map_err(|e| e.to_string())?; | ||||
|     async fn find_port( | ||||
|         &self, | ||||
|         mac_address: &MacAddress, | ||||
|     ) -> Result<Option<PortLocation>, SwitchError>; | ||||
| 
 | ||||
|     async fn configure_port_channel( | ||||
|         &self, | ||||
|         channel_name: &str, | ||||
|         switch_ports: Vec<PortLocation>, | ||||
|     ) -> Result<u8, SwitchError>; | ||||
| } | ||||
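| 
| // A minimal sketch of how a `SwitchClient` might be driven to bond a host's ports, assuming | ||||
| // the `HostNetworkConfig` type above is in scope; `bond_host_ports` and the channel naming | ||||
| // scheme are hypothetical. | ||||
| async fn bond_host_ports( | ||||
|     client: &dyn SwitchClient, | ||||
|     config: &HostNetworkConfig, | ||||
| ) -> Result<u8, SwitchError> { | ||||
|     // One-time, idempotent switch preparation (e.g. access mode on edge ports). | ||||
|     client.setup().await?; | ||||
| 
|     // Aggregate the host's switch ports into a single port channel. | ||||
|     let ports: Vec<PortLocation> = config.switch_ports.iter().map(|p| p.port.clone()).collect(); | ||||
|     client | ||||
|         .configure_port_channel(&format!("host-{:?}", config.host_id), ports) | ||||
|         .await | ||||
| } | ||||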
| 
 | ||||
| #[cfg(test)] | ||||
| mod test { | ||||
|     use std::sync::Arc; | ||||
|  | ||||
| @ -21,6 +21,7 @@ pub struct AlertingInterpret<S: AlertSender> { | ||||
|     pub sender: S, | ||||
|     pub receivers: Vec<Box<dyn AlertReceiver<S>>>, | ||||
|     pub rules: Vec<Box<dyn AlertRule<S>>>, | ||||
|     pub scrape_targets: Option<Vec<Box<dyn ScrapeTarget<S>>>>, | ||||
| } | ||||
| 
 | ||||
| #[async_trait] | ||||
| @ -30,6 +31,7 @@ impl<S: AlertSender + Installable<T>, T: Topology> Interpret<T> for AlertingInte | ||||
|         inventory: &Inventory, | ||||
|         topology: &T, | ||||
|     ) -> Result<Outcome, InterpretError> { | ||||
|         debug!("configuring alert sender for AlertingInterpret"); | ||||
|         self.sender.configure(inventory, topology).await?; | ||||
|         for receiver in self.receivers.iter() { | ||||
|             receiver.install(&self.sender).await?; | ||||
| @ -38,6 +40,12 @@ impl<S: AlertSender + Installable<T>, T: Topology> Interpret<T> for AlertingInte | ||||
|             debug!("installing rule: {:#?}", rule); | ||||
|             rule.install(&self.sender).await?; | ||||
|         } | ||||
|         if let Some(targets) = &self.scrape_targets { | ||||
|             for target in targets.iter() { | ||||
|                 debug!("installing scrape_target: {:#?}", target); | ||||
|                 target.install(&self.sender).await?; | ||||
|             } | ||||
|         } | ||||
|         self.sender.ensure_installed(inventory, topology).await?; | ||||
|         Ok(Outcome::success(format!( | ||||
|             "successfully installed alert sender {}", | ||||
| @ -77,6 +85,7 @@ pub trait AlertRule<S: AlertSender>: std::fmt::Debug + Send + Sync { | ||||
| } | ||||
| 
 | ||||
| #[async_trait] | ||||
| pub trait ScrapeTarget<S: AlertSender> { | ||||
|     async fn install(&self, sender: &S) -> Result<(), InterpretError>; | ||||
| pub trait ScrapeTarget<S: AlertSender>: std::fmt::Debug + Send + Sync { | ||||
|     async fn install(&self, sender: &S) -> Result<Outcome, InterpretError>; | ||||
|     fn clone_box(&self) -> Box<dyn ScrapeTarget<S>>; | ||||
| } | ||||
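| 
| // A minimal sketch of a `ScrapeTarget` implementation matching the updated trait, assuming | ||||
| // `CRDPrometheus` (an `AlertSender`) is in scope; `ExternalEndpoint` and its behaviour are | ||||
| // hypothetical. | ||||
| #[derive(Debug, Clone)] | ||||
| struct ExternalEndpoint { | ||||
|     url: String, | ||||
| } | ||||
| 
| #[async_trait] | ||||
| impl ScrapeTarget<CRDPrometheus> for ExternalEndpoint { | ||||
|     async fn install(&self, _sender: &CRDPrometheus) -> Result<Outcome, InterpretError> { | ||||
|         // A real implementation would create or patch a ServiceMonitor / scrape config here. | ||||
|         Ok(Outcome::success(format!("registered scrape target {}", self.url))) | ||||
|     } | ||||
| 
|     fn clone_box(&self) -> Box<dyn ScrapeTarget<CRDPrometheus>> { | ||||
|         Box::new(self.clone()) | ||||
|     } | ||||
| } | ||||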
|  | ||||
harmony/src/infra/brocade.rs (new file, 378 lines)
						| @ -0,0 +1,378 @@ | ||||
| use async_trait::async_trait; | ||||
| use brocade::{BrocadeClient, BrocadeOptions, InterSwitchLink, InterfaceStatus, PortOperatingMode}; | ||||
| use harmony_types::{ | ||||
|     net::{IpAddress, MacAddress}, | ||||
|     switch::{PortDeclaration, PortLocation}, | ||||
| }; | ||||
| use option_ext::OptionExt; | ||||
| 
 | ||||
| use crate::topology::{SwitchClient, SwitchError}; | ||||
| 
 | ||||
| #[derive(Debug)] | ||||
| pub struct BrocadeSwitchClient { | ||||
|     brocade: Box<dyn BrocadeClient + Send + Sync>, | ||||
| } | ||||
| 
 | ||||
| impl BrocadeSwitchClient { | ||||
|     pub async fn init( | ||||
|         ip_addresses: &[IpAddress], | ||||
|         username: &str, | ||||
|         password: &str, | ||||
|         options: Option<BrocadeOptions>, | ||||
|     ) -> Result<Self, brocade::Error> { | ||||
|         let brocade = brocade::init(ip_addresses, 22, username, password, options).await?; | ||||
|         Ok(Self { brocade }) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[async_trait] | ||||
| impl SwitchClient for BrocadeSwitchClient { | ||||
|     async fn setup(&self) -> Result<(), SwitchError> { | ||||
|         let stack_topology = self | ||||
|             .brocade | ||||
|             .get_stack_topology() | ||||
|             .await | ||||
|             .map_err(|e| SwitchError::new(e.to_string()))?; | ||||
| 
 | ||||
|         let interfaces = self | ||||
|             .brocade | ||||
|             .get_interfaces() | ||||
|             .await | ||||
|             .map_err(|e| SwitchError::new(e.to_string()))?; | ||||
| 
 | ||||
|         let interfaces: Vec<(String, PortOperatingMode)> = interfaces | ||||
|             .into_iter() | ||||
|             .filter(|interface| { | ||||
|                 interface.operating_mode.is_none() && interface.status == InterfaceStatus::Connected | ||||
|             }) | ||||
|             .filter(|interface| { | ||||
|                 !stack_topology.iter().any(|link: &InterSwitchLink| { | ||||
|                     link.local_port == interface.port_location | ||||
|                         || link.remote_port.contains(&interface.port_location) | ||||
|                 }) | ||||
|             }) | ||||
|             .map(|interface| (interface.name.clone(), PortOperatingMode::Access)) | ||||
|             .collect(); | ||||
| 
 | ||||
|         if interfaces.is_empty() { | ||||
|             return Ok(()); | ||||
|         } | ||||
| 
 | ||||
|         self.brocade | ||||
|             .configure_interfaces(interfaces) | ||||
|             .await | ||||
|             .map_err(|e| SwitchError::new(e.to_string()))?; | ||||
| 
 | ||||
|         Ok(()) | ||||
|     } | ||||
| 
 | ||||
|     async fn find_port( | ||||
|         &self, | ||||
|         mac_address: &MacAddress, | ||||
|     ) -> Result<Option<PortLocation>, SwitchError> { | ||||
|         let table = self | ||||
|             .brocade | ||||
|             .get_mac_address_table() | ||||
|             .await | ||||
|             .map_err(|e| SwitchError::new(format!("{e}")))?; | ||||
| 
 | ||||
|         let port = table | ||||
|             .iter() | ||||
|             .find(|entry| entry.mac_address == *mac_address) | ||||
|             .map(|entry| match &entry.port { | ||||
|                 PortDeclaration::Single(port_location) => Ok(port_location.clone()), | ||||
|                 _ => Err(SwitchError::new( | ||||
|                     "Multiple ports found for MAC address".into(), | ||||
|                 )), | ||||
|             }); | ||||
| 
 | ||||
|         match port { | ||||
|             Some(Ok(p)) => Ok(Some(p)), | ||||
|             Some(Err(e)) => Err(e), | ||||
|             None => Ok(None), | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     async fn configure_port_channel( | ||||
|         &self, | ||||
|         channel_name: &str, | ||||
|         switch_ports: Vec<PortLocation>, | ||||
|     ) -> Result<u8, SwitchError> { | ||||
|         let channel_id = self | ||||
|             .brocade | ||||
|             .find_available_channel_id() | ||||
|             .await | ||||
|             .map_err(|e| SwitchError::new(format!("{e}")))?; | ||||
| 
 | ||||
|         self.brocade | ||||
|             .create_port_channel(channel_id, channel_name, &switch_ports) | ||||
|             .await | ||||
|             .map_err(|e| SwitchError::new(format!("{e}")))?; | ||||
| 
 | ||||
|         Ok(channel_id) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[cfg(test)] | ||||
| mod tests { | ||||
|     use std::sync::{Arc, Mutex}; | ||||
| 
 | ||||
|     use assertor::*; | ||||
|     use async_trait::async_trait; | ||||
|     use brocade::{ | ||||
|         BrocadeClient, BrocadeInfo, Error, InterSwitchLink, InterfaceInfo, InterfaceStatus, | ||||
|         InterfaceType, MacAddressEntry, PortChannelId, PortOperatingMode, | ||||
|     }; | ||||
|     use harmony_types::switch::PortLocation; | ||||
| 
 | ||||
|     use crate::{infra::brocade::BrocadeSwitchClient, topology::SwitchClient}; | ||||
| 
 | ||||
|     #[tokio::test] | ||||
|     async fn setup_should_configure_ethernet_interfaces_as_access_ports() { | ||||
|         let first_interface = given_interface() | ||||
|             .with_port_location(PortLocation(1, 0, 1)) | ||||
|             .build(); | ||||
|         let second_interface = given_interface() | ||||
|             .with_port_location(PortLocation(1, 0, 4)) | ||||
|             .build(); | ||||
|         let brocade = Box::new(FakeBrocadeClient::new( | ||||
|             vec![], | ||||
|             vec![first_interface.clone(), second_interface.clone()], | ||||
|         )); | ||||
|         let client = BrocadeSwitchClient { | ||||
|             brocade: brocade.clone(), | ||||
|         }; | ||||
| 
 | ||||
|         client.setup().await.unwrap(); | ||||
| 
 | ||||
|         let configured_interfaces = brocade.configured_interfaces.lock().unwrap(); | ||||
|         assert_that!(*configured_interfaces).contains_exactly(vec![ | ||||
|             (first_interface.name.clone(), PortOperatingMode::Access), | ||||
|             (second_interface.name.clone(), PortOperatingMode::Access), | ||||
|         ]); | ||||
|     } | ||||
| 
 | ||||
|     #[tokio::test] | ||||
|     async fn setup_with_an_already_configured_interface_should_skip_configuration() { | ||||
|         let brocade = Box::new(FakeBrocadeClient::new( | ||||
|             vec![], | ||||
|             vec![ | ||||
|                 given_interface() | ||||
|                     .with_operating_mode(Some(PortOperatingMode::Access)) | ||||
|                     .build(), | ||||
|             ], | ||||
|         )); | ||||
|         let client = BrocadeSwitchClient { | ||||
|             brocade: brocade.clone(), | ||||
|         }; | ||||
| 
 | ||||
|         client.setup().await.unwrap(); | ||||
| 
 | ||||
|         let configured_interfaces = brocade.configured_interfaces.lock().unwrap(); | ||||
|         assert_that!(*configured_interfaces).is_empty(); | ||||
|     } | ||||
| 
 | ||||
|     #[tokio::test] | ||||
|     async fn setup_with_a_disconnected_interface_should_skip_configuration() { | ||||
|         let brocade = Box::new(FakeBrocadeClient::new( | ||||
|             vec![], | ||||
|             vec![ | ||||
|                 given_interface() | ||||
|                     .with_status(InterfaceStatus::SfpAbsent) | ||||
|                     .build(), | ||||
|                 given_interface() | ||||
|                     .with_status(InterfaceStatus::NotConnected) | ||||
|                     .build(), | ||||
|             ], | ||||
|         )); | ||||
|         let client = BrocadeSwitchClient { | ||||
|             brocade: brocade.clone(), | ||||
|         }; | ||||
| 
 | ||||
|         client.setup().await.unwrap(); | ||||
| 
 | ||||
|         let configured_interfaces = brocade.configured_interfaces.lock().unwrap(); | ||||
|         assert_that!(*configured_interfaces).is_empty(); | ||||
|     } | ||||
| 
 | ||||
|     #[tokio::test] | ||||
|     async fn setup_with_inter_switch_links_should_not_configure_interfaces_used_to_form_stack() { | ||||
|         let brocade = Box::new(FakeBrocadeClient::new( | ||||
|             vec![ | ||||
|                 given_inter_switch_link() | ||||
|                     .between(PortLocation(1, 0, 1), PortLocation(2, 0, 1)) | ||||
|                     .build(), | ||||
|                 given_inter_switch_link() | ||||
|                     .between(PortLocation(2, 0, 2), PortLocation(3, 0, 1)) | ||||
|                     .build(), | ||||
|             ], | ||||
|             vec![ | ||||
|                 given_interface() | ||||
|                     .with_port_location(PortLocation(1, 0, 1)) | ||||
|                     .build(), | ||||
|                 given_interface() | ||||
|                     .with_port_location(PortLocation(2, 0, 1)) | ||||
|                     .build(), | ||||
|                 given_interface() | ||||
|                     .with_port_location(PortLocation(3, 0, 1)) | ||||
|                     .build(), | ||||
|             ], | ||||
|         )); | ||||
|         let client = BrocadeSwitchClient { | ||||
|             brocade: brocade.clone(), | ||||
|         }; | ||||
| 
 | ||||
|         client.setup().await.unwrap(); | ||||
| 
 | ||||
|         let configured_interfaces = brocade.configured_interfaces.lock().unwrap(); | ||||
|         assert_that!(*configured_interfaces).is_empty(); | ||||
|     } | ||||
| 
 | ||||
|     #[derive(Debug, Clone)] | ||||
|     struct FakeBrocadeClient { | ||||
|         stack_topology: Vec<InterSwitchLink>, | ||||
|         interfaces: Vec<InterfaceInfo>, | ||||
|         configured_interfaces: Arc<Mutex<Vec<(String, PortOperatingMode)>>>, | ||||
|     } | ||||
| 
 | ||||
|     #[async_trait] | ||||
|     impl BrocadeClient for FakeBrocadeClient { | ||||
|         async fn version(&self) -> Result<BrocadeInfo, Error> { | ||||
|             todo!() | ||||
|         } | ||||
| 
 | ||||
|         async fn get_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error> { | ||||
|             todo!() | ||||
|         } | ||||
| 
 | ||||
|         async fn get_stack_topology(&self) -> Result<Vec<InterSwitchLink>, Error> { | ||||
|             Ok(self.stack_topology.clone()) | ||||
|         } | ||||
| 
 | ||||
|         async fn get_interfaces(&self) -> Result<Vec<InterfaceInfo>, Error> { | ||||
|             Ok(self.interfaces.clone()) | ||||
|         } | ||||
| 
 | ||||
|         async fn configure_interfaces( | ||||
|             &self, | ||||
|             interfaces: Vec<(String, PortOperatingMode)>, | ||||
|         ) -> Result<(), Error> { | ||||
|             let mut configured_interfaces = self.configured_interfaces.lock().unwrap(); | ||||
|             *configured_interfaces = interfaces; | ||||
| 
 | ||||
|             Ok(()) | ||||
|         } | ||||
| 
 | ||||
|         async fn find_available_channel_id(&self) -> Result<PortChannelId, Error> { | ||||
|             todo!() | ||||
|         } | ||||
| 
 | ||||
|         async fn create_port_channel( | ||||
|             &self, | ||||
|             _channel_id: PortChannelId, | ||||
|             _channel_name: &str, | ||||
|             _ports: &[PortLocation], | ||||
|         ) -> Result<(), Error> { | ||||
|             todo!() | ||||
|         } | ||||
| 
 | ||||
|         async fn clear_port_channel(&self, _channel_name: &str) -> Result<(), Error> { | ||||
|             todo!() | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     impl FakeBrocadeClient { | ||||
|         fn new(stack_topology: Vec<InterSwitchLink>, interfaces: Vec<InterfaceInfo>) -> Self { | ||||
|             Self { | ||||
|                 stack_topology, | ||||
|                 interfaces, | ||||
|                 configured_interfaces: Arc::new(Mutex::new(vec![])), | ||||
|             } | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     struct InterfaceInfoBuilder { | ||||
|         port_location: Option<PortLocation>, | ||||
|         interface_type: Option<InterfaceType>, | ||||
|         operating_mode: Option<PortOperatingMode>, | ||||
|         status: Option<InterfaceStatus>, | ||||
|     } | ||||
| 
 | ||||
|     impl InterfaceInfoBuilder { | ||||
|         fn build(&self) -> InterfaceInfo { | ||||
|             let interface_type = self | ||||
|                 .interface_type | ||||
|                 .clone() | ||||
|                 .unwrap_or(InterfaceType::Ethernet("TenGigabitEthernet".into())); | ||||
|             let port_location = self.port_location.clone().unwrap_or(PortLocation(1, 0, 1)); | ||||
|             let name = format!("{interface_type} {port_location}"); | ||||
|             let status = self.status.clone().unwrap_or(InterfaceStatus::Connected); | ||||
| 
 | ||||
|             InterfaceInfo { | ||||
|                 name, | ||||
|                 port_location, | ||||
|                 interface_type, | ||||
|                 operating_mode: self.operating_mode.clone(), | ||||
|                 status, | ||||
|             } | ||||
|         } | ||||
| 
 | ||||
|         fn with_port_location(self, port_location: PortLocation) -> Self { | ||||
|             Self { | ||||
|                 port_location: Some(port_location), | ||||
|                 ..self | ||||
|             } | ||||
|         } | ||||
| 
 | ||||
|         fn with_operating_mode(self, operating_mode: Option<PortOperatingMode>) -> Self { | ||||
|             Self { | ||||
|                 operating_mode, | ||||
|                 ..self | ||||
|             } | ||||
|         } | ||||
| 
 | ||||
|         fn with_status(self, status: InterfaceStatus) -> Self { | ||||
|             Self { | ||||
|                 status: Some(status), | ||||
|                 ..self | ||||
|             } | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     struct InterSwitchLinkBuilder { | ||||
|         link: Option<(PortLocation, PortLocation)>, | ||||
|     } | ||||
| 
 | ||||
|     impl InterSwitchLinkBuilder { | ||||
|         fn build(&self) -> InterSwitchLink { | ||||
|             let link = self | ||||
|                 .link | ||||
|                 .clone() | ||||
|                 .unwrap_or((PortLocation(1, 0, 1), PortLocation(2, 0, 1))); | ||||
| 
 | ||||
|             InterSwitchLink { | ||||
|                 local_port: link.0, | ||||
|                 remote_port: Some(link.1), | ||||
|             } | ||||
|         } | ||||
| 
 | ||||
|         fn between(self, local_port: PortLocation, remote_port: PortLocation) -> Self { | ||||
|             Self { | ||||
|                 link: Some((local_port, remote_port)), | ||||
|             } | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     fn given_interface() -> InterfaceInfoBuilder { | ||||
|         InterfaceInfoBuilder { | ||||
|             port_location: None, | ||||
|             interface_type: None, | ||||
|             operating_mode: None, | ||||
|             status: None, | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     fn given_inter_switch_link() -> InterSwitchLinkBuilder { | ||||
|         InterSwitchLinkBuilder { link: None } | ||||
|     } | ||||
| } | ||||
| @ -11,7 +11,7 @@ pub struct InventoryRepositoryFactory; | ||||
| impl InventoryRepositoryFactory { | ||||
|     pub async fn build() -> Result<Box<dyn InventoryRepository>, RepoError> { | ||||
|         Ok(Box::new( | ||||
|             SqliteInventoryRepository::new(&(*DATABASE_URL)).await?, | ||||
|             SqliteInventoryRepository::new(&DATABASE_URL).await?, | ||||
|         )) | ||||
|     } | ||||
| } | ||||
|  | ||||
| @ -1,3 +1,4 @@ | ||||
| pub mod brocade; | ||||
| pub mod executors; | ||||
| pub mod hp_ilo; | ||||
| pub mod intel_amt; | ||||
|  | ||||
| @ -26,19 +26,13 @@ impl LoadBalancer for OPNSenseFirewall { | ||||
|     } | ||||
| 
 | ||||
|     async fn add_service(&self, service: &LoadBalancerService) -> Result<(), ExecutorError> { | ||||
|         warn!( | ||||
|             "TODO : the current implementation does not check / cleanup / merge with existing haproxy services properly. Make sure to manually verify that the configuration is correct after executing any operation here" | ||||
|         ); | ||||
|         let mut config = self.opnsense_config.write().await; | ||||
|         let mut load_balancer = config.load_balancer(); | ||||
| 
 | ||||
|         let (frontend, backend, servers, healthcheck) = | ||||
|             harmony_load_balancer_service_to_haproxy_xml(service); | ||||
|         let mut load_balancer = config.load_balancer(); | ||||
|         load_balancer.add_backend(backend); | ||||
|         load_balancer.add_frontend(frontend); | ||||
|         load_balancer.add_servers(servers); | ||||
|         if let Some(healthcheck) = healthcheck { | ||||
|             load_balancer.add_healthcheck(healthcheck); | ||||
|         } | ||||
| 
 | ||||
|         load_balancer.configure_service(frontend, backend, servers, healthcheck); | ||||
| 
 | ||||
|         Ok(()) | ||||
|     } | ||||
| @ -106,7 +100,7 @@ pub(crate) fn haproxy_xml_config_to_harmony_loadbalancer( | ||||
|                 .backends | ||||
|                 .backends | ||||
|                 .iter() | ||||
|                 .find(|b| b.uuid == frontend.default_backend); | ||||
|                 .find(|b| Some(b.uuid.clone()) == frontend.default_backend); | ||||
| 
 | ||||
|             let mut health_check = None; | ||||
|             match matching_backend { | ||||
| @ -116,8 +110,7 @@ pub(crate) fn haproxy_xml_config_to_harmony_loadbalancer( | ||||
|                 } | ||||
|                 None => { | ||||
|                     warn!( | ||||
|                         "HAProxy config could not find a matching backend for frontend {:?}", | ||||
|                         frontend | ||||
|                         "HAProxy config could not find a matching backend for frontend {frontend:?}" | ||||
|                     ); | ||||
|                 } | ||||
|             } | ||||
| @ -152,11 +145,11 @@ pub(crate) fn get_servers_for_backend( | ||||
|         .servers | ||||
|         .iter() | ||||
|         .filter_map(|server| { | ||||
|             let address = server.address.clone()?; | ||||
|             let port = server.port?; | ||||
| 
 | ||||
|             if backend_servers.contains(&server.uuid.as_str()) { | ||||
|                 return Some(BackendServer { | ||||
|                     address: server.address.clone(), | ||||
|                     port: server.port, | ||||
|                 }); | ||||
|                 return Some(BackendServer { address, port }); | ||||
|             } | ||||
|             None | ||||
|         }) | ||||
| @ -347,7 +340,7 @@ pub(crate) fn harmony_load_balancer_service_to_haproxy_xml( | ||||
|         name: format!("frontend_{}", service.listening_port), | ||||
|         bind: service.listening_port.to_string(), | ||||
|         mode: "tcp".to_string(), // TODO do not depend on health check here
 | ||||
|         default_backend: backend.uuid.clone(), | ||||
|         default_backend: Some(backend.uuid.clone()), | ||||
|         ..Default::default() | ||||
|     }; | ||||
|     info!("HAPRoxy frontend and backend mode currently hardcoded to tcp"); | ||||
| @ -361,8 +354,8 @@ fn server_to_haproxy_server(server: &BackendServer) -> HAProxyServer { | ||||
|         uuid: Uuid::new_v4().to_string(), | ||||
|         name: format!("{}_{}", &server.address, &server.port), | ||||
|         enabled: 1, | ||||
|         address: server.address.clone(), | ||||
|         port: server.port, | ||||
|         address: Some(server.address.clone()), | ||||
|         port: Some(server.port), | ||||
|         mode: "active".to_string(), | ||||
|         server_type: "static".to_string(), | ||||
|         ..Default::default() | ||||
| @ -385,8 +378,8 @@ mod tests { | ||||
|         let mut haproxy = HAProxy::default(); | ||||
|         let server = HAProxyServer { | ||||
|             uuid: "server1".to_string(), | ||||
|             address: "192.168.1.1".to_string(), | ||||
|             port: 80, | ||||
|             address: Some("192.168.1.1".to_string()), | ||||
|             port: Some(80), | ||||
|             ..Default::default() | ||||
|         }; | ||||
|         haproxy.servers.servers.push(server); | ||||
| @ -411,8 +404,8 @@ mod tests { | ||||
|         let mut haproxy = HAProxy::default(); | ||||
|         let server = HAProxyServer { | ||||
|             uuid: "server1".to_string(), | ||||
|             address: "192.168.1.1".to_string(), | ||||
|             port: 80, | ||||
|             address: Some("192.168.1.1".to_string()), | ||||
|             port: Some(80), | ||||
|             ..Default::default() | ||||
|         }; | ||||
|         haproxy.servers.servers.push(server); | ||||
| @ -431,8 +424,8 @@ mod tests { | ||||
|         let mut haproxy = HAProxy::default(); | ||||
|         let server = HAProxyServer { | ||||
|             uuid: "server1".to_string(), | ||||
|             address: "192.168.1.1".to_string(), | ||||
|             port: 80, | ||||
|             address: Some("192.168.1.1".to_string()), | ||||
|             port: Some(80), | ||||
|             ..Default::default() | ||||
|         }; | ||||
|         haproxy.servers.servers.push(server); | ||||
| @ -453,16 +446,16 @@ mod tests { | ||||
|         let mut haproxy = HAProxy::default(); | ||||
|         let server = HAProxyServer { | ||||
|             uuid: "server1".to_string(), | ||||
|             address: "some-hostname.test.mcd".to_string(), | ||||
|             port: 80, | ||||
|             address: Some("some-hostname.test.mcd".to_string()), | ||||
|             port: Some(80), | ||||
|             ..Default::default() | ||||
|         }; | ||||
|         haproxy.servers.servers.push(server); | ||||
| 
 | ||||
|         let server = HAProxyServer { | ||||
|             uuid: "server2".to_string(), | ||||
|             address: "192.168.1.2".to_string(), | ||||
|             port: 8080, | ||||
|             address: Some("192.168.1.2".to_string()), | ||||
|             port: Some(8080), | ||||
|             ..Default::default() | ||||
|         }; | ||||
|         haproxy.servers.servers.push(server); | ||||
|  | ||||
| @ -1,7 +1,10 @@ | ||||
| use std::error::Error; | ||||
| 
 | ||||
| use async_trait::async_trait; | ||||
| use derive_new::new; | ||||
| use serde::Serialize; | ||||
| 
 | ||||
| use crate::topology::Topology; | ||||
| use crate::{executors::ExecutorError, topology::Topology}; | ||||
| 
 | ||||
| /// An ApplicationFeature provided by harmony, such as Backups, Monitoring, MultisiteAvailability,
 | ||||
| /// ContinuousIntegration, ContinuousDelivery
 | ||||
| @ -9,7 +12,10 @@ use crate::topology::Topology; | ||||
| pub trait ApplicationFeature<T: Topology>: | ||||
|     std::fmt::Debug + Send + Sync + ApplicationFeatureClone<T> | ||||
| { | ||||
|     async fn ensure_installed(&self, topology: &T) -> Result<(), String>; | ||||
|     async fn ensure_installed( | ||||
|         &self, | ||||
|         topology: &T, | ||||
|     ) -> Result<InstallationOutcome, InstallationError>; | ||||
|     fn name(&self) -> String; | ||||
| } | ||||
| 
 | ||||
| @ -40,3 +46,60 @@ impl<T: Topology> Clone for Box<dyn ApplicationFeature<T>> { | ||||
|         self.clone_box() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[derive(Debug, Clone, PartialEq, Eq)] | ||||
| pub enum InstallationOutcome { | ||||
|     Success { details: Vec<String> }, | ||||
|     Noop, | ||||
| } | ||||
| 
 | ||||
| impl InstallationOutcome { | ||||
|     pub fn success() -> Self { | ||||
|         Self::Success { details: vec![] } | ||||
|     } | ||||
| 
 | ||||
|     pub fn success_with_details(details: Vec<String>) -> Self { | ||||
|         Self::Success { details } | ||||
|     } | ||||
| 
 | ||||
|     pub fn noop() -> Self { | ||||
|         Self::Noop | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[derive(Debug, Clone, new)] | ||||
| pub struct InstallationError { | ||||
|     msg: String, | ||||
| } | ||||
| 
 | ||||
| impl std::fmt::Display for InstallationError { | ||||
|     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { | ||||
|         f.write_str(&self.msg) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl Error for InstallationError {} | ||||
| 
 | ||||
| impl From<ExecutorError> for InstallationError { | ||||
|     fn from(value: ExecutorError) -> Self { | ||||
|         Self { | ||||
|             msg: format!("InstallationError : {value}"), | ||||
|         } | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl From<kube::Error> for InstallationError { | ||||
|     fn from(value: kube::Error) -> Self { | ||||
|         Self { | ||||
|             msg: format!("InstallationError : {value}"), | ||||
|         } | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl From<String> for InstallationError { | ||||
|     fn from(value: String) -> Self { | ||||
|         Self { | ||||
|             msg: format!("InstallationError : {value}"), | ||||
|         } | ||||
|     } | ||||
| } | ||||
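| 
| // A minimal sketch of an `ApplicationFeature` using the new outcome/error types. `NoopFeature` | ||||
| // is hypothetical, and the usual blanket `ApplicationFeatureClone` impl for `Clone` types is | ||||
| // assumed. | ||||
| #[derive(Debug, Clone)] | ||||
| struct NoopFeature; | ||||
| 
| #[async_trait] | ||||
| impl<T: Topology> ApplicationFeature<T> for NoopFeature { | ||||
|     async fn ensure_installed( | ||||
|         &self, | ||||
|         _topology: &T, | ||||
|     ) -> Result<InstallationOutcome, InstallationError> { | ||||
|         // Nothing to install; report a no-op so callers can skip follow-up work. | ||||
|         Ok(InstallationOutcome::noop()) | ||||
|     } | ||||
| 
|     fn name(&self) -> String { | ||||
|         "NoopFeature".to_string() | ||||
|     } | ||||
| } | ||||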
|  | ||||
| @ -2,7 +2,7 @@ use async_trait::async_trait; | ||||
| use log::info; | ||||
| 
 | ||||
| use crate::{ | ||||
|     modules::application::ApplicationFeature, | ||||
|     modules::application::{ApplicationFeature, InstallationError, InstallationOutcome}, | ||||
|     topology::{K8sclient, Topology}, | ||||
| }; | ||||
| 
 | ||||
| @ -29,7 +29,10 @@ impl Default for PublicEndpoint { | ||||
| /// For now we only support K8s ingress, but more endpoint types will be supported eventually
 | ||||
| #[async_trait] | ||||
| impl<T: Topology + K8sclient + 'static> ApplicationFeature<T> for PublicEndpoint { | ||||
|     async fn ensure_installed(&self, _topology: &T) -> Result<(), String> { | ||||
|     async fn ensure_installed( | ||||
|         &self, | ||||
|         _topology: &T, | ||||
|     ) -> Result<InstallationOutcome, InstallationError> { | ||||
|         info!( | ||||
|             "Making sure public endpoint is installed for port {}", | ||||
|             self.application_port | ||||
|  | ||||
| @ -1,4 +1,5 @@ | ||||
| use async_trait::async_trait; | ||||
| use harmony_macros::hurl; | ||||
| use kube::{Api, api::GroupVersionKind}; | ||||
| use log::{debug, warn}; | ||||
| use non_blank_string_rs::NonBlankString; | ||||
| @ -13,7 +14,8 @@ use crate::{ | ||||
|     modules::helm::chart::{HelmChartScore, HelmRepository}, | ||||
|     score::Score, | ||||
|     topology::{ | ||||
|         HelmCommand, K8sclient, PreparationError, PreparationOutcome, Topology, k8s::K8sClient, | ||||
|         HelmCommand, K8sclient, PreparationError, PreparationOutcome, Topology, ingress::Ingress, | ||||
|         k8s::K8sClient, | ||||
|     }, | ||||
| }; | ||||
| use harmony_types::id::Id; | ||||
| @ -27,7 +29,7 @@ pub struct ArgoHelmScore { | ||||
|     pub argo_apps: Vec<ArgoApplication>, | ||||
| } | ||||
| 
 | ||||
| impl<T: Topology + HelmCommand + K8sclient> Score<T> for ArgoHelmScore { | ||||
| impl<T: Topology + HelmCommand + K8sclient + Ingress> Score<T> for ArgoHelmScore { | ||||
|     fn create_interpret(&self) -> Box<dyn crate::interpret::Interpret<T>> { | ||||
|         Box::new(ArgoInterpret { | ||||
|             score: self.clone(), | ||||
| @ -47,17 +49,15 @@ pub struct ArgoInterpret { | ||||
| } | ||||
| 
 | ||||
| #[async_trait] | ||||
| impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for ArgoInterpret { | ||||
| impl<T: Topology + K8sclient + HelmCommand + Ingress> Interpret<T> for ArgoInterpret { | ||||
|     async fn execute( | ||||
|         &self, | ||||
|         inventory: &Inventory, | ||||
|         topology: &T, | ||||
|     ) -> Result<Outcome, InterpretError> { | ||||
|         let k8s_client = topology.k8s_client().await?; | ||||
|         let domain = self | ||||
|             .get_host_domain(k8s_client.clone(), self.score.openshift) | ||||
|             .await?; | ||||
|         let domain = format!("argo.{domain}"); | ||||
|         let svc = format!("argo-{}", self.score.namespace.clone()); | ||||
|         let domain = topology.get_domain(&svc).await?; | ||||
|         let helm_score = | ||||
|             argo_helm_chart_score(&self.score.namespace, self.score.openshift, &domain); | ||||
| 
 | ||||
| @ -68,14 +68,17 @@ impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for ArgoInterpret { | ||||
|             .await | ||||
|             .unwrap(); | ||||
| 
 | ||||
|         Ok(Outcome::success(format!( | ||||
|             "ArgoCD installed with {} {}", | ||||
|             self.argo_apps.len(), | ||||
|             match self.argo_apps.len() { | ||||
|                 1 => "application", | ||||
|                 _ => "applications", | ||||
|             } | ||||
|         ))) | ||||
|         Ok(Outcome::success_with_details( | ||||
|             format!( | ||||
|                 "ArgoCD {} {}", | ||||
|                 self.argo_apps.len(), | ||||
|                 match self.argo_apps.len() { | ||||
|                     1 => "application", | ||||
|                     _ => "applications", | ||||
|                 } | ||||
|             ), | ||||
|             vec![format!("argo application: http://{}", domain)], | ||||
|         )) | ||||
|     } | ||||
| 
 | ||||
|     fn get_name(&self) -> InterpretName { | ||||
| @ -158,6 +161,9 @@ global: | ||||
|   ## Used for ingresses, certificates, SSO, notifications, etc. | ||||
|   domain: {domain} | ||||
| 
 | ||||
|   securityContext:  | ||||
|     runAsUser: null | ||||
| 
 | ||||
|   # -- Runtime class name for all components | ||||
|   runtimeClassName: "" | ||||
| 
 | ||||
| @ -469,6 +475,13 @@ redis: | ||||
|   # -- Redis name | ||||
|   name: redis | ||||
| 
 | ||||
|   serviceAccount: | ||||
|     create: true | ||||
| 
 | ||||
|   securityContext: | ||||
|     runAsUser: null | ||||
| 
 | ||||
| 
 | ||||
|   ## Redis image | ||||
|   image: | ||||
|     # -- Redis repository | ||||
| @ -1039,7 +1052,7 @@ commitServer: | ||||
|         install_only: false, | ||||
|         repository: Some(HelmRepository::new( | ||||
|             "argo".to_string(), | ||||
|             url::Url::parse("https://argoproj.github.io/argo-helm").unwrap(), | ||||
|             hurl!("https://argoproj.github.io/argo-helm"), | ||||
|             true, | ||||
|         )), | ||||
|     } | ||||
|  | ||||
| @ -1,13 +1,12 @@ | ||||
| mod endpoint; | ||||
| pub mod rhob_monitoring; | ||||
| mod multisite; | ||||
| pub use endpoint::*; | ||||
| 
 | ||||
| mod monitoring; | ||||
| pub use monitoring::*; | ||||
| 
 | ||||
| mod continuous_delivery; | ||||
| pub use continuous_delivery::*; | ||||
| mod packaging_deployment; | ||||
| pub use packaging_deployment::*; | ||||
| 
 | ||||
| mod helm_argocd_score; | ||||
| pub use helm_argocd_score::*; | ||||
|  | ||||
| @ -1,10 +1,14 @@ | ||||
| use std::sync::Arc; | ||||
| 
 | ||||
| use crate::modules::application::{Application, ApplicationFeature}; | ||||
| use crate::modules::application::{ | ||||
|     Application, ApplicationFeature, InstallationError, InstallationOutcome, | ||||
| }; | ||||
| use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore; | ||||
| use crate::modules::monitoring::grafana::grafana::Grafana; | ||||
| use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus; | ||||
| 
 | ||||
| use crate::modules::monitoring::kube_prometheus::crd::service_monitor::{ | ||||
|     ServiceMonitor, ServiceMonitorSpec, | ||||
| }; | ||||
| use crate::topology::MultiTargetTopology; | ||||
| use crate::topology::ingress::Ingress; | ||||
| use crate::{ | ||||
|     inventory::Inventory, | ||||
|     modules::monitoring::{ | ||||
| @ -14,13 +18,18 @@ use crate::{ | ||||
|     topology::{HelmCommand, K8sclient, Topology, tenant::TenantManager}, | ||||
| }; | ||||
| use crate::{ | ||||
|     modules::prometheus::prometheus::PrometheusApplicationMonitoring, | ||||
|     modules::prometheus::prometheus::PrometheusMonitoring, | ||||
|     topology::oberservability::monitoring::AlertReceiver, | ||||
| }; | ||||
| use async_trait::async_trait; | ||||
| use base64::{Engine as _, engine::general_purpose}; | ||||
| use harmony_secret::SecretManager; | ||||
| use harmony_secret_derive::Secret; | ||||
| use harmony_types::net::Url; | ||||
| use kube::api::ObjectMeta; | ||||
| use log::{debug, info}; | ||||
| use serde::{Deserialize, Serialize}; | ||||
| use std::sync::Arc; | ||||
| 
 | ||||
| #[derive(Debug, Clone)] | ||||
| pub struct Monitoring { | ||||
| @ -36,41 +45,55 @@ impl< | ||||
|         + TenantManager | ||||
|         + K8sclient | ||||
|         + MultiTargetTopology | ||||
|         + std::fmt::Debug | ||||
|         + PrometheusApplicationMonitoring<CRDPrometheus>, | ||||
|         + PrometheusMonitoring<CRDPrometheus> | ||||
|         + Grafana | ||||
|         + Ingress | ||||
|         + std::fmt::Debug, | ||||
| > ApplicationFeature<T> for Monitoring | ||||
| { | ||||
|     async fn ensure_installed(&self, topology: &T) -> Result<(), String> { | ||||
|     async fn ensure_installed( | ||||
|         &self, | ||||
|         topology: &T, | ||||
|     ) -> Result<InstallationOutcome, InstallationError> { | ||||
|         info!("Ensuring monitoring is available for application"); | ||||
|         let namespace = topology | ||||
|             .get_tenant_config() | ||||
|             .await | ||||
|             .map(|ns| ns.name.clone()) | ||||
|             .unwrap_or_else(|| self.application.name()); | ||||
|         let domain = topology.get_domain("ntfy").await.unwrap(); | ||||
| 
 | ||||
|         let app_service_monitor = ServiceMonitor { | ||||
|             metadata: ObjectMeta { | ||||
|                 name: Some(self.application.name()), | ||||
|                 namespace: Some(namespace.clone()), | ||||
|                 ..Default::default() | ||||
|             }, | ||||
|             spec: ServiceMonitorSpec::default(), | ||||
|         }; | ||||
| 
 | ||||
|         let mut alerting_score = ApplicationMonitoringScore { | ||||
|             sender: CRDPrometheus { | ||||
|                 namespace: namespace.clone(), | ||||
|                 client: topology.k8s_client().await.unwrap(), | ||||
|                 service_monitor: vec![app_service_monitor], | ||||
|             }, | ||||
|             application: self.application.clone(), | ||||
|             receivers: self.alert_receiver.clone(), | ||||
|         }; | ||||
|         let ntfy = NtfyScore { | ||||
|             namespace: namespace.clone(), | ||||
|             host: "ntfy.harmonydemo.apps.ncd0.harmony.mcd".to_string(), | ||||
|             host: domain, | ||||
|         }; | ||||
|         ntfy.interpret(&Inventory::empty(), topology) | ||||
|             .await | ||||
|             .map_err(|e| e.to_string())?; | ||||
| 
 | ||||
|         let ntfy_default_auth_username = "harmony"; | ||||
|         let ntfy_default_auth_password = "harmony"; | ||||
|         let config = SecretManager::get_or_prompt::<NtfyAuth>().await.unwrap(); | ||||
| 
 | ||||
|         let ntfy_default_auth_header = format!( | ||||
|             "Basic {}", | ||||
|             general_purpose::STANDARD.encode(format!( | ||||
|                 "{ntfy_default_auth_username}:{ntfy_default_auth_password}" | ||||
|             )) | ||||
|             general_purpose::STANDARD.encode(format!("{}:{}", config.username, config.password)) | ||||
|         ); | ||||
| 
 | ||||
|         debug!("ntfy_default_auth_header: {ntfy_default_auth_header}"); | ||||
| @ -100,9 +123,17 @@ impl< | ||||
|             .interpret(&Inventory::empty(), topology) | ||||
|             .await | ||||
|             .map_err(|e| e.to_string())?; | ||||
|         Ok(()) | ||||
| 
 | ||||
|         Ok(InstallationOutcome::success()) | ||||
|     } | ||||
| 
 | ||||
|     fn name(&self) -> String { | ||||
|         "Monitoring".to_string() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[derive(Secret, Serialize, Deserialize, Clone, Debug)] | ||||
| struct NtfyAuth { | ||||
|     username: String, | ||||
|     password: String, | ||||
| } | ||||
|  | ||||
| @ -1,49 +0,0 @@ | ||||
| use std::sync::Arc; | ||||
| 
 | ||||
| use crate::modules::application::{Application, ApplicationFeature, StatelessApplication}; | ||||
| use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore; | ||||
| use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus; | ||||
| 
 | ||||
| use crate::topology::{K8sAnywhereTopology, MultiTargetTopology}; | ||||
| use crate::{ | ||||
|     inventory::Inventory, | ||||
|     modules::monitoring::{ | ||||
|         alert_channel::webhook_receiver::WebhookReceiver, ntfy::ntfy::NtfyScore, | ||||
|     }, | ||||
|     score::Score, | ||||
|     topology::{HelmCommand, K8sclient, Topology, tenant::TenantManager}, | ||||
| }; | ||||
| use crate::{ | ||||
|     modules::prometheus::prometheus::PrometheusApplicationMonitoring, | ||||
|     topology::oberservability::monitoring::AlertReceiver, | ||||
| }; | ||||
| use async_trait::async_trait; | ||||
| use base64::{Engine as _, engine::general_purpose}; | ||||
| use harmony_types::net::Url; | ||||
| use log::{debug, info}; | ||||
| 
 | ||||
| trait DebugTopology: Topology + std::fmt::Debug {} | ||||
| 
 | ||||
| #[derive(Debug, Clone)] | ||||
| pub struct Multisite { | ||||
|     app: Arc<dyn StatelessApplication>, | ||||
|     secondary_site: Arc<K8sAnywhereTopology>, | ||||
| } | ||||
| 
 | ||||
| #[async_trait] | ||||
| impl<T: Topology> ApplicationFeature<T> for Multisite { | ||||
|     async fn ensure_installed(&self, topology: &T) -> Result<(), String> { | ||||
| 
 | ||||
|         todo!( | ||||
|             " | ||||
|         - Find a way to get pvs for this application | ||||
|         - find the pv csi volumes uuid | ||||
|         - run rbd mirror image enable --pool mirrored-pool csi-vol-<UUID_PV> snapshot | ||||
|         - enjoy | ||||
|         " | ||||
|         ) | ||||
|     } | ||||
|     fn name(&self) -> String { | ||||
|         "Multisite".to_string() | ||||
|     } | ||||
| } | ||||
| @ -10,11 +10,13 @@ use crate::{ | ||||
|     data::Version, | ||||
|     inventory::Inventory, | ||||
|     modules::application::{ | ||||
|         ApplicationFeature, HelmPackage, OCICompliant, | ||||
|         ApplicationFeature, HelmPackage, InstallationError, InstallationOutcome, OCICompliant, | ||||
|         features::{ArgoApplication, ArgoHelmScore}, | ||||
|     }, | ||||
|     score::Score, | ||||
|     topology::{DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology}, | ||||
|     topology::{ | ||||
|         DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology, ingress::Ingress, | ||||
|     }, | ||||
| }; | ||||
| 
 | ||||
| /// ContinuousDelivery in Harmony provides this functionality :
 | ||||
| @ -45,60 +47,11 @@ use crate::{ | ||||
| /// - ArgoCD to install/upgrade/rollback/inspect k8s resources
 | ||||
| /// - Kubernetes for runtime orchestration
 | ||||
| #[derive(Debug, Default, Clone)] | ||||
| pub struct ContinuousDelivery<A: OCICompliant + HelmPackage> { | ||||
| pub struct PackagingDeployment<A: OCICompliant + HelmPackage> { | ||||
|     pub application: Arc<A>, | ||||
| } | ||||
| 
 | ||||
| impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> { | ||||
|     pub async fn deploy<T>(&self, topology: &T, helm_chart: String, image: String) -> Result<(), String> | ||||
|     where | ||||
|         T: Topology + HelmCommand + MultiTargetTopology + K8sclient + 'static, | ||||
|     { | ||||
|         // TODO: this is a temporary hack for demo purposes, the deployment target should be driven
 | ||||
|         // by the topology only and we should not have to know how to perform tasks like this for
 | ||||
|         // which the topology should be responsible.
 | ||||
|         //
 | ||||
|         // That said, this will require some careful architectural decisions, since the concept of
 | ||||
|         // deployment targets / profiles is probably a layer of complexity that we won't be
 | ||||
|         // completely able to avoid
 | ||||
|         //
 | ||||
|         // I'll try something for now that must be thought through afterwards: add a deployment_profile
 | ||||
|         // function to the topology trait that returns a profile, then anybody who needs it can
 | ||||
|         // access it. This forces every Topology to understand the concept of targets though... So
 | ||||
|         // instead I'll create a new Capability which is MultiTargetTopology and we'll see how it
 | ||||
|         // goes. It still does not feel right though.
 | ||||
|         //
 | ||||
|         // https://git.nationtech.io/NationTech/harmony/issues/106
 | ||||
|         match topology.current_target() { | ||||
|             DeploymentTarget::LocalDev => { | ||||
|                 info!("Deploying {} locally...", self.application.name()); | ||||
|                 self.deploy_to_local_k3d(self.application.name(), helm_chart, image) | ||||
|                     .await?; | ||||
|             } | ||||
|             target => { | ||||
|                 info!("Deploying {} to target {target:?}", self.application.name()); | ||||
| 
 | ||||
|                 let score = ArgoHelmScore { | ||||
|                     namespace: format!("{}", self.application.name()), | ||||
|                     openshift: true, | ||||
|                     argo_apps: vec![ArgoApplication::from(CDApplicationConfig { | ||||
|                         // helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0
 | ||||
|                         version: Version::from("0.1.0").unwrap(), | ||||
|                         helm_chart_repo_url: "hub.nationtech.io/harmony".to_string(), | ||||
|                         helm_chart_name: format!("{}-chart", self.application.name()), | ||||
|                         values_overrides: None, | ||||
|                         name: format!("{}", self.application.name()), | ||||
|                         namespace: format!("{}", self.application.name()), | ||||
|                     })], | ||||
|                 }; | ||||
|                 score | ||||
|                     .interpret(&Inventory::empty(), topology) | ||||
|                     .await | ||||
|                     .unwrap(); | ||||
|             } | ||||
|         }; | ||||
|         Ok(()) | ||||
|     } | ||||
| impl<A: OCICompliant + HelmPackage> PackagingDeployment<A> { | ||||
|     async fn deploy_to_local_k3d( | ||||
|         &self, | ||||
|         app_name: String, | ||||
| @ -185,24 +138,81 @@ impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> { | ||||
| #[async_trait] | ||||
| impl< | ||||
|     A: OCICompliant + HelmPackage + Clone + 'static, | ||||
|     T: Topology + HelmCommand + MultiTargetTopology + K8sclient + 'static, | ||||
| > ApplicationFeature<T> for ContinuousDelivery<A> | ||||
|     T: Topology + HelmCommand + MultiTargetTopology + K8sclient + Ingress + 'static, | ||||
| > ApplicationFeature<T> for PackagingDeployment<A> | ||||
| { | ||||
|     async fn ensure_installed(&self, topology: &T) -> Result<(), String> { | ||||
|     async fn ensure_installed( | ||||
|         &self, | ||||
|         topology: &T, | ||||
|     ) -> Result<InstallationOutcome, InstallationError> { | ||||
|         let image = self.application.image_name(); | ||||
|         let domain = topology | ||||
|             .get_domain(&self.application.name()) | ||||
|             .await | ||||
|             .map_err(|e| e.to_string())?; | ||||
| 
 | ||||
|         // TODO Write CI/CD workflow files
 | ||||
|         // we can autodetect the CI type using the remote URL (default to GitHub Actions for GitHub
 | ||||
|         // URLs, etc.)
 | ||||
|         // Or ask for it when unknown
 | ||||
| 
 | ||||
|         let helm_chart = self.application.build_push_helm_package(&image).await?; | ||||
|         let helm_chart = self | ||||
|             .application | ||||
|             .build_push_helm_package(&image, &domain) | ||||
|             .await?; | ||||
| 
 | ||||
|         // TODO: Make building image configurable/skippable if image already exists (prompt)")
 | ||||
|         // https://git.nationtech.io/NationTech/harmony/issues/104
 | ||||
|         let image = self.application.build_push_oci_image().await?; | ||||
| 
 | ||||
|         self.deploy(topology, helm_chart, image).await | ||||
|         // TODO: this is a temporary hack for demo purposes, the deployment target should be driven
 | ||||
|         // by the topology only and we should not have to know how to perform tasks like this for
 | ||||
|         // which the topology should be responsible.
 | ||||
|         //
 | ||||
|         // That said, this will require some careful architectural decisions, since the concept of
 | ||||
|         // deployment targets / profiles is probably a layer of complexity that we won't be
 | ||||
|         // completely able to avoid
 | ||||
|         //
 | ||||
|         // I'll try something for now that must be thought through afterwards: add a deployment_profile
 | ||||
|         // function to the topology trait that returns a profile, then anybody who needs it can
 | ||||
|         // access it. This forces every Topology to understand the concept of targets though... So
 | ||||
|         // instead I'll create a new Capability which is MultiTargetTopology and we'll see how it
 | ||||
|         // goes. It still does not feel right though.
 | ||||
|         //
 | ||||
|         // https://git.nationtech.io/NationTech/harmony/issues/106
 | ||||
|         match topology.current_target() { | ||||
|             DeploymentTarget::LocalDev => { | ||||
|                 info!("Deploying {} locally...", self.application.name()); | ||||
|                 self.deploy_to_local_k3d(self.application.name(), helm_chart, image) | ||||
|                     .await?; | ||||
|             } | ||||
|             target => { | ||||
|                 info!("Deploying {} to target {target:?}", self.application.name()); | ||||
| 
 | ||||
|                 let score = ArgoHelmScore { | ||||
|                     namespace: format!("{}", self.application.name()), | ||||
|                     openshift: true, | ||||
|                     argo_apps: vec![ArgoApplication::from(CDApplicationConfig { | ||||
|                         // helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0
 | ||||
|                         version: Version::from("0.1.0").unwrap(), | ||||
|                         helm_chart_repo_url: "hub.nationtech.io/harmony".to_string(), | ||||
|                         helm_chart_name: format!("{}-chart", self.application.name()), | ||||
|                         values_overrides: None, | ||||
|                         name: format!("{}", self.application.name()), | ||||
|                         namespace: format!("{}", self.application.name()), | ||||
|                     })], | ||||
|                 }; | ||||
|                 score | ||||
|                     .interpret(&Inventory::empty(), topology) | ||||
|                     .await | ||||
|                     .unwrap(); | ||||
|             } | ||||
|         }; | ||||
| 
 | ||||
|         Ok(InstallationOutcome::success_with_details(vec![format!( | ||||
|             "{}: http://{domain}", | ||||
|             self.application.name() | ||||
|         )])) | ||||
|     } | ||||
|     fn name(&self) -> String { | ||||
|         "ContinuousDelivery".to_string() | ||||
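The `current_target()` match above relies on the `MultiTargetTopology` capability discussed in the TODO comment. Below is a minimal sketch of the shape implied by that usage; the real `DeploymentTarget` and `MultiTargetTopology` definitions live in Harmony's topology module and may carry more variants and methods, so the non-local variants here are assumptions.

```rust
// Shape implied by the `topology.current_target()` match above; sketch only.
#[allow(dead_code)]
#[derive(Debug, Clone, Copy)]
enum DeploymentTarget {
    LocalDev,
    Staging,    // assumption: any non-local target falls into the `target =>` arm
    Production, // assumption
}

trait MultiTargetTopology {
    fn current_target(&self) -> DeploymentTarget;
}

struct FixedTarget(DeploymentTarget);

impl MultiTargetTopology for FixedTarget {
    fn current_target(&self) -> DeploymentTarget {
        self.0
    }
}

fn main() {
    let topo = FixedTarget(DeploymentTarget::LocalDev);
    match topo.current_target() {
        DeploymentTarget::LocalDev => println!("deploy to local k3d"),
        target => println!("deploy via ArgoCD to {target:?}"),
    }
}
```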
| @ -1,11 +1,14 @@ | ||||
| use std::sync::Arc; | ||||
| 
 | ||||
| use crate::modules::application::{Application, ApplicationFeature}; | ||||
| use crate::modules::application::{ | ||||
|     Application, ApplicationFeature, InstallationError, InstallationOutcome, | ||||
| }; | ||||
| use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore; | ||||
| use crate::modules::monitoring::application_monitoring::rhobs_application_monitoring_score::ApplicationRHOBMonitoringScore; | ||||
| 
 | ||||
| use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability; | ||||
| use crate::topology::MultiTargetTopology; | ||||
| use crate::topology::ingress::Ingress; | ||||
| use crate::{ | ||||
|     inventory::Inventory, | ||||
|     modules::monitoring::{ | ||||
| @ -15,7 +18,7 @@ use crate::{ | ||||
|     topology::{HelmCommand, K8sclient, Topology, tenant::TenantManager}, | ||||
| }; | ||||
| use crate::{ | ||||
|     modules::prometheus::prometheus::PrometheusApplicationMonitoring, | ||||
|     modules::prometheus::prometheus::PrometheusMonitoring, | ||||
|     topology::oberservability::monitoring::AlertReceiver, | ||||
| }; | ||||
| use async_trait::async_trait; | ||||
| @ -24,7 +27,7 @@ use harmony_types::net::Url; | ||||
| use log::{debug, info}; | ||||
| 
 | ||||
| #[derive(Debug, Clone)] | ||||
| pub struct RHOBMonitoring { | ||||
| pub struct Monitoring { | ||||
|     pub application: Arc<dyn Application>, | ||||
|     pub alert_receiver: Vec<Box<dyn AlertReceiver<RHOBObservability>>>, | ||||
| } | ||||
| @ -37,11 +40,15 @@ impl< | ||||
|         + TenantManager | ||||
|         + K8sclient | ||||
|         + MultiTargetTopology | ||||
|         + Ingress | ||||
|         + std::fmt::Debug | ||||
|         + PrometheusApplicationMonitoring<RHOBObservability>, | ||||
| > ApplicationFeature<T> for RHOBMonitoring | ||||
|         + PrometheusMonitoring<RHOBObservability>, | ||||
| > ApplicationFeature<T> for Monitoring | ||||
| { | ||||
|     async fn ensure_installed(&self, topology: &T) -> Result<(), String> { | ||||
|     async fn ensure_installed( | ||||
|         &self, | ||||
|         topology: &T, | ||||
|     ) -> Result<InstallationOutcome, InstallationError> { | ||||
|         info!("Ensuring monitoring is available for application"); | ||||
|         let namespace = topology | ||||
|             .get_tenant_config() | ||||
| @ -57,9 +64,13 @@ impl< | ||||
|             application: self.application.clone(), | ||||
|             receivers: self.alert_receiver.clone(), | ||||
|         }; | ||||
|         let domain = topology | ||||
|             .get_domain("ntfy") | ||||
|             .await | ||||
|             .map_err(|e| format!("could not get domain {e}"))?; | ||||
|         let ntfy = NtfyScore { | ||||
|             namespace: namespace.clone(), | ||||
|             host: "ntfy.harmonydemo.apps.ncd0.harmony.mcd".to_string(), | ||||
|             host: domain.clone(), | ||||
|         }; | ||||
|         ntfy.interpret(&Inventory::empty(), topology) | ||||
|             .await | ||||
| @ -81,27 +92,33 @@ impl< | ||||
|             .replace("=", ""); | ||||
| 
 | ||||
|         debug!("ntfy_default_auth_param: {ntfy_default_auth_param}"); | ||||
| 
 | ||||
|         let ntfy_receiver = WebhookReceiver { | ||||
|             name: "ntfy-webhook".to_string(), | ||||
|             url: Url::Url( | ||||
|                 url::Url::parse( | ||||
|                     format!( | ||||
|                         "http://ntfy.{}.svc.cluster.local/rust-web-app?auth={ntfy_default_auth_param}", | ||||
|                         namespace.clone() | ||||
|                         "http://{domain}/{}?auth={ntfy_default_auth_param}", | ||||
|                         self.application.name() | ||||
|                     ) | ||||
|                     .as_str(), | ||||
|                 ) | ||||
|                 .unwrap(), | ||||
|             ), | ||||
|         }; | ||||
| 
 | ||||
|         debug!( | ||||
|             "ntfy webhook receiver \n{:#?}\nntfy topic: {}", | ||||
|             ntfy_receiver.clone(), | ||||
|             self.application.name() | ||||
|         ); | ||||
|         alerting_score.receivers.push(Box::new(ntfy_receiver)); | ||||
|         alerting_score | ||||
|             .interpret(&Inventory::empty(), topology) | ||||
|             .await | ||||
|             .map_err(|e| e.to_string())?; | ||||
|         Ok(()) | ||||
|         Ok(InstallationOutcome::success_with_details(vec![format!( | ||||
|             "ntfy topic: {}", | ||||
|             self.application.name() | ||||
|         )])) | ||||
|     } | ||||
|     fn name(&self) -> String { | ||||
|         "Monitoring".to_string() | ||||
|  | ||||
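The webhook receiver above boils down to a URL of the form `http://{domain}/{application}?auth={param}`, where the application name doubles as the ntfy topic. A small sketch using the plain `url` crate (Harmony wraps it in `harmony_types::net::Url`); all values are placeholders.

```rust
// Sketch of the webhook URL shape built above; domain, topic and auth value
// are illustrative placeholders, not values from the repository.
use url::Url;

fn ntfy_webhook_url(domain: &str, topic: &str, auth_param: &str) -> Url {
    Url::parse(&format!("http://{domain}/{topic}?auth={auth_param}"))
        .expect("well-formed webhook URL")
}

fn main() {
    let url = ntfy_webhook_url("ntfy.apps.example.com", "my-app", "aGFybW9ueTpoYXJtb255");
    println!("{url}");
}
```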
| @ -2,10 +2,6 @@ mod feature; | ||||
| pub mod features; | ||||
| pub mod oci; | ||||
| mod rust; | ||||
| mod stateless; | ||||
| mod stateful; | ||||
| pub use stateless::*; | ||||
| pub use stateful::*; | ||||
| use std::sync::Arc; | ||||
| 
 | ||||
| pub use feature::*; | ||||
| @ -28,8 +24,8 @@ use harmony_types::id::Id; | ||||
| #[derive(Clone, Debug)] | ||||
| pub enum ApplicationFeatureStatus { | ||||
|     Installing, | ||||
|     Installed, | ||||
|     Failed { details: String }, | ||||
|     Installed { details: Vec<String> }, | ||||
|     Failed { message: String }, | ||||
| } | ||||
| 
 | ||||
| pub trait Application: std::fmt::Debug + Send + Sync { | ||||
| @ -69,27 +65,32 @@ impl<A: Application, T: Topology + std::fmt::Debug> Interpret<T> for Application | ||||
|             .unwrap(); | ||||
| 
 | ||||
|             let _ = match feature.ensure_installed(topology).await { | ||||
|                 Ok(()) => { | ||||
|                 Ok(outcome) => { | ||||
|                     instrumentation::instrument(HarmonyEvent::ApplicationFeatureStateChanged { | ||||
|                         topology: topology.name().into(), | ||||
|                         application: self.application.name(), | ||||
|                         feature: feature.name(), | ||||
|                         status: ApplicationFeatureStatus::Installed, | ||||
|                         status: ApplicationFeatureStatus::Installed { | ||||
|                             details: match outcome { | ||||
|                                 InstallationOutcome::Success { details } => details, | ||||
|                                 InstallationOutcome::Noop => vec![], | ||||
|                             }, | ||||
|                         }, | ||||
|                     }) | ||||
|                     .unwrap(); | ||||
|                 } | ||||
|                 Err(msg) => { | ||||
|                 Err(error) => { | ||||
|                     instrumentation::instrument(HarmonyEvent::ApplicationFeatureStateChanged { | ||||
|                         topology: topology.name().into(), | ||||
|                         application: self.application.name(), | ||||
|                         feature: feature.name(), | ||||
|                         status: ApplicationFeatureStatus::Failed { | ||||
|                             details: msg.clone(), | ||||
|                             message: error.to_string(), | ||||
|                         }, | ||||
|                     }) | ||||
|                     .unwrap(); | ||||
|                     return Err(InterpretError::new(format!( | ||||
|                         "Application Interpret failed to install feature : {msg}" | ||||
|                         "Application Interpret failed to install feature : {error}" | ||||
|                     ))); | ||||
|                 } | ||||
|             }; | ||||
|  | ||||
| @ -1,6 +1,5 @@ | ||||
| use async_trait::async_trait; | ||||
| 
 | ||||
| use super::Application; | ||||
| use async_trait::async_trait; | ||||
| 
 | ||||
| #[async_trait] | ||||
| pub trait OCICompliant: Application { | ||||
| @ -17,5 +16,10 @@ pub trait HelmPackage: Application { | ||||
|     ///
 | ||||
|     /// # Arguments
 | ||||
|     /// * `image_url` - The full URL of the OCI container image to be used in the Deployment.
 | ||||
|     async fn build_push_helm_package(&self, image_url: &str) -> Result<String, String>; | ||||
|     /// * `domain` - The domain where the application is hosted.
 | ||||
|     async fn build_push_helm_package( | ||||
|         &self, | ||||
|         image_url: &str, | ||||
|         domain: &str, | ||||
|     ) -> Result<String, String>; | ||||
| } | ||||
|  | ||||
| @ -1,5 +1,4 @@ | ||||
| use std::fs::{self, File}; | ||||
| use std::io::Read; | ||||
| use std::fs::{self}; | ||||
| use std::path::{Path, PathBuf}; | ||||
| use std::process; | ||||
| use std::sync::Arc; | ||||
| @ -11,14 +10,13 @@ use dockerfile_builder::Dockerfile; | ||||
| use dockerfile_builder::instruction::{CMD, COPY, ENV, EXPOSE, FROM, RUN, USER, WORKDIR}; | ||||
| use dockerfile_builder::instruction_builder::CopyBuilder; | ||||
| use futures_util::StreamExt; | ||||
| use log::{debug, info, log_enabled}; | ||||
| use log::{debug, error, info, log_enabled, trace, warn}; | ||||
| use serde::Serialize; | ||||
| use tar::{Archive, Builder, Header}; | ||||
| use tar::{Builder, Header}; | ||||
| use walkdir::WalkDir; | ||||
| 
 | ||||
| use crate::config::{REGISTRY_PROJECT, REGISTRY_URL}; | ||||
| use crate::{score::Score, topology::Topology}; | ||||
| use harmony_types::net::Url; | ||||
| 
 | ||||
| use super::{Application, ApplicationFeature, ApplicationInterpret, HelmPackage, OCICompliant}; | ||||
| 
 | ||||
| @ -58,7 +56,6 @@ pub enum RustWebFramework { | ||||
| #[derive(Debug, Clone, Serialize)] | ||||
| pub struct RustWebapp { | ||||
|     pub name: String, | ||||
|     pub domain: Url, | ||||
|     /// The path to the root of the Rust project to be containerized.
 | ||||
|     pub project_root: PathBuf, | ||||
|     pub service_port: u32, | ||||
| @ -73,12 +70,17 @@ impl Application for RustWebapp { | ||||
| 
 | ||||
| #[async_trait] | ||||
| impl HelmPackage for RustWebapp { | ||||
|     async fn build_push_helm_package(&self, image_url: &str) -> Result<String, String> { | ||||
|     async fn build_push_helm_package( | ||||
|         &self, | ||||
|         image_url: &str, | ||||
|         domain: &str, | ||||
|     ) -> Result<String, String> { | ||||
|         info!("Starting Helm chart build and push for '{}'", self.name); | ||||
| 
 | ||||
|         // 1. Create the Helm chart files on disk.
 | ||||
|         let chart_dir = self | ||||
|             .create_helm_chart_files(image_url) | ||||
|             .create_helm_chart_files(image_url, domain) | ||||
|             .await | ||||
|             .map_err(|e| format!("Failed to create Helm chart files: {}", e))?; | ||||
|         info!("Successfully created Helm chart files in {:?}", chart_dir); | ||||
| 
 | ||||
| @ -160,7 +162,7 @@ impl RustWebapp { | ||||
|         &self, | ||||
|         image_name: &str, | ||||
|     ) -> Result<String, Box<dyn std::error::Error>> { | ||||
|         debug!("Generating Dockerfile for '{}'", self.name); | ||||
|         info!("Generating Dockerfile for '{}'", self.name); | ||||
|         let dockerfile = self.get_or_build_dockerfile(); | ||||
|         let quiet = !log_enabled!(log::Level::Debug); | ||||
|         match dockerfile | ||||
| @ -192,8 +194,41 @@ impl RustWebapp { | ||||
|                     Some(body_full(tar_data.into())), | ||||
|                 ); | ||||
| 
 | ||||
|                 while let Some(msg) = image_build_stream.next().await { | ||||
|                     debug!("Message: {msg:?}"); | ||||
|                 while let Some(mut msg) = image_build_stream.next().await { | ||||
|                     trace!("Got bollard msg {msg:?}"); | ||||
|                     match msg { | ||||
|                         Ok(mut msg) => { | ||||
|                             if let Some(progress) = msg.progress_detail { | ||||
|                                 info!( | ||||
|                                     "Build progress {}/{}", | ||||
|                                     progress.current.unwrap_or(0), | ||||
|                                     progress.total.unwrap_or(0) | ||||
|                                 ); | ||||
|                             } | ||||
| 
 | ||||
|                             if let Some(mut log) = msg.stream { | ||||
|                                 if log.ends_with('\n') { | ||||
|                                     log.pop(); | ||||
|                                     if log.ends_with('\r') { | ||||
|                                         log.pop(); | ||||
|                                     } | ||||
|                                 } | ||||
|                                 info!("{log}"); | ||||
|                             } | ||||
| 
 | ||||
|                             if let Some(error) = msg.error { | ||||
|                                 warn!("Build error : {error:?}"); | ||||
|                             } | ||||
| 
 | ||||
|                             if let Some(error) = msg.error_detail { | ||||
|                                 warn!("Build error : {error:?}"); | ||||
|                             } | ||||
|                         } | ||||
|                         Err(e) => { | ||||
|                             error!("Build failed : {e}"); | ||||
|                             return Err(format!("Build failed : {e}").into()); | ||||
|                         } | ||||
|                     } | ||||
|                 } | ||||
| 
 | ||||
|                 Ok(image_name.to_string()) | ||||
| @ -206,7 +241,7 @@ impl RustWebapp { | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     ///normalizes timestamp and ignores files that will bust the docker cache
 | ||||
|     /// Normalizes timestamps and ignores files that would bust the Docker cache
 | ||||
|     async fn create_deterministic_tar( | ||||
|         &self, | ||||
|         project_root: &std::path::Path, | ||||
| @ -220,7 +255,9 @@ impl RustWebapp { | ||||
|                 ".git", | ||||
|                 ".github", | ||||
|                 ".harmony_generated", | ||||
|                 "harmony", | ||||
|                 "node_modules", | ||||
|                 "Dockerfile.harmony", | ||||
|             ]; | ||||
|             let mut entries: Vec<_> = WalkDir::new(project_root) | ||||
|                 .into_iter() | ||||
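The rest of `create_deterministic_tar` is elided by this hunk. The idea it implements, per the doc comment above, is a tar archive whose bytes depend only on file contents, so the Docker build-context hash stays stable across rebuilds. A rough, self-contained sketch of that technique follows; the helper name, fixed mode bits, and explicit sorting are illustrative, not the project's exact implementation.

```rust
// Sketch of a deterministic tar: fixed metadata so the archive bytes (and
// therefore Docker's cache key) only change when file contents change.
use std::{fs, path::Path};
use tar::{Builder, Header};
use walkdir::WalkDir;

fn deterministic_tar(project_root: &Path) -> std::io::Result<Vec<u8>> {
    // Same exclusion list as above: paths that change without affecting the
    // build output would needlessly invalidate Docker's cache.
    let excludes = [
        ".git", ".github", ".harmony_generated", "harmony",
        "node_modules", "Dockerfile.harmony",
    ];
    let mut builder = Builder::new(Vec::new());

    // Collect files, skip excluded path components, and sort so archive order
    // never depends on filesystem iteration order.
    let mut files: Vec<_> = WalkDir::new(project_root)
        .into_iter()
        .filter_map(|e| e.ok())
        .filter(|e| e.file_type().is_file())
        .filter(|e| {
            !e.path()
                .components()
                .any(|c| excludes.contains(&c.as_os_str().to_str().unwrap_or("")))
        })
        .collect();
    files.sort_by(|a, b| a.path().cmp(b.path()));

    for entry in files {
        let data = fs::read(entry.path())?;
        let rel = entry
            .path()
            .strip_prefix(project_root)
            .expect("path under project root");
        let mut header = Header::new_gnu();
        header.set_size(data.len() as u64);
        header.set_mode(0o644); // fixed permissions
        header.set_mtime(0);    // fixed timestamp: the key to cache stability
        header.set_cksum();
        builder.append_data(&mut header, rel, data.as_slice())?;
    }
    builder.into_inner()
}
```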
| @ -265,8 +302,6 @@ impl RustWebapp { | ||||
| 
 | ||||
|         let docker = Docker::connect_with_socket_defaults().unwrap(); | ||||
| 
 | ||||
|         // let push_options = PushImageOptionsBuilder::new().tag(tag);
 | ||||
| 
 | ||||
|         let mut push_image_stream = docker.push_image( | ||||
|             image_tag, | ||||
|             Some(PushImageOptionsBuilder::new().build()), | ||||
| @ -274,6 +309,8 @@ impl RustWebapp { | ||||
|         ); | ||||
| 
 | ||||
|         while let Some(msg) = push_image_stream.next().await { | ||||
|             // let msg = msg?;
 | ||||
|             // TODO this fails silently, for some reason bollard cannot push to hub.nationtech.io
 | ||||
|             debug!("Message: {msg:?}"); | ||||
|         } | ||||
| 
 | ||||
| @ -408,9 +445,10 @@ impl RustWebapp { | ||||
|     } | ||||
| 
 | ||||
|     /// Creates all necessary files for a basic Helm chart.
 | ||||
|     fn create_helm_chart_files( | ||||
|     async fn create_helm_chart_files( | ||||
|         &self, | ||||
|         image_url: &str, | ||||
|         domain: &str, | ||||
|     ) -> Result<PathBuf, Box<dyn std::error::Error>> { | ||||
|         let chart_name = format!("{}-chart", self.name); | ||||
|         let chart_dir = self | ||||
| @ -460,21 +498,15 @@ ingress: | ||||
|   enabled: true | ||||
|   # Annotations for cert-manager to handle SSL. | ||||
|   annotations: | ||||
|     cert-manager.io/cluster-issuer: "letsencrypt-prod" | ||||
|     # Add other annotations like nginx ingress class if needed | ||||
|     # kubernetes.io/ingress.class: nginx | ||||
|   hosts: | ||||
|     - host: chart-example.local | ||||
|     - host: {} | ||||
|       paths: | ||||
|         - path: / | ||||
|           pathType: ImplementationSpecific | ||||
|   tls: | ||||
|    - secretName: {}-tls | ||||
|      hosts: | ||||
|        - chart-example.local | ||||
| 
 | ||||
| "#,
 | ||||
|             chart_name, image_repo, image_tag, self.service_port, self.name | ||||
|             chart_name, image_repo, image_tag, self.service_port, domain, | ||||
|         ); | ||||
|         fs::write(chart_dir.join("values.yaml"), values_yaml)?; | ||||
| 
 | ||||
|  | ||||
| @ -1,6 +0,0 @@ | ||||
| use crate::modules::application::Application; | ||||
| 
 | ||||
| /// A StatefulApplication is an application bundle that writes persistent data.
 | ||||
| /// 
 | ||||
| /// This will enable backup features, stateful multisite replication, etc.
 | ||||
| pub trait StatefulApplication: Application {} | ||||
| @ -1,26 +0,0 @@ | ||||
| use crate::modules::application::{Application, features::ContinuousDeliveryApplication}; | ||||
| 
 | ||||
| /// Marker trait for stateless application that can be deployed anywhere without worrying about
 | ||||
| /// data.
 | ||||
| ///
 | ||||
| /// This includes Applications fitting these categories :
 | ||||
| ///
 | ||||
| ///  - Application with all files built into the docker image and never written to, can be mounted
 | ||||
| ///  read-only
 | ||||
| ///  - Application writing to hard drive on ephemeral volume that can be lost at anytime and does
 | ||||
| ///  not require any replication/backup logic to operate
 | ||||
| ///     - Not supported : an application that writes state to a volume that must be shared or kept
 | ||||
| ///     to maintain a quorum across various instances
 | ||||
| ///  - Application connecting to a database/datastore accessible from anywhere such as
 | ||||
| ///     - Public bucket endpoint
 | ||||
| ///     - Publicly accessible
 | ||||
| ///  - Application connecting to a private database external to this application, accessible from the
 | ||||
| ///  deployment target
 | ||||
| ///     - Ensuring the private database is reachable is out of scope of this trait (for now)
 | ||||
| ///
 | ||||
| ///  The entire application definition **must not** require any persistent volume or include a
 | ||||
| ///  deployment component depending on persistent data such as a transitive PostgreSQL helm chart.
 | ||||
| ///
 | ||||
| ///  Typically, applications that can be autoscaled without additional complexity fit the
 | ||||
| ///  StatelessApplication requirements.
 | ||||
| pub trait StatelessApplication: Application + ContinuousDeliveryApplication {} | ||||
							
								
								
									
harmony/src/modules/cert_manager/cluster_issuer.rs (new file, 209 lines)
						| @ -0,0 +1,209 @@ | ||||
| use std::sync::Arc; | ||||
| 
 | ||||
| use async_trait::async_trait; | ||||
| use harmony_types::id::Id; | ||||
| use kube::{CustomResource, api::ObjectMeta}; | ||||
| use schemars::JsonSchema; | ||||
| use serde::{Deserialize, Serialize}; | ||||
| 
 | ||||
| use crate::{ | ||||
|     data::Version, | ||||
|     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, | ||||
|     inventory::Inventory, | ||||
|     score::Score, | ||||
|     topology::{K8sclient, Topology, k8s::K8sClient}, | ||||
| }; | ||||
| 
 | ||||
| #[derive(Clone, Debug, Serialize)] | ||||
| pub struct ClusterIssuerScore { | ||||
|     email: String, | ||||
|     server: String, | ||||
|     issuer_name: String, | ||||
|     namespace: String, | ||||
| } | ||||
| 
 | ||||
| impl<T: Topology + K8sclient> Score<T> for ClusterIssuerScore { | ||||
|     fn name(&self) -> String { | ||||
|         "ClusterIssuerScore".to_string() | ||||
|     } | ||||
| 
 | ||||
|     #[doc(hidden)] | ||||
|     fn create_interpret(&self) -> Box<dyn Interpret<T>> { | ||||
|         Box::new(ClusterIssuerInterpret { | ||||
|             score: self.clone(), | ||||
|         }) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[derive(Debug, Clone)] | ||||
| pub struct ClusterIssuerInterpret { | ||||
|     score: ClusterIssuerScore, | ||||
| } | ||||
| 
 | ||||
| #[async_trait] | ||||
| impl<T: Topology + K8sclient> Interpret<T> for ClusterIssuerInterpret { | ||||
|     async fn execute( | ||||
|         &self, | ||||
|         _inventory: &Inventory, | ||||
|         topology: &T, | ||||
|     ) -> Result<Outcome, InterpretError> { | ||||
|         self.apply_cluster_issuer(topology.k8s_client().await.unwrap()) | ||||
|             .await | ||||
|     } | ||||
| 
 | ||||
|     fn get_name(&self) -> InterpretName { | ||||
|         InterpretName::Custom("ClusterIssuer") | ||||
|     } | ||||
| 
 | ||||
|     fn get_version(&self) -> Version { | ||||
|         todo!() | ||||
|     } | ||||
| 
 | ||||
|     fn get_status(&self) -> InterpretStatus { | ||||
|         todo!() | ||||
|     } | ||||
| 
 | ||||
|     fn get_children(&self) -> Vec<Id> { | ||||
|         todo!() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl ClusterIssuerInterpret { | ||||
|     async fn validate_cert_manager( | ||||
|         &self, | ||||
|         client: &Arc<K8sClient>, | ||||
|     ) -> Result<Outcome, InterpretError> { | ||||
|         let cert_manager = "cert-manager".to_string(); | ||||
|         let operator_namespace = "openshift-operators".to_string(); | ||||
|         match client | ||||
|             .get_deployment(&cert_manager, Some(&operator_namespace)) | ||||
|             .await | ||||
|         { | ||||
|             Ok(Some(deployment)) => { | ||||
|                 if let Some(status) = deployment.status { | ||||
|                     let ready_count = status.ready_replicas.unwrap_or(0); | ||||
|                     if ready_count >= 1 { | ||||
|                         return Ok(Outcome::success(format!( | ||||
|                             "'{}' is ready with {} replica(s).", | ||||
|                             &cert_manager, ready_count | ||||
|                         ))); | ||||
|                     } else { | ||||
|                         return Err(InterpretError::new( | ||||
|                             "cert-manager operator not ready in cluster".to_string(), | ||||
|                         )); | ||||
|                     } | ||||
|                 } else { | ||||
|                     Err(InterpretError::new(format!( | ||||
|                         "failed to get deployment status {} in ns {}", | ||||
|                         &cert_manager, &operator_namespace | ||||
|                     ))) | ||||
|                 } | ||||
|             } | ||||
|             Ok(None) => Err(InterpretError::new(format!( | ||||
|                 "Deployment '{}' not found in namespace '{}'.", | ||||
|                 &cert_manager, &operator_namespace | ||||
|             ))), | ||||
|             Err(e) => Err(InterpretError::new(format!( | ||||
|                 "Failed to query for deployment '{}': {}", | ||||
|                 &cert_manager, e | ||||
|             ))), | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     fn build_cluster_issuer(&self) -> Result<ClusterIssuer, InterpretError> { | ||||
|         let issuer_name = &self.score.issuer_name; | ||||
|         let email = &self.score.email; | ||||
|         let server = &self.score.server; | ||||
|         let namespace = &self.score.namespace; | ||||
|         let cluster_issuer = ClusterIssuer { | ||||
|             metadata: ObjectMeta { | ||||
|                 name: Some(issuer_name.to_string()), | ||||
|                 namespace: Some(namespace.to_string()), | ||||
|                 ..Default::default() | ||||
|             }, | ||||
|             spec: ClusterIssuerSpec { | ||||
|                 acme: AcmeSpec { | ||||
|                     email: email.to_string(), | ||||
|                     private_key_secret_ref: PrivateKeySecretRef { | ||||
|                         name: issuer_name.to_string(), | ||||
|                     }, | ||||
|                     server: server.to_string(), | ||||
|                     solvers: vec![SolverSpec { | ||||
|                         http01: Some(Http01Solver { | ||||
|                             ingress: Http01Ingress { | ||||
|                                 class: "nginx".to_string(), | ||||
|                             }, | ||||
|                         }), | ||||
|                     }], | ||||
|                 }, | ||||
|             }, | ||||
|         }; | ||||
| 
 | ||||
|         Ok(cluster_issuer) | ||||
|     } | ||||
| 
 | ||||
|     pub async fn apply_cluster_issuer( | ||||
|         &self, | ||||
|         client: Arc<K8sClient>, | ||||
|     ) -> Result<Outcome, InterpretError> { | ||||
|         let namespace = self.score.namespace.clone(); | ||||
|         self.validate_cert_manager(&client).await?; | ||||
|         let cluster_issuer = self.build_cluster_issuer().unwrap(); | ||||
|         client | ||||
|             .apply_yaml( | ||||
|                 &serde_yaml::to_value(cluster_issuer).unwrap(), | ||||
|                 Some(&namespace), | ||||
|             ) | ||||
|             .await?; | ||||
|         Ok(Outcome::success(format!( | ||||
|             "successfully deployed ClusterIssuer: {} in namespace: {}", | ||||
|             self.score.issuer_name, self.score.namespace | ||||
|         ))) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)] | ||||
| #[kube(
 | ||||
|     group = "cert-manager.io", | ||||
|     version = "v1", | ||||
|     kind = "ClusterIssuer", | ||||
|     plural = "clusterissuers" | ||||
| )] | ||||
| #[serde(rename_all = "camelCase")] | ||||
| pub struct ClusterIssuerSpec { | ||||
|     pub acme: AcmeSpec, | ||||
| } | ||||
| 
 | ||||
| #[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)] | ||||
| #[serde(rename_all = "camelCase")] | ||||
| pub struct AcmeSpec { | ||||
|     pub email: String, | ||||
|     pub private_key_secret_ref: PrivateKeySecretRef, | ||||
|     pub server: String, | ||||
|     pub solvers: Vec<SolverSpec>, | ||||
| } | ||||
| 
 | ||||
| #[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)] | ||||
| #[serde(rename_all = "camelCase")] | ||||
| pub struct PrivateKeySecretRef { | ||||
|     pub name: String, | ||||
| } | ||||
| 
 | ||||
| #[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)] | ||||
| #[serde(rename_all = "camelCase")] | ||||
| pub struct SolverSpec { | ||||
|     pub http01: Option<Http01Solver>, | ||||
|     // Other solver types (e.g., dns01) would go here as Options
 | ||||
| } | ||||
| 
 | ||||
| #[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)] | ||||
| #[serde(rename_all = "camelCase")] | ||||
| pub struct Http01Solver { | ||||
|     pub ingress: Http01Ingress, | ||||
| } | ||||
| 
 | ||||
| #[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)] | ||||
| #[serde(rename_all = "camelCase")] | ||||
| pub struct Http01Ingress { | ||||
|     pub class: String, | ||||
| } | ||||
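Putting the CRD structs above together, the manifest that `apply_cluster_issuer` submits looks roughly like the YAML below. The issuer name, email, namespace, and ACME server URL are illustrative placeholders; the snippet only sanity-checks the document with `serde_yaml`, which this interpret already uses.

```rust
// Approximate shape of the generated ClusterIssuer, reconstructed from the
// camelCase-renamed structs above; all concrete values are placeholders.
fn main() {
    let manifest = r#"
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-prod
  namespace: cert-manager
spec:
  acme:
    email: admin@example.com
    privateKeySecretRef:
      name: letsencrypt-prod
    server: https://acme-v02.api.letsencrypt.org/directory
    solvers:
      - http01:
          ingress:
            class: nginx
"#;
    // Confirm the document is well-formed YAML before it would be applied.
    let value: serde_yaml::Value = serde_yaml::from_str(manifest).expect("valid YAML");
    println!("{}", serde_yaml::to_string(&value).unwrap());
}
```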
| @ -1,5 +1,6 @@ | ||||
| use std::{collections::HashMap, str::FromStr}; | ||||
| 
 | ||||
| use harmony_macros::hurl; | ||||
| use non_blank_string_rs::NonBlankString; | ||||
| use serde::Serialize; | ||||
| use url::Url; | ||||
| @ -33,7 +34,7 @@ impl<T: Topology + HelmCommand> Score<T> for CertManagerHelmScore { | ||||
|             install_only: true, | ||||
|             repository: Some(HelmRepository::new( | ||||
|                 "jetstack".to_string(), | ||||
|                 Url::parse("https://charts.jetstack.io").unwrap(), | ||||
|                 hurl!("https://charts.jetstack.io"), | ||||
|                 true, | ||||
|             )), | ||||
|         } | ||||
|  | ||||
| @ -1,2 +1,3 @@ | ||||
| pub mod cluster_issuer; | ||||
| mod helm; | ||||
| pub use helm::*; | ||||
|  | ||||
| @ -69,17 +69,14 @@ impl DhcpInterpret { | ||||
| 
 | ||||
|         dhcp_server.set_pxe_options(pxe_options).await?; | ||||
| 
 | ||||
|         Ok(Outcome::new( | ||||
|             InterpretStatus::SUCCESS, | ||||
|             format!( | ||||
|                 "Dhcp Interpret Set next boot to [{:?}], boot_filename to [{:?}], filename to [{:?}], filename64 to [{:?}], filenameipxe to [:{:?}]", | ||||
|                 self.score.boot_filename, | ||||
|                 self.score.boot_filename, | ||||
|                 self.score.filename, | ||||
|                 self.score.filename64, | ||||
|                 self.score.filenameipxe | ||||
|             ), | ||||
|         )) | ||||
|         Ok(Outcome::success(format!( | ||||
|             "Dhcp Interpret Set next boot to [{:?}], boot_filename to [{:?}], filename to [{:?}], filename64 to [{:?}], filenameipxe to [{:?}]", | ||||
|             self.score.boot_filename, | ||||
|             self.score.boot_filename, | ||||
|             self.score.filename, | ||||
|             self.score.filename64, | ||||
|             self.score.filenameipxe | ||||
|         ))) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| @ -122,8 +119,7 @@ impl<T: Topology + DhcpServer> Interpret<T> for DhcpInterpret { | ||||
| 
 | ||||
|         topology.commit_config().await?; | ||||
| 
 | ||||
|         Ok(Outcome::new( | ||||
|             InterpretStatus::SUCCESS, | ||||
|         Ok(Outcome::success( | ||||
|             "Dhcp Interpret execution successful".to_string(), | ||||
|         )) | ||||
|     } | ||||
| @ -197,10 +193,10 @@ impl DhcpHostBindingInterpret { | ||||
|             } | ||||
|         } | ||||
| 
 | ||||
|         Ok(Outcome::new( | ||||
|             InterpretStatus::SUCCESS, | ||||
|             format!("Dhcp Interpret registered {} entries", number_new_entries), | ||||
|         )) | ||||
|         Ok(Outcome::success(format!( | ||||
|             "Dhcp Interpret registered {} entries", | ||||
|             number_new_entries | ||||
|         ))) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| @ -236,12 +232,9 @@ impl<T: DhcpServer> Interpret<T> for DhcpHostBindingInterpret { | ||||
| 
 | ||||
|         topology.commit_config().await?; | ||||
| 
 | ||||
|         Ok(Outcome::new( | ||||
|             InterpretStatus::SUCCESS, | ||||
|             format!( | ||||
|                 "Dhcp Host Binding Interpret execution successful on {} hosts", | ||||
|                 self.score.host_binding.len() | ||||
|             ), | ||||
|         )) | ||||
|         Ok(Outcome::success(format!( | ||||
|             "Dhcp Host Binding Interpret execution successful on {} hosts", | ||||
|             self.score.host_binding.len() | ||||
|         ))) | ||||
|     } | ||||
| } | ||||
|  | ||||
| @ -55,8 +55,7 @@ impl DnsInterpret { | ||||
|             dns.register_dhcp_leases(register).await?; | ||||
|         } | ||||
| 
 | ||||
|         Ok(Outcome::new( | ||||
|             InterpretStatus::SUCCESS, | ||||
|         Ok(Outcome::success( | ||||
|             "DNS Interpret execution successful".to_string(), | ||||
|         )) | ||||
|     } | ||||
| @ -68,13 +67,10 @@ impl DnsInterpret { | ||||
|         let entries = &self.score.dns_entries; | ||||
|         dns_server.ensure_hosts_registered(entries.clone()).await?; | ||||
| 
 | ||||
|         Ok(Outcome::new( | ||||
|             InterpretStatus::SUCCESS, | ||||
|             format!( | ||||
|                 "DnsInterpret registered {} hosts successfully", | ||||
|                 entries.len() | ||||
|             ), | ||||
|         )) | ||||
|         Ok(Outcome::success(format!( | ||||
|             "DnsInterpret registered {} hosts successfully", | ||||
|             entries.len() | ||||
|         ))) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| @ -111,8 +107,7 @@ impl<T: Topology + DnsServer> Interpret<T> for DnsInterpret { | ||||
| 
 | ||||
|         topology.commit_config().await?; | ||||
| 
 | ||||
|         Ok(Outcome::new( | ||||
|             InterpretStatus::SUCCESS, | ||||
|         Ok(Outcome::success( | ||||
|             "Dns Interpret execution successful".to_string(), | ||||
|         )) | ||||
|     } | ||||
|  | ||||
| @ -5,6 +5,7 @@ use crate::score::Score; | ||||
| use crate::topology::{HelmCommand, Topology}; | ||||
| use async_trait::async_trait; | ||||
| use harmony_types::id::Id; | ||||
| use harmony_types::net::Url; | ||||
| use helm_wrapper_rs; | ||||
| use helm_wrapper_rs::blocking::{DefaultHelmExecutor, HelmExecutor}; | ||||
| use log::{debug, info, warn}; | ||||
| @ -15,7 +16,6 @@ use std::path::Path; | ||||
| use std::process::{Command, Output, Stdio}; | ||||
| use std::str::FromStr; | ||||
| use temp_file::TempFile; | ||||
| use url::Url; | ||||
| 
 | ||||
| #[derive(Debug, Clone, Serialize)] | ||||
| pub struct HelmRepository { | ||||
| @ -78,7 +78,8 @@ impl HelmChartInterpret { | ||||
|             repo.name, repo.url, repo.force_update | ||||
|         ); | ||||
| 
 | ||||
|         let mut add_args = vec!["repo", "add", &repo.name, repo.url.as_str()]; | ||||
|         let repo_url = repo.url.to_string(); | ||||
|         let mut add_args = vec!["repo", "add", &repo.name, &repo_url]; | ||||
|         if repo.force_update { | ||||
|             add_args.push("--force-update"); | ||||
|         } | ||||
| @ -153,6 +154,10 @@ impl<T: Topology + HelmCommand> Interpret<T> for HelmChartInterpret { | ||||
|         let yaml_path: Option<&Path> = match self.score.values_yaml.as_ref() { | ||||
|             Some(yaml_str) => { | ||||
|                 tf = temp_file::with_contents(yaml_str.as_bytes()); | ||||
|                 debug!( | ||||
|                     "values yaml string for chart {} :\n {yaml_str}", | ||||
|                     self.score.chart_name | ||||
|                 ); | ||||
|                 Some(tf.path()) | ||||
|             } | ||||
|             None => None, | ||||
| @ -193,13 +198,10 @@ impl<T: Topology + HelmCommand> Interpret<T> for HelmChartInterpret { | ||||
|                     self.score.release_name, ns | ||||
|                 ); | ||||
| 
 | ||||
|                 return Ok(Outcome::new( | ||||
|                     InterpretStatus::SUCCESS, | ||||
|                     format!( | ||||
|                         "Helm Chart '{}' already installed to namespace {ns} and install_only=true", | ||||
|                         self.score.release_name | ||||
|                     ), | ||||
|                 )); | ||||
|                 return Ok(Outcome::success(format!( | ||||
|                     "Helm Chart '{}' already installed to namespace {ns} and install_only=true", | ||||
|                     self.score.release_name | ||||
|                 ))); | ||||
|             } else { | ||||
|                 info!( | ||||
|                     "Release '{}' not found in namespace '{}'. Proceeding with installation.", | ||||
| @ -224,18 +226,18 @@ impl<T: Topology + HelmCommand> Interpret<T> for HelmChartInterpret { | ||||
|         }; | ||||
| 
 | ||||
|         match status { | ||||
|             helm_wrapper_rs::HelmDeployStatus::Deployed => Ok(Outcome::new( | ||||
|                 InterpretStatus::SUCCESS, | ||||
|                 format!("Helm Chart {} deployed", self.score.release_name), | ||||
|             )), | ||||
|             helm_wrapper_rs::HelmDeployStatus::PendingInstall => Ok(Outcome::new( | ||||
|                 InterpretStatus::RUNNING, | ||||
|                 format!("Helm Chart {} pending install...", self.score.release_name), | ||||
|             )), | ||||
|             helm_wrapper_rs::HelmDeployStatus::PendingUpgrade => Ok(Outcome::new( | ||||
|                 InterpretStatus::RUNNING, | ||||
|                 format!("Helm Chart {} pending upgrade...", self.score.release_name), | ||||
|             )), | ||||
|             helm_wrapper_rs::HelmDeployStatus::Deployed => Ok(Outcome::success(format!( | ||||
|                 "Helm Chart {} deployed", | ||||
|                 self.score.release_name | ||||
|             ))), | ||||
|             helm_wrapper_rs::HelmDeployStatus::PendingInstall => Ok(Outcome::running(format!( | ||||
|                 "Helm Chart {} pending install...", | ||||
|                 self.score.release_name | ||||
|             ))), | ||||
|             helm_wrapper_rs::HelmDeployStatus::PendingUpgrade => Ok(Outcome::running(format!( | ||||
|                 "Helm Chart {} pending upgrade...", | ||||
|                 self.score.release_name | ||||
|             ))), | ||||
|             helm_wrapper_rs::HelmDeployStatus::Failed => Err(InterpretError::new(format!( | ||||
|                 "Helm Chart {} installation failed", | ||||
|                 self.score.release_name | ||||
|  | ||||
| @ -1,364 +0,0 @@ | ||||
| use async_trait::async_trait; | ||||
| use log::debug; | ||||
| use serde::Serialize; | ||||
| use std::collections::HashMap; | ||||
| use std::io::ErrorKind; | ||||
| use std::path::PathBuf; | ||||
| use std::process::{Command, Output}; | ||||
| use temp_dir::{self, TempDir}; | ||||
| use temp_file::TempFile; | ||||
| 
 | ||||
| use crate::data::Version; | ||||
| use crate::interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}; | ||||
| use crate::inventory::Inventory; | ||||
| use crate::score::Score; | ||||
| use crate::topology::{HelmCommand, K8sclient, Topology}; | ||||
| use harmony_types::id::Id; | ||||
| 
 | ||||
| #[derive(Clone)] | ||||
| pub struct HelmCommandExecutor { | ||||
|     pub env: HashMap<String, String>, | ||||
|     pub path: Option<PathBuf>, | ||||
|     pub args: Vec<String>, | ||||
|     pub api_versions: Option<Vec<String>>, | ||||
|     pub kube_version: String, | ||||
|     pub debug: Option<bool>, | ||||
|     pub globals: HelmGlobals, | ||||
|     pub chart: HelmChart, | ||||
| } | ||||
| 
 | ||||
| #[derive(Clone)] | ||||
| pub struct HelmGlobals { | ||||
|     pub chart_home: Option<PathBuf>, | ||||
|     pub config_home: Option<PathBuf>, | ||||
| } | ||||
| 
 | ||||
| #[derive(Debug, Clone, Serialize)] | ||||
| pub struct HelmChart { | ||||
|     pub name: String, | ||||
|     pub version: Option<String>, | ||||
|     pub repo: Option<String>, | ||||
|     pub release_name: Option<String>, | ||||
|     pub namespace: Option<String>, | ||||
|     pub additional_values_files: Vec<PathBuf>, | ||||
|     pub values_file: Option<PathBuf>, | ||||
|     pub values_inline: Option<String>, | ||||
|     pub include_crds: Option<bool>, | ||||
|     pub skip_hooks: Option<bool>, | ||||
|     pub api_versions: Option<Vec<String>>, | ||||
|     pub kube_version: Option<String>, | ||||
|     pub name_template: String, | ||||
|     pub skip_tests: Option<bool>, | ||||
|     pub debug: Option<bool>, | ||||
| } | ||||
| 
 | ||||
| impl HelmCommandExecutor { | ||||
|     pub fn generate(mut self) -> Result<String, std::io::Error> { | ||||
|         if self.globals.chart_home.is_none() { | ||||
|             self.globals.chart_home = Some(PathBuf::from("charts")); | ||||
|         } | ||||
| 
 | ||||
|         if self | ||||
|             .clone() | ||||
|             .chart | ||||
|             .clone() | ||||
|             .chart_exists_locally(self.clone().globals.chart_home.unwrap()) | ||||
|             .is_none() | ||||
|         { | ||||
|             if self.chart.repo.is_none() { | ||||
|                 return Err(std::io::Error::new( | ||||
|                     ErrorKind::Other, | ||||
|                     "Chart doesn't exist locally and no repo specified", | ||||
|                 )); | ||||
|             } | ||||
|             self.clone().run_command( | ||||
|                 self.chart | ||||
|                     .clone() | ||||
|                     .pull_command(self.globals.chart_home.clone().unwrap()), | ||||
|             )?; | ||||
|         } | ||||
| 
 | ||||
|         let out = self.clone().run_command( | ||||
|             self.chart | ||||
|                 .clone() | ||||
|                 .helm_args(self.globals.chart_home.clone().unwrap()), | ||||
|         )?; | ||||
| 
 | ||||
|         // TODO: don't use unwrap here
 | ||||
|         let s = String::from_utf8(out.stdout).unwrap(); | ||||
|         debug!("helm stderr: {}", String::from_utf8(out.stderr).unwrap()); | ||||
|         debug!("helm status: {}", out.status); | ||||
|         debug!("helm output: {s}"); | ||||
| 
 | ||||
|         let clean = s.split_once("---").unwrap().1; | ||||
| 
 | ||||
|         Ok(clean.to_string()) | ||||
|     } | ||||
| 
 | ||||
|     pub fn version(self) -> Result<String, std::io::Error> { | ||||
|         let out = self.run_command(vec![ | ||||
|             "version".to_string(), | ||||
|             "-c".to_string(), | ||||
|             "--short".to_string(), | ||||
|         ])?; | ||||
| 
 | ||||
|         // TODO: don't use unwrap
 | ||||
|         Ok(String::from_utf8(out.stdout).unwrap()) | ||||
|     } | ||||
| 
 | ||||
|     pub fn run_command(mut self, mut args: Vec<String>) -> Result<Output, std::io::Error> { | ||||
|         if let Some(d) = self.debug { | ||||
|             if d { | ||||
|                 args.push("--debug".to_string()); | ||||
|             } | ||||
|         } | ||||
| 
 | ||||
|         let path = if let Some(p) = self.path { | ||||
|             p | ||||
|         } else { | ||||
|             PathBuf::from("helm") | ||||
|         }; | ||||
| 
 | ||||
|         let config_home = match self.globals.config_home { | ||||
|             Some(p) => p, | ||||
|             None => PathBuf::from(TempDir::new()?.path()), | ||||
|         }; | ||||
| 
 | ||||
|         if let Some(yaml_str) = self.chart.values_inline { | ||||
|             let tf: TempFile = temp_file::with_contents(yaml_str.as_bytes()); | ||||
|             self.chart | ||||
|                 .additional_values_files | ||||
|                 .push(PathBuf::from(tf.path())); | ||||
|         }; | ||||
| 
 | ||||
|         self.env.insert( | ||||
|             "HELM_CONFIG_HOME".to_string(), | ||||
|             config_home.to_str().unwrap().to_string(), | ||||
|         ); | ||||
|         self.env.insert( | ||||
|             "HELM_CACHE_HOME".to_string(), | ||||
|             config_home.to_str().unwrap().to_string(), | ||||
|         ); | ||||
|         self.env.insert( | ||||
|             "HELM_DATA_HOME".to_string(), | ||||
|             config_home.to_str().unwrap().to_string(), | ||||
|         ); | ||||
| 
 | ||||
|         Command::new(path).envs(self.env).args(args).output() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl HelmChart { | ||||
|     pub fn chart_exists_locally(self, chart_home: PathBuf) -> Option<PathBuf> { | ||||
|         let chart_path = | ||||
|             PathBuf::from(chart_home.to_str().unwrap().to_string() + "/" + &self.name.to_string()); | ||||
| 
 | ||||
|         if chart_path.exists() { | ||||
|             Some(chart_path) | ||||
|         } else { | ||||
|             None | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     pub fn pull_command(self, chart_home: PathBuf) -> Vec<String> { | ||||
|         let mut args = vec![ | ||||
|             "pull".to_string(), | ||||
|             "--untar".to_string(), | ||||
|             "--untardir".to_string(), | ||||
|             chart_home.to_str().unwrap().to_string(), | ||||
|         ]; | ||||
| 
 | ||||
|         match self.repo { | ||||
|             Some(r) => { | ||||
|                 if r.starts_with("oci://") { | ||||
|                     args.push( | ||||
|                         r.trim_end_matches("/").to_string() + "/" + self.name.clone().as_str(), | ||||
|                     ); | ||||
|                 } else { | ||||
|                     args.push("--repo".to_string()); | ||||
|                     args.push(r.to_string()); | ||||
| 
 | ||||
|                     args.push(self.name); | ||||
|                 } | ||||
|             } | ||||
|             None => args.push(self.name), | ||||
|         }; | ||||
| 
 | ||||
|         if let Some(v) = self.version { | ||||
|             args.push("--version".to_string()); | ||||
|             args.push(v.to_string()); | ||||
|         } | ||||
| 
 | ||||
|         args | ||||
|     } | ||||
| 
 | ||||
|     pub fn helm_args(self, chart_home: PathBuf) -> Vec<String> { | ||||
|         let mut args: Vec<String> = vec!["template".to_string()]; | ||||
| 
 | ||||
|         match self.release_name { | ||||
|             Some(rn) => args.push(rn.to_string()), | ||||
|             None => args.push("--generate-name".to_string()), | ||||
|         } | ||||
| 
 | ||||
|         args.push( | ||||
|             PathBuf::from(chart_home.to_str().unwrap().to_string() + "/" + self.name.as_str()) | ||||
|                 .to_str() | ||||
|                 .unwrap() | ||||
|                 .to_string(), | ||||
|         ); | ||||
| 
 | ||||
|         if let Some(n) = self.namespace { | ||||
|             args.push("--namespace".to_string()); | ||||
|             args.push(n.to_string()); | ||||
|         } | ||||
| 
 | ||||
|         if let Some(f) = self.values_file { | ||||
|             args.push("-f".to_string()); | ||||
|             args.push(f.to_str().unwrap().to_string()); | ||||
|         } | ||||
| 
 | ||||
|         for f in self.additional_values_files { | ||||
|             args.push("-f".to_string()); | ||||
|             args.push(f.to_str().unwrap().to_string()); | ||||
|         } | ||||
| 
 | ||||
|         if let Some(vv) = self.api_versions { | ||||
|             for v in vv { | ||||
|                 args.push("--api-versions".to_string()); | ||||
|                 args.push(v); | ||||
|             } | ||||
|         } | ||||
| 
 | ||||
|         if let Some(kv) = self.kube_version { | ||||
|             args.push("--kube-version".to_string()); | ||||
|             args.push(kv); | ||||
|         } | ||||
| 
 | ||||
|         if let Some(crd) = self.include_crds { | ||||
|             if crd { | ||||
|                 args.push("--include-crds".to_string()); | ||||
|             } | ||||
|         } | ||||
| 
 | ||||
|         if let Some(st) = self.skip_tests { | ||||
|             if st { | ||||
|                 args.push("--skip-tests".to_string()); | ||||
|             } | ||||
|         } | ||||
| 
 | ||||
|         if self.skip_hooks.unwrap_or(false) { | ||||
|             args.push("--no-hooks".to_string()); | ||||
|         } | ||||
| 
 | ||||
|         if self.debug.unwrap_or(false) { | ||||
|             args.push("--debug".to_string()); | ||||
|         } | ||||
| 
 | ||||
|         args | ||||
|     } | ||||
| } | ||||
| 
 | ||||
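| /// Score wrapping a single `HelmChart`; interpreted by `HelmChartInterpretV2`. | ||||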
| #[derive(Debug, Clone, Serialize)] | ||||
| pub struct HelmChartScoreV2 { | ||||
|     pub chart: HelmChart, | ||||
| } | ||||
| 
 | ||||
| impl<T: Topology + K8sclient + HelmCommand> Score<T> for HelmChartScoreV2 { | ||||
|     fn create_interpret(&self) -> Box<dyn Interpret<T>> { | ||||
|         Box::new(HelmChartInterpretV2 { | ||||
|             score: self.clone(), | ||||
|         }) | ||||
|     } | ||||
| 
 | ||||
|     fn name(&self) -> String { | ||||
|         format!( | ||||
|             "{} {} HelmChartScoreV2", | ||||
|             self.chart | ||||
|                 .release_name | ||||
|                 .clone() | ||||
|                 .unwrap_or("Unknown".to_string()), | ||||
|             self.chart.name | ||||
|         ) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
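| /// Interpret that renders the chart with `HelmCommandExecutor::generate()`; applying the | ||||
| /// rendered manifests to the cluster is still a TODO (see `execute`). | ||||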
| #[derive(Debug, Serialize)] | ||||
| pub struct HelmChartInterpretV2 { | ||||
|     pub score: HelmChartScoreV2, | ||||
| } | ||||
| impl HelmChartInterpretV2 {} | ||||
| 
 | ||||
| #[async_trait] | ||||
| impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for HelmChartInterpretV2 { | ||||
|     async fn execute( | ||||
|         &self, | ||||
|         _inventory: &Inventory, | ||||
|         _topology: &T, | ||||
|     ) -> Result<Outcome, InterpretError> { | ||||
|         let _ns = self | ||||
|             .score | ||||
|             .chart | ||||
|             .namespace | ||||
|             .as_ref() | ||||
|             .unwrap_or_else(|| todo!("Get namespace from active kubernetes cluster")); | ||||
| 
 | ||||
|         let helm_executor = HelmCommandExecutor { | ||||
|             env: HashMap::new(), | ||||
|             path: None, | ||||
|             args: vec![], | ||||
|             api_versions: None, | ||||
|             kube_version: "v1.33.0".to_string(), | ||||
|             debug: Some(false), | ||||
|             globals: HelmGlobals { | ||||
|                 chart_home: None, | ||||
|                 config_home: None, | ||||
|             }, | ||||
|             chart: self.score.chart.clone(), | ||||
|         }; | ||||
| 
 | ||||
|         // let mut helm_options = Vec::new(); | ||||
|         // if self.score.create_namespace { | ||||
|         //     helm_options.push(NonBlankString::from_str("--create-namespace").unwrap()); | ||||
|         // } | ||||
| 
 | ||||
|         let _output = helm_executor | ||||
|             .generate() | ||||
|             .map_err(|err| InterpretError::new(err.to_string()))?; | ||||
| 
 | ||||
|         // TODO: apply the YAML rendered by `generate()` to the cluster. Passing the raw | ||||
|         // multi-document output straight to the k8s client has not worked so far; one option | ||||
|         // is to split it into individual documents first (see the `split_rendered_manifests` | ||||
|         // sketch below) and apply each resource separately. | ||||
|         // | ||||
|         // let k8s_resource = k8s_openapi::serde_json::from_str(output.as_str()).unwrap(); | ||||
|         // let client = topology | ||||
|         //     .k8s_client() | ||||
|         //     .await | ||||
|         //     .expect("Environment should provide enough information to instantiate a client") | ||||
|         //     .apply_namespaced(&vec![output], Some(ns.to_string().as_str())); | ||||
|         // match client.apply_yaml(output) { | ||||
|         //     Ok(_) => return Ok(Outcome::success("Helm chart deployed".to_string())), | ||||
|         //     Err(e) => return Err(InterpretError::new(e)), | ||||
|         // } | ||||
| 
 | ||||
|         Ok(Outcome::success( | ||||
|             "Helm chart templated; applying it to the cluster is not implemented yet".to_string(), | ||||
|         )) | ||||
|     } | ||||
| 
 | ||||
|     fn get_name(&self) -> InterpretName { | ||||
|         InterpretName::HelmCommand | ||||
|     } | ||||
|     fn get_version(&self) -> Version { | ||||
|         todo!() | ||||
|     } | ||||
|     fn get_status(&self) -> InterpretStatus { | ||||
|         todo!() | ||||
|     } | ||||
|     fn get_children(&self) -> Vec<Id> { | ||||
|         todo!() | ||||
|     } | ||||
| } | ||||
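| 
 | ||||
| // A minimal sketch of the document-splitting step mentioned in the TODO above. It assumes | ||||
| // the `serde_yaml` crate (together with `serde`) is available to this crate; the hypothetical | ||||
| // `split_rendered_manifests` helper only covers the parsing side, and the actual apply call | ||||
| // against the cluster client is project-specific and left out. | ||||
| #[allow(dead_code)] | ||||
| fn split_rendered_manifests(rendered: &str) -> Result<Vec<serde_yaml::Value>, serde_yaml::Error> { | ||||
|     use serde::Deserialize; | ||||
|     // `helm template` emits a single YAML stream with `---` separators; deserialize each | ||||
|     // document into a generic `serde_yaml::Value` so it can later be applied one at a time. | ||||
|     serde_yaml::Deserializer::from_str(rendered) | ||||
|         .map(serde_yaml::Value::deserialize) | ||||
|         .collect() | ||||
| } | ||||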
| @ -1,2 +1 @@ | ||||
| pub mod chart; | ||||
| pub mod command; | ||||
|  | ||||