Compare commits

32ffc3ef61 ... 85d7baac6c (37 commits)
| SHA1 |
|---|
| 85d7baac6c |
| 62fa3c2b10 |
| ea1380f98a |
| 701d8cfab9 |
| f9906cb419 |
| cb4382fbb5 |
| 1eca2cc1a9 |
| 269f13ae9b |
| ec277bc13d |
| a9f8cd16ea |
| c542a935e3 |
| 0395d11e98 |
| 05e7b8075c |
| b857412151 |
| 7bb3602ab8 |
| 78b80c2169 |
| 0876f4e4f0 |
| 6ac0e095a3 |
| ff2efc0a66 |
| f180cc4c80 |
| 8cc7adf196 |
| a1ab5d40fb |
| 6c92dd24f7 |
| c805d7e018 |
| b33615b969 |
| 0f59f29ac4 |
| 361f240762 |
| 57c3b01e66 |
| 94ddf027dd |
| 06a2be4496 |
| e2a09efdee |
| 2618441de3 |
| da6610c625 |
| e956772593 |
| 27c51e0ec5 |
| 597dcbc848 |
| a53e8552e9 |
@@ -0,0 +1,32 @@
{
  "db_name": "SQLite",
  "query": "SELECT id, version_id, data as \"data: Json<PhysicalHost>\" FROM physical_hosts WHERE id = ? ORDER BY version_id DESC LIMIT 1",
  "describe": {
    "columns": [
      {
        "name": "id",
        "ordinal": 0,
        "type_info": "Text"
      },
      {
        "name": "version_id",
        "ordinal": 1,
        "type_info": "Text"
      },
      {
        "name": "data: Json<PhysicalHost>",
        "ordinal": 2,
        "type_info": "Null"
      }
    ],
    "parameters": {
      "Right": 1
    },
    "nullable": [
      false,
      false,
      false
    ]
  },
  "hash": "934035c7ca6e064815393e4e049a7934b0a7fac04a4fe4b2a354f0443d630990"
}
@@ -0,0 +1,12 @@
{
  "db_name": "SQLite",
  "query": "INSERT INTO physical_hosts (id, version_id, data) VALUES (?, ?, ?)",
  "describe": {
    "columns": [],
    "parameters": {
      "Right": 3
    },
    "nullable": []
  },
  "hash": "f10f615ee42129ffa293e46f2f893d65a237d31d24b74a29c6a8d8420d255ab8"
}
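The two JSON blobs above are sqlx offline query metadata, the kind of entries sqlx writes to its compile-time query cache (presumably under `.sqlx/`; the file paths are not shown in this view). As a rough, hypothetical sketch of the Rust these entries correspond to, assuming a serializable `PhysicalHost` type and sqlx's SQLite and JSON support (the row struct, function names, and error handling below are illustrative, not taken from the Harmony codebase):

```rust
use serde::{Deserialize, Serialize};
use sqlx::{types::Json, SqlitePool};

// Stand-in for the real `PhysicalHost` defined elsewhere in the repository.
#[derive(Debug, Serialize, Deserialize)]
struct PhysicalHost { /* fields omitted */ }

// Row shape matching the cached SELECT: id, version_id, data as "data: Json<PhysicalHost>".
struct PhysicalHostRow {
    id: String,
    version_id: String,
    data: Json<PhysicalHost>,
}

// Latest version of a host, mirroring the first cached query.
async fn latest_host(pool: &SqlitePool, id: &str) -> Result<Option<PhysicalHostRow>, sqlx::Error> {
    sqlx::query_as!(
        PhysicalHostRow,
        r#"SELECT id, version_id, data as "data: Json<PhysicalHost>" FROM physical_hosts WHERE id = ? ORDER BY version_id DESC LIMIT 1"#,
        id
    )
    .fetch_optional(pool)
    .await
}

// New host version, mirroring the second cached query; the JSON payload is
// serialized up front so the bind stays a plain TEXT parameter.
async fn insert_host(
    pool: &SqlitePool,
    id: &str,
    version_id: &str,
    host: &PhysicalHost,
) -> Result<(), sqlx::Error> {
    let data = serde_json::to_string(host).expect("PhysicalHost serializes to JSON");
    sqlx::query!(
        "INSERT INTO physical_hosts (id, version_id, data) VALUES (?, ?, ?)",
        id,
        version_id,
        data
    )
    .execute(pool)
    .await?;
    Ok(())
}
```

The `as "data: Json<PhysicalHost>"` annotation in the SELECT is what tells sqlx to decode the stored JSON back into the domain type when rows are fetched; the cache records that column's `type_info` as `Null` because SQLite cannot infer it on its own.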
							
								
								
									
Cargo.lock (generated, 1075 lines changed): file diff suppressed because it is too large.
Cargo.toml (workspace manifest)

@@ -14,7 +14,7 @@ members = [
  "harmony_composer",
  "harmony_inventory_agent",
  "harmony_secret_derive",
-  "harmony_secret",
+  "harmony_secret", "adr/agent_discovery/mdns",
]

[workspace.package]
@@ -36,7 +36,7 @@ tokio = { version = "1.40", features = [
cidr = { features = ["serde"], version = "0.2" }
russh = "0.45"
russh-keys = "0.45"
-rand = "0.8"
+rand = "0.9"
url = "2.5"
kube = { version = "1.1.0", features = [
  "config",
@@ -65,3 +65,6 @@ directories = "6.0.0"
thiserror = "2.0.14"
serde = { version = "1.0.209", features = ["derive", "rc"] }
serde_json = "1.0.127"
+askama = "0.14"
+sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite" ] }
+reqwest = { version = "0.12", features = ["blocking", "stream", "rustls-tls", "http2", "json"], default-features = false }
							
								
								
									
adr/agent_discovery/mdns/Cargo.toml (new file, 17 lines)
@@ -0,0 +1,17 @@
[package]
name = "mdns"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true

[dependencies]
mdns-sd = "0.14"
tokio = { version = "1", features = ["full"] }
futures = "0.3"
dmidecode = "0.2" # For getting the motherboard ID on the agent
log.workspace = true
env_logger.workspace = true
clap = { version = "4.5.46", features = ["derive"] }
get_if_addrs = "0.5.3"
local-ip-address = "0.6.5"
							
								
								
									
adr/agent_discovery/mdns/src/advertise.rs (new file, 60 lines)
@@ -0,0 +1,60 @@
// harmony-agent/src/main.rs

use log::info;
use mdns_sd::{ServiceDaemon, ServiceInfo};
use std::collections::HashMap;

use crate::SERVICE_TYPE;

// The service we are advertising.
const SERVICE_PORT: u16 = 43210; // A port for the service. It needs one, even if unused.

pub async fn advertise() {
    info!("Starting Harmony Agent...");

    // Get a unique ID for this machine.
    let motherboard_id = "some motherboard id";
    let instance_name = format!("harmony-agent-{}", motherboard_id);
    info!("This agent's instance name: {}", instance_name);
    info!("Advertising with ID: {}", motherboard_id);

    // Create a new mDNS daemon.
    let mdns = ServiceDaemon::new().expect("Failed to create mDNS daemon");

    // Create a TXT record HashMap to hold our metadata.
    let mut properties = HashMap::new();
    properties.insert("id".to_string(), motherboard_id.to_string());
    properties.insert("version".to_string(), "1.0".to_string());

    // Create the service information.
    // The instance name should be unique on the network.
    let local_ip = local_ip_address::local_ip().unwrap();
    let service_info = ServiceInfo::new(
        SERVICE_TYPE,
        &instance_name,
        "harmony-host.local.", // A hostname for the service
        local_ip,
        // "0.0.0.0",
        SERVICE_PORT,
        Some(properties),
    )
    .expect("Failed to create service info");

    // Register our service with the daemon.
    mdns.register(service_info)
        .expect("Failed to register service");

    info!(
        "Service '{}' registered and now being advertised.",
        instance_name
    );
    info!("Agent is running. Press Ctrl+C to exit.");

    for iface in get_if_addrs::get_if_addrs().unwrap() {
        println!("{:#?}", iface);
    }

    // Keep the agent running indefinitely.
    tokio::signal::ctrl_c().await.unwrap();
    info!("Shutting down agent.");
}
							
								
								
									
adr/agent_discovery/mdns/src/discover.rs (new file, 110 lines)
@@ -0,0 +1,110 @@
use log::debug;
use mdns_sd::{ServiceDaemon, ServiceEvent};

use crate::SERVICE_TYPE;

pub async fn discover() {
    println!("Starting Harmony Master and browsing for agents...");

    // Create a new mDNS daemon.
    let mdns = ServiceDaemon::new().expect("Failed to create mDNS daemon");

    // Start browsing for the service type.
    // The receiver will be a stream of events.
    let receiver = mdns.browse(SERVICE_TYPE).expect("Failed to browse");

    println!(
        "Listening for mDNS events for '{}'. Press Ctrl+C to exit.",
        SERVICE_TYPE
    );

    std::thread::spawn(move || {
        while let Ok(event) = receiver.recv() {
            match event {
                ServiceEvent::ServiceData(resolved) => {
                    println!("Resolved a new service: {}", resolved.fullname);
                }
                other_event => {
                    println!("Received other event: {:?}", &other_event);
                }
            }
        }
    });

    // Gracefully shutdown the daemon.
    std::thread::sleep(std::time::Duration::from_secs(1000000));
    mdns.shutdown().unwrap();

    // Process events as they come in.
    // while let Ok(event) = receiver.recv_async().await {
    //     debug!("Received event {event:?}");
    //     // match event {
    //     //     ServiceEvent::ServiceFound(svc_type, fullname) => {
    //     //         println!("\n--- Agent Discovered ---");
    //     //         println!("  Service Name: {}", fullname());
    //     //         // You can now resolve this service to get its IP, port, and TXT records
    //     //         // The resolve operation is a separate network call.
    //     //         let receiver = mdns.browse(info.get_fullname()).unwrap();
    //     //         if let Ok(resolve_event) = receiver.recv_timeout(Duration::from_secs(2)) {
    //     //              if let ServiceEvent::ServiceResolved(info) = resolve_event {
    //     //                 let ip = info.get_addresses().iter().next().unwrap();
    //     //                 let port = info.get_port();
    //     //                 let motherboard_id = info.get_property("id").map_or("N/A", |v| v.val_str());
    //     //
    //     //                 println!("  IP: {}:{}", ip, port);
    //     //                 println!("  Motherboard ID: {}", motherboard_id);
    //     //                 println!("------------------------");
    //     //
    //     //                 // TODO: Add this agent to your central list of discovered hosts.
    //     //              }
    //     //         } else {
    //     //             println!("Could not resolve service '{}' in time.", info.get_fullname());
    //     //         }
    //     //     }
    //     //     ServiceEvent::ServiceRemoved(info) => {
    //     //         println!("\n--- Agent Removed ---");
    //     //         println!("  Service Name: {}", info.get_fullname());
    //     //         println!("---------------------");
    //     //         // TODO: Remove this agent from your list.
    //     //     }
    //     //     _ => {
    //     //         // We don't care about other event types for this example
    //     //     }
    //     // }
    // }
}

async fn discover_example() {
    use mdns_sd::{ServiceDaemon, ServiceEvent};

    // Create a daemon
    let mdns = ServiceDaemon::new().expect("Failed to create daemon");

    // Use recently added `ServiceEvent::ServiceData`.
    mdns.use_service_data(true)
        .expect("Failed to use ServiceData");

    // Browse for a service type.
    let service_type = "_mdns-sd-my-test._udp.local.";
    let receiver = mdns.browse(service_type).expect("Failed to browse");

    // Receive the browse events in sync or async. Here is
    // an example of using a thread. Users can call `receiver.recv_async().await`
    // if running in async environment.
    std::thread::spawn(move || {
        while let Ok(event) = receiver.recv() {
            match event {
                ServiceEvent::ServiceData(resolved) => {
                    println!("Resolved a new service: {}", resolved.fullname);
                }
                other_event => {
                    println!("Received other event: {:?}", &other_event);
                }
            }
        }
    });

    // Gracefully shutdown the daemon.
    std::thread::sleep(std::time::Duration::from_secs(1));
    mdns.shutdown().unwrap();
}
							
								
								
									
adr/agent_discovery/mdns/src/main.rs (new file, 31 lines)
@@ -0,0 +1,31 @@
use clap::{Parser, ValueEnum};

mod advertise;
mod discover;

#[derive(Parser, Debug)]
#[command(version, about, long_about = None)]
struct Args {
    #[arg(value_enum)]
    profile: Profiles,
}

#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, ValueEnum)]
enum Profiles {
    Advertise,
    Discover,
}

// The service type we are looking for.
const SERVICE_TYPE: &str = "_harmony._tcp.local.";

#[tokio::main]
async fn main() {
    env_logger::init();
    let args = Args::parse();

    match args.profile {
        Profiles::Advertise => advertise::advertise().await,
        Profiles::Discover => discover::discover().await,
    }
}
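Given the clap setup above, the prototype is presumably invoked with one of the two profiles, e.g. `cargo run -p mdns -- advertise` on the agent side and `cargo run -p mdns -- discover` on the master side (assuming clap's default value-enum naming, which lowercases the variant names).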
							
								
								
									
check.sh (1 line changed)
@@ -1,6 +1,7 @@
#!/bin/sh
set -e

+rustc --version
cargo check --all-targets --all-features --keep-going
cargo fmt --check
cargo clippy
							
								
								
									
data/pxe/okd/README.md (new file, 8 lines)
@@ -0,0 +1,8 @@
Here lies all the data files required for an OKD cluster PXE boot setup.

This includes ISO files, binary boot files, iPXE, etc.

TODO as of August 2025:

- `harmony_inventory_agent` should be downloaded from official releases; this embedded version is practical for now though
- The cluster SSH key should be generated and handled by Harmony, with the private key saved in a secret store
							
								
								
									
data/pxe/okd/http_files/.gitattributes (new file, 9 lines, vendored)
@@ -0,0 +1,9 @@
harmony_inventory_agent filter=lfs diff=lfs merge=lfs -text
os filter=lfs diff=lfs merge=lfs -text
os/centos-stream-9 filter=lfs diff=lfs merge=lfs -text
os/centos-stream-9/images filter=lfs diff=lfs merge=lfs -text
os/centos-stream-9/initrd.img filter=lfs diff=lfs merge=lfs -text
os/centos-stream-9/vmlinuz filter=lfs diff=lfs merge=lfs -text
os/centos-stream-9/images/efiboot.img filter=lfs diff=lfs merge=lfs -text
os/centos-stream-9/images/install.img filter=lfs diff=lfs merge=lfs -text
os/centos-stream-9/images/pxeboot filter=lfs diff=lfs merge=lfs -text
							
								
								
									
data/pxe/okd/http_files/cluster_ssh_key.pub (new file, 1 line)
@@ -0,0 +1 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBx6bDylvC68cVpjKfEFtLQJ/dOFi6PVS2vsIOqPDJIc jeangab@liliane2
							
								
								
									
										
data/pxe/okd/http_files/harmony_inventory_agent (binary, stored with Git LFS, executable file): binary file not shown.
data/pxe/okd/http_files/os/centos-stream-9/images/efiboot.img (binary, stored with Git LFS, new file): binary file not shown.
data/pxe/okd/http_files/os/centos-stream-9/images/install.img (binary, stored with Git LFS, new file): binary file not shown.

(One additional binary file in this changeset is not shown; its path was not captured in this view.)
data/pxe/okd/http_files/os/centos-stream-9/images/pxeboot/vmlinuz (binary, executable file): binary file not shown.
data/pxe/okd/http_files/os/centos-stream-9/initrd.img (binary, stored with Git LFS, new file): binary file not shown.
data/pxe/okd/http_files/os/centos-stream-9/vmlinuz (binary, stored with Git LFS, executable file): binary file not shown.
data/pxe/okd/tftpboot/ipxe.efi (binary, new file): binary file not shown.
data/pxe/okd/tftpboot/undionly.kpxe (binary, new file): binary file not shown.
docs/pxe_test/README.md (new file, 108 lines)
@@ -0,0 +1,108 @@
# OPNsense PXE Lab Environment

This project contains a script to automatically set up a virtual lab environment for testing PXE boot services managed by an OPNsense firewall.

## Overview

The `pxe_vm_lab_setup.sh` script will create the following resources using libvirt/KVM:

1.  **A Virtual Network**: An isolated network named `harmonylan` (`virbr1`) for the lab.
2.  **Two Virtual Machines**:
    *   `opnsense-pxe`: A firewall VM that will act as the gateway and PXE server.
    *   `pxe-node-1`: A client VM configured to boot from the network.

## Prerequisites

Ensure you have the following software installed on your Arch Linux host:

*   `libvirt`
*   `qemu`
*   `virt-install` (from the `virt-install` package)
*   `curl`
*   `bzip2`

## Usage

### 1. Create the Environment

Run the `up` command to download the necessary images and create the network and VMs.

```bash
sudo ./pxe_vm_lab_setup.sh up
```

### 2. Install and Configure OPNsense

The OPNsense VM is created but the OS needs to be installed manually via the console.

1.  **Connect to the VM console**:
    ```bash
    sudo virsh console opnsense-pxe
    ```

2.  **Log in as the installer**:
    *   Username: `installer`
    *   Password: `opnsense`

3.  **Follow the on-screen installation wizard**. When prompted to assign network interfaces (`WAN` and `LAN`):
    *   Find the MAC address for the `harmonylan` interface by running this command in another terminal:
        ```bash
        virsh domiflist opnsense-pxe
        # Example output:
        # Interface   Type      Source       Model    MAC
        # ---------------------------------------------------------
        # vnet18      network   default      virtio   52:54:00:b5:c4:6d
        # vnet19      network   harmonylan   virtio   52:54:00:21:f9:ba
        ```
    *   Assign the interface connected to `harmonylan` (e.g., `vtnet1` with MAC `52:54:00:21:f9:ba`) as your **LAN**.
    *   Assign the other interface as your **WAN**.

4.  After the installation is complete, **shut down** the VM from the console menu.

5.  **Detach the installation media** by editing the VM's configuration:
    ```bash
    sudo virsh edit opnsense-pxe
    ```
    Find and **delete** the entire `<disk>` block corresponding to the `.img` file (the one with `<target ... bus='usb'/>`).

6.  **Start the VM** to boot into the newly installed system:
    ```bash
    sudo virsh start opnsense-pxe
    ```

### 3. Connect to OPNsense from Your Host

To configure OPNsense, you need to connect your host to the `harmonylan` network.

1.  By default, OPNsense configures its LAN interface with the IP `192.168.1.1`.
2.  Assign a compatible IP address to your host's `virbr1` bridge interface:
    ```bash
    sudo ip addr add 192.168.1.5/24 dev virbr1
    ```
3.  You can now access the OPNsense VM from your host:
    *   **SSH**: `ssh root@192.168.1.1` (password: `opnsense`)
    *   **Web UI**: `https://192.168.1.1`

### 4. Configure PXE Services with Harmony

With connectivity established, you can now use Harmony to configure the OPNsense firewall for PXE booting. Point your Harmony OPNsense scores to the firewall using these details:

*   **Hostname/IP**: `192.168.1.1`
*   **Credentials**: `root` / `opnsense`

### 5. Boot the PXE Client

Once your Harmony configuration has been applied and OPNsense is serving DHCP/TFTP, start the client VM. It will automatically attempt to boot from the network.

```bash
sudo virsh start pxe-node-1
sudo virsh console pxe-node-1
```

## Cleanup

To destroy all VMs and networks created by the script, run the `clean` command:

```bash
sudo ./pxe_vm_lab_setup.sh clean
```
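The Harmony side of step 4 above is shown concretely later in this changeset: `examples/okd_pxe/src/topology.rs` builds an `OPNSenseFirewall` pointed at `192.168.1.1`, pulling the firewall credentials from the secret store via `SecretManager` rather than hard-coding them.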
							
								
								
									
docs/pxe_test/pxe_vm_lab_setup.sh (new file, 191 lines, executable)
@@ -0,0 +1,191 @@
#!/usr/bin/env bash
set -euo pipefail

# --- Configuration ---
LAB_DIR="/var/lib/harmony_pxe_test"
IMG_DIR="${LAB_DIR}/images"
STATE_DIR="${LAB_DIR}/state"
VM_OPN="opnsense-pxe"
VM_PXE="pxe-node-1"
NET_HARMONYLAN="harmonylan"

# Network settings for the isolated LAN
VLAN_CIDR="192.168.150.0/24"
VLAN_GW="192.168.150.1"
VLAN_MASK="255.255.255.0"

# VM Specifications
RAM_OPN="2048"
VCPUS_OPN="2"
DISK_OPN_GB="10"
OS_VARIANT_OPN="freebsd14.0" # Updated to a more recent FreeBSD variant

RAM_PXE="4096"
VCPUS_PXE="2"
DISK_PXE_GB="40"
OS_VARIANT_LINUX="centos-stream9"

OPN_IMG_URL="https://mirror.ams1.nl.leaseweb.net/opnsense/releases/25.7/OPNsense-25.7-serial-amd64.img.bz2"
OPN_IMG_PATH="${IMG_DIR}/OPNsense-25.7-serial-amd64.img"
CENTOS_ISO_URL="https://mirror.stream.centos.org/9-stream/BaseOS/x86_64/os/images/boot.iso"
CENTOS_ISO_PATH="${IMG_DIR}/CentOS-Stream-9-latest-boot.iso"

CONNECT_URI="qemu:///system"

download_if_missing() {
  local url="$1"
  local dest="$2"
  if [[ ! -f "$dest" ]]; then
    echo "Downloading $url to $dest"
    mkdir -p "$(dirname "$dest")"
    local tmp
    tmp="$(mktemp)"
    curl -L --progress-bar "$url" -o "$tmp"
    case "$url" in
      *.bz2) bunzip2 -c "$tmp" > "$dest" && rm -f "$tmp" ;;
      *) mv "$tmp" "$dest" ;;
    esac
  else
    echo "Already present: $dest"
  fi
}

# Ensures a libvirt network is defined and active
ensure_network() {
  local net_name="$1"
  local net_xml_path="$2"
  if virsh --connect "${CONNECT_URI}" net-info "${net_name}" >/dev/null 2>&1; then
    echo "Network ${net_name} already exists."
  else
    echo "Defining network ${net_name} from ${net_xml_path}"
    virsh --connect "${CONNECT_URI}" net-define "${net_xml_path}"
  fi

  if ! virsh --connect "${CONNECT_URI}" net-info "${net_name}" | grep "Active: *yes"; then
    echo "Starting network ${net_name}..."
    virsh --connect "${CONNECT_URI}" net-start "${net_name}"
    virsh --connect "${CONNECT_URI}" net-autostart "${net_name}"
  fi
}

# Destroys a VM completely
destroy_vm() {
  local vm_name="$1"
  if virsh --connect "${CONNECT_URI}" dominfo "$vm_name" >/dev/null 2>&1; then
    echo "Destroying and undefining VM: ${vm_name}"
    virsh --connect "${CONNECT_URI}" destroy "$vm_name" || true
    virsh --connect "${CONNECT_URI}" undefine "$vm_name" --nvram
  fi
}

# Destroys a libvirt network
destroy_network() {
  local net_name="$1"
  if virsh --connect "${CONNECT_URI}" net-info "$net_name" >/dev/null 2>&1; then
    echo "Destroying and undefining network: ${net_name}"
    virsh --connect "${CONNECT_URI}" net-destroy "$net_name" || true
    virsh --connect "${CONNECT_URI}" net-undefine "$net_name"
  fi
}

# --- Main Logic ---
create_lab_environment() {
  # Create network definition files
  cat > "${STATE_DIR}/default.xml" <<EOF
<network>
  <name>default</name>
  <forward mode='nat'/>
  <bridge name='virbr0' stp='on' delay='0'/>
  <ip address='192.168.122.1' netmask='255.255.255.0'>
    <dhcp>
      <range start='192.168.122.100' end='192.168.122.200'/>
    </dhcp>
  </ip>
</network>
EOF

  cat > "${STATE_DIR}/${NET_HARMONYLAN}.xml" <<EOF
<network>
  <name>${NET_HARMONYLAN}</name>
  <bridge name='virbr1' stp='on' delay='0'/>
</network>
EOF

  # Ensure both networks exist and are active
  ensure_network "default" "${STATE_DIR}/default.xml"
  ensure_network "${NET_HARMONYLAN}" "${STATE_DIR}/${NET_HARMONYLAN}.xml"

  # --- Create OPNsense VM (MODIFIED SECTION) ---
  local disk_opn="${IMG_DIR}/${VM_OPN}.qcow2"
  if [[ ! -f "$disk_opn" ]]; then
    qemu-img create -f qcow2 "$disk_opn" "${DISK_OPN_GB}G"
  fi

  echo "Creating OPNsense VM using serial image..."
  virt-install \
    --connect "${CONNECT_URI}" \
    --name "${VM_OPN}" \
    --ram "${RAM_OPN}" \
    --vcpus "${VCPUS_OPN}" \
    --cpu host-passthrough \
    --os-variant "${OS_VARIANT_OPN}" \
    --graphics none \
    --noautoconsole \
    --disk path="${disk_opn}",device=disk,bus=virtio,boot.order=1 \
    --disk path="${OPN_IMG_PATH}",device=disk,bus=usb,readonly=on,boot.order=2 \
    --network network=default,model=virtio \
    --network network="${NET_HARMONYLAN}",model=virtio \
    --boot uefi,menu=on

  echo "OPNsense VM created. Connect with: sudo virsh console ${VM_OPN}"
  echo "The VM will boot from the serial installation image."
  echo "Login with user 'installer' and password 'opnsense' to start the installation."
  echo "Install onto the VirtIO disk (vtbd0)."
  echo "After installation, shutdown the VM, then run 'sudo virsh edit ${VM_OPN}' and remove the USB disk block to boot from the installed system."

  # --- Create PXE Client VM ---
  local disk_pxe="${IMG_DIR}/${VM_PXE}.qcow2"
  if [[ ! -f "$disk_pxe" ]]; then
    qemu-img create -f qcow2 "$disk_pxe" "${DISK_PXE_GB}G"
  fi

  echo "Creating PXE client VM..."
  virt-install \
    --connect "${CONNECT_URI}" \
    --name "${VM_PXE}" \
    --ram "${RAM_PXE}" \
    --vcpus "${VCPUS_PXE}" \
    --cpu host-passthrough \
    --os-variant "${OS_VARIANT_LINUX}" \
    --graphics none \
    --noautoconsole \
    --disk path="${disk_pxe}",format=qcow2,bus=virtio \
    --network network="${NET_HARMONYLAN}",model=virtio \
    --pxe \
    --boot uefi,menu=on

  echo "PXE VM created. It will attempt to netboot on ${NET_HARMONYLAN}."
}

# --- Script Entrypoint ---
case "${1:-}" in
  up)
    mkdir -p "${IMG_DIR}" "${STATE_DIR}"
    download_if_missing "$OPN_IMG_URL" "$OPN_IMG_PATH"
    download_if_missing "$CENTOS_ISO_URL" "$CENTOS_ISO_PATH"
    create_lab_environment
    echo "Lab setup complete. Use 'sudo virsh list --all' to see VMs."
    ;;
  clean)
    destroy_vm "${VM_PXE}"
    destroy_vm "${VM_OPN}"
    destroy_network "${NET_HARMONYLAN}"
    # Optionally destroy the default network if you want a full reset
    # destroy_network "default"
    echo "Cleanup complete."
    ;;
  *)
    echo "Usage: sudo $0 {up|clean}"
    exit 1
    ;;
esac
@@ -7,8 +7,9 @@ license.workspace = true

[dependencies]
env_logger.workspace = true
-harmony = { version = "0.1.0", path = "../../harmony" }
-harmony_cli = { version = "0.1.0", path = "../../harmony_cli" }
+harmony = { path = "../../harmony" }
+harmony_cli = { path = "../../harmony_cli" }
+harmony_types = { path = "../../harmony_types" }
logging = "0.1.0"
tokio.workspace = true
url.workspace = true
@@ -1,15 +1,16 @@
use std::{path::PathBuf, str::FromStr, sync::Arc};

use harmony::{
-    data::Id,
    inventory::Inventory,
    modules::{
        application::{ApplicationScore, RustWebFramework, RustWebapp, features::Monitoring},
        monitoring::alert_channel::webhook_receiver::WebhookReceiver,
        tenant::TenantScore,
    },
-    topology::{K8sAnywhereTopology, Url, tenant::TenantConfig},
+    topology::{K8sAnywhereTopology, tenant::TenantConfig},
};
+use harmony_types::id::Id;
+use harmony_types::net::Url;

#[tokio::main]
async fn main() {
@@ -1,6 +1,9 @@
use harmony::{
    inventory::Inventory,
-    modules::dummy::{ErrorScore, PanicScore, SuccessScore},
+    modules::{
+        dummy::{ErrorScore, PanicScore, SuccessScore},
+        inventory::DiscoverInventoryAgentScore,
+    },
    topology::LocalhostTopology,
};

@@ -13,6 +16,9 @@ async fn main() {
            Box::new(SuccessScore {}),
            Box::new(ErrorScore {}),
            Box::new(PanicScore {}),
+            Box::new(DiscoverInventoryAgentScore {
+                discovery_timeout: Some(10),
+            }),
        ],
        None,
    )
@@ -2,8 +2,9 @@ use harmony::{
    data::Version,
    inventory::Inventory,
    modules::lamp::{LAMPConfig, LAMPScore},
-    topology::{K8sAnywhereTopology, Url},
+    topology::K8sAnywhereTopology,
};
+use harmony_types::net::Url;

#[tokio::main]
async fn main() {
@@ -6,8 +6,9 @@ readme.workspace = true
license.workspace = true

[dependencies]
-harmony = { version = "0.1.0", path = "../../harmony" }
-harmony_cli = { version = "0.1.0", path = "../../harmony_cli" }
-harmony_macros = { version = "0.1.0", path = "../../harmony_macros" }
+harmony = { path = "../../harmony" }
+harmony_cli = { path = "../../harmony_cli" }
+harmony_macros = { path = "../../harmony_macros" }
+harmony_types = { path = "../../harmony_types" }
tokio.workspace = true
url.workspace = true
@@ -22,8 +22,9 @@ use harmony::{
            k8s::pvc::high_pvc_fill_rate_over_two_days,
        },
    },
-    topology::{K8sAnywhereTopology, Url},
+    topology::K8sAnywhereTopology,
};
+use harmony_types::net::Url;

#[tokio::main]
async fn main() {
@@ -7,7 +7,8 @@ license.workspace = true

[dependencies]
cidr.workspace = true
-harmony = { version = "0.1.0", path = "../../harmony" }
-harmony_cli = { version = "0.1.0", path = "../../harmony_cli" }
+harmony = { path = "../../harmony" }
+harmony_cli = { path = "../../harmony_cli" }
+harmony_types = { path = "../../harmony_types" }
tokio.workspace = true
url.workspace = true
@@ -1,7 +1,6 @@
use std::{collections::HashMap, str::FromStr};

use harmony::{
-    data::Id,
    inventory::Inventory,
    modules::{
        monitoring::{
@@ -19,10 +18,12 @@ use harmony::{
        tenant::TenantScore,
    },
    topology::{
-        K8sAnywhereTopology, Url,
+        K8sAnywhereTopology,
        tenant::{ResourceLimits, TenantConfig, TenantNetworkPolicy},
    },
};
+use harmony_types::id::Id;
+use harmony_types::net::Url;

#[tokio::main]
async fn main() {
@@ -18,9 +18,10 @@ use harmony::{
        },
        tftp::TftpScore,
    },
-    topology::{LogicalHost, UnmanagedRouter, Url},
+    topology::{LogicalHost, UnmanagedRouter},
};
use harmony_macros::{ip, mac_address};
+use harmony_types::net::Url;

#[tokio::main]
async fn main() {
@@ -86,8 +87,7 @@ async fn main() {
    let inventory = Inventory {
        location: Location::new("I am mobile".to_string(), "earth".to_string()),
        switch: SwitchGroup::from([]),
-        firewall: FirewallGroup::from([PhysicalHost::empty(HostCategory::Firewall)
-            .management(Arc::new(OPNSenseManagementInterface::new()))]),
+        firewall_mgmt: Box::new(OPNSenseManagementInterface::new()),
        storage_host: vec![],
        worker_host: vec![
            PhysicalHost::empty(HostCategory::Server)
@@ -125,9 +125,12 @@ async fn main() {
        harmony::modules::okd::load_balancer::OKDLoadBalancerScore::new(&topology);

    let tftp_score = TftpScore::new(Url::LocalFolder("./data/watchguard/tftpboot".to_string()));
-    let http_score = StaticFilesHttpScore::new(Url::LocalFolder(
-        "./data/watchguard/pxe-http-files".to_string(),
-    ));
+    let http_score = StaticFilesHttpScore {
+        folder_to_serve: Some(Url::LocalFolder(
+            "./data/watchguard/pxe-http-files".to_string(),
+        )),
+        files: vec![],
+    };
    let ipxe_score = IpxeScore::new();

    harmony_tui::run(
							
								
								
									
examples/okd_pxe/Cargo.toml (new file, 21 lines)
@@ -0,0 +1,21 @@
[package]
name = "example-pxe"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
publish = false

[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
harmony_secret = { path = "../../harmony_secret" }
harmony_secret_derive = { path = "../../harmony_secret_derive" }
cidr = { workspace = true }
tokio = { workspace = true }
harmony_macros = { path = "../../harmony_macros" }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
serde.workspace = true
							
								
								
									
examples/okd_pxe/src/main.rs (new file, 24 lines)
@@ -0,0 +1,24 @@
mod topology;

use crate::topology::{get_inventory, get_topology};
use harmony::modules::okd::ipxe::OkdIpxeScore;

#[tokio::main]
async fn main() {
    let inventory = get_inventory();
    let topology = get_topology().await;

    let kickstart_filename = "inventory.kickstart".to_string();
    let cluster_pubkey_filename = "cluster_ssh_key.pub".to_string();
    let harmony_inventory_agent = "harmony_inventory_agent".to_string();

    let ipxe_score = OkdIpxeScore {
        kickstart_filename,
        harmony_inventory_agent,
        cluster_pubkey_filename,
    };

    harmony_cli::run(inventory, topology, vec![Box::new(ipxe_score)], None)
        .await
        .unwrap();
}
							
								
								
									
examples/okd_pxe/src/topology.rs (new file, 77 lines)
@@ -0,0 +1,77 @@
use cidr::Ipv4Cidr;
use harmony::{
    hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
    infra::opnsense::OPNSenseManagementInterface,
    inventory::Inventory,
    topology::{HAClusterTopology, LogicalHost, UnmanagedRouter},
};
use harmony_macros::{ip, ipv4};
use harmony_secret::{Secret, SecretManager};
use serde::{Deserialize, Serialize};
use std::{net::IpAddr, sync::Arc};

#[derive(Secret, Serialize, Deserialize, Debug, PartialEq)]
struct OPNSenseFirewallConfig {
    username: String,
    password: String,
}

pub async fn get_topology() -> HAClusterTopology {
    let firewall = harmony::topology::LogicalHost {
        ip: ip!("192.168.1.1"),
        name: String::from("opnsense-1"),
    };

    let config = SecretManager::get::<OPNSenseFirewallConfig>().await;
    let config = config.unwrap();

    let opnsense = Arc::new(
        harmony::infra::opnsense::OPNSenseFirewall::new(
            firewall,
            None,
            &config.username,
            &config.password,
        )
        .await,
    );
    let lan_subnet = ipv4!("192.168.1.0");
    let gateway_ipv4 = ipv4!("192.168.1.1");
    let gateway_ip = IpAddr::V4(gateway_ipv4);
    harmony::topology::HAClusterTopology {
        domain_name: "demo.harmony.mcd".to_string(),
        router: Arc::new(UnmanagedRouter::new(
            gateway_ip,
            Ipv4Cidr::new(lan_subnet, 24).unwrap(),
        )),
        load_balancer: opnsense.clone(),
        firewall: opnsense.clone(),
        tftp_server: opnsense.clone(),
        http_server: opnsense.clone(),
        dhcp_server: opnsense.clone(),
        dns_server: opnsense.clone(),
        control_plane: vec![LogicalHost {
            ip: ip!("10.100.8.20"),
            name: "cp0".to_string(),
        }],
        bootstrap_host: LogicalHost {
            ip: ip!("10.100.8.20"),
            name: "cp0".to_string(),
        },
        workers: vec![],
        switch: vec![],
    }
}

pub fn get_inventory() -> Inventory {
    Inventory {
        location: Location::new(
            "Some virtual machine or maybe a physical machine if you're cool".to_string(),
            "testopnsense".to_string(),
        ),
        switch: SwitchGroup::from([]),
        firewall_mgmt: Box::new(OPNSenseManagementInterface::new()),
        storage_host: vec![],
        worker_host: vec![],
        control_plane_host: vec![],
    }
}
							
								
								
									
examples/okd_pxe/ssh_example_key (new file, 7 lines)
@@ -0,0 +1,7 @@
-----BEGIN OPENSSH PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
QyNTUxOQAAACAcemw8pbwuvHFaYynxBbS0Cf3ThYuj1Utr7CDqjwySHAAAAJikacCNpGnA
jQAAAAtzc2gtZWQyNTUxOQAAACAcemw8pbwuvHFaYynxBbS0Cf3ThYuj1Utr7CDqjwySHA
AAAECiiKk4V6Q5cVs6axDM4sjAzZn/QCZLQekmYQXS9XbEYxx6bDylvC68cVpjKfEFtLQJ
/dOFi6PVS2vsIOqPDJIcAAAAEGplYW5nYWJAbGlsaWFuZTIBAgMEBQ==
-----END OPENSSH PRIVATE KEY-----
							
								
								
									
examples/okd_pxe/ssh_example_key.pub (new file, 1 line)
@@ -0,0 +1 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBx6bDylvC68cVpjKfEFtLQJ/dOFi6PVS2vsIOqPDJIc jeangab@liliane2
@@ -15,9 +15,10 @@ use harmony::{
        opnsense::OPNsenseShellCommandScore,
        tftp::TftpScore,
    },
-    topology::{LogicalHost, UnmanagedRouter, Url},
+    topology::{LogicalHost, UnmanagedRouter},
};
use harmony_macros::{ip, mac_address};
+use harmony_types::net::Url;

#[tokio::main]
async fn main() {
@@ -62,8 +63,7 @@ async fn main() {
            "wk".to_string(),
        ),
        switch: SwitchGroup::from([]),
-        firewall: FirewallGroup::from([PhysicalHost::empty(HostCategory::Firewall)
-            .management(Arc::new(OPNSenseManagementInterface::new()))]),
+        firewall_mgmt: Box::new(OPNSenseManagementInterface::new()),
        storage_host: vec![],
        worker_host: vec![],
        control_plane_host: vec![
@@ -80,9 +80,12 @@ async fn main() {
    let load_balancer_score = OKDLoadBalancerScore::new(&topology);

    let tftp_score = TftpScore::new(Url::LocalFolder("./data/watchguard/tftpboot".to_string()));
-    let http_score = StaticFilesHttpScore::new(Url::LocalFolder(
-        "./data/watchguard/pxe-http-files".to_string(),
-    ));
+    let http_score = StaticFilesHttpScore {
+        folder_to_serve: Some(Url::LocalFolder(
+            "./data/watchguard/pxe-http-files".to_string(),
+        )),
+        files: vec![],
+    };

    harmony_tui::run(
        inventory,
| @ -11,8 +11,9 @@ use harmony::{ | |||||||
|             discord_alert_channel::DiscordWebhook, webhook_receiver::WebhookReceiver, |             discord_alert_channel::DiscordWebhook, webhook_receiver::WebhookReceiver, | ||||||
|         }, |         }, | ||||||
|     }, |     }, | ||||||
|     topology::{K8sAnywhereTopology, Url}, |     topology::K8sAnywhereTopology, | ||||||
| }; | }; | ||||||
|  | use harmony_types::net::Url; | ||||||
| 
 | 
 | ||||||
| #[tokio::main] | #[tokio::main] | ||||||
| async fn main() { | async fn main() { | ||||||
|  | |||||||
| @ -1,11 +1,11 @@ | |||||||
| use std::str::FromStr; | use std::str::FromStr; | ||||||
| 
 | 
 | ||||||
| use harmony::{ | use harmony::{ | ||||||
|     data::Id, |  | ||||||
|     inventory::Inventory, |     inventory::Inventory, | ||||||
|     modules::tenant::TenantScore, |     modules::tenant::TenantScore, | ||||||
|     topology::{K8sAnywhereTopology, tenant::TenantConfig}, |     topology::{K8sAnywhereTopology, tenant::TenantConfig}, | ||||||
| }; | }; | ||||||
|  | use harmony_types::id::Id; | ||||||
| 
 | 
 | ||||||
| #[tokio::main] | #[tokio::main] | ||||||
| async fn main() { | async fn main() { | ||||||
|  | |||||||
| @ -9,10 +9,8 @@ license.workspace = true | |||||||
| testing = [] | testing = [] | ||||||
| 
 | 
 | ||||||
| [dependencies] | [dependencies] | ||||||
| rand = "0.9" |  | ||||||
| hex = "0.4" | hex = "0.4" | ||||||
| libredfish = "0.1.1" | reqwest = { version = "0.11", features = ["blocking", "json", "rustls-tls"], default-features = false } | ||||||
| reqwest = { version = "0.11", features = ["blocking", "json"] } |  | ||||||
| russh = "0.45.0" | russh = "0.45.0" | ||||||
| rust-ipmi = "0.1.1" | rust-ipmi = "0.1.1" | ||||||
| semver = "1.0.23" | semver = "1.0.23" | ||||||
| @ -66,9 +64,14 @@ kube-derive = "1.1.0" | |||||||
| bollard.workspace = true | bollard.workspace = true | ||||||
| tar.workspace = true | tar.workspace = true | ||||||
| base64.workspace = true | base64.workspace = true | ||||||
|  | thiserror.workspace = true | ||||||
| once_cell = "1.21.3" | once_cell = "1.21.3" | ||||||
| harmony-secret-derive = { version = "0.1.0", path = "../harmony_secret_derive" } | harmony-secret-derive = { version = "0.1.0", path = "../harmony_secret_derive" } | ||||||
| walkdir = "2.5.0" | walkdir = "2.5.0" | ||||||
|  | harmony_inventory_agent = { path = "../harmony_inventory_agent" } | ||||||
|  | harmony_secret_derive = { version = "0.1.0", path = "../harmony_secret_derive" } | ||||||
|  | askama.workspace = true | ||||||
|  | sqlx.workspace = true | ||||||
| 
 | 
 | ||||||
| [dev-dependencies] | [dev-dependencies] | ||||||
| pretty_assertions.workspace = true | pretty_assertions.workspace = true | ||||||
|  | |||||||
| @ -12,4 +12,12 @@ lazy_static! { | |||||||
|         std::env::var("HARMONY_REGISTRY_PROJECT").unwrap_or_else(|_| "harmony".to_string()); |         std::env::var("HARMONY_REGISTRY_PROJECT").unwrap_or_else(|_| "harmony".to_string()); | ||||||
|     pub static ref DRY_RUN: bool = |     pub static ref DRY_RUN: bool = | ||||||
|         std::env::var("HARMONY_DRY_RUN").is_ok_and(|value| value.parse().unwrap_or(false)); |         std::env::var("HARMONY_DRY_RUN").is_ok_and(|value| value.parse().unwrap_or(false)); | ||||||
|  |     pub static ref DEFAULT_DATABASE_URL: String = "sqlite://harmony.sqlite".to_string(); | ||||||
|  |     pub static ref DATABASE_URL: String = std::env::var("HARMONY_DATABASE_URL") | ||||||
|  |         .map(|value| if value.is_empty() { | ||||||
|  |             (*DEFAULT_DATABASE_URL).clone() | ||||||
|  |         } else { | ||||||
|  |             value | ||||||
|  |         }) | ||||||
|  |         .unwrap_or((*DEFAULT_DATABASE_URL).clone()); | ||||||
| } | } | ||||||
|  | |||||||
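The new DATABASE_URL constant above resolves in two steps: use HARMONY_DATABASE_URL when it is set and non-empty, otherwise fall back to the sqlite://harmony.sqlite default. A minimal standalone sketch of that resolution order (the function name is illustrative, not part of the crate):

fn resolve_database_url() -> String {
    const DEFAULT: &str = "sqlite://harmony.sqlite";
    match std::env::var("HARMONY_DATABASE_URL") {
        // A variable that is set but empty still falls back to the default.
        Ok(value) if !value.is_empty() => value,
        _ => DEFAULT.to_string(),
    }
}

fn main() {
    // With HARMONY_DATABASE_URL unset or empty, this prints the default.
    println!("{}", resolve_database_url());
}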
harmony/src/domain/data/file.rs (new file, 22 lines)
							| @ -0,0 +1,22 @@ | |||||||
|  | use serde::{Deserialize, Serialize}; | ||||||
|  | 
 | ||||||
|  | #[derive(Debug, Clone, Serialize, Deserialize)] | ||||||
|  | pub struct FileContent { | ||||||
|  |     pub path: FilePath, | ||||||
|  |     pub content: String, | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | #[derive(Debug, Clone, Serialize, Deserialize)] | ||||||
|  | pub enum FilePath { | ||||||
|  |     Relative(String), | ||||||
|  |     Absolute(String), | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | impl std::fmt::Display for FilePath { | ||||||
|  |     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { | ||||||
|  |         match self { | ||||||
|  |             FilePath::Relative(path) => f.write_fmt(format_args!("./{path}")), | ||||||
|  |             FilePath::Absolute(path) => f.write_fmt(format_args!("/{path}")), | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | } | ||||||
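A short sketch of how the Display impl above renders the two FilePath variants; the harmony::data import path is an assumption (inside the crate it is crate::data):

use harmony::data::{FileContent, FilePath};

fn main() {
    let boot_script = FileContent {
        path: FilePath::Relative("pxe/boot.ipxe".to_string()),
        content: "#!ipxe\n".to_string(),
    };
    // Relative paths are prefixed with "./", absolute paths with "/".
    assert_eq!(boot_script.path.to_string(), "./pxe/boot.ipxe");
    assert_eq!(
        FilePath::Absolute("var/www/html/boot.ipxe".to_string()).to_string(),
        "/var/www/html/boot.ipxe"
    );
}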
| @ -24,6 +24,14 @@ pub struct Id { | |||||||
|     value: String, |     value: String, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | impl Id { | ||||||
|  |     pub fn empty() -> Self { | ||||||
|  |         Id { | ||||||
|  |             value: String::new(), | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
| impl FromStr for Id { | impl FromStr for Id { | ||||||
|     type Err = (); |     type Err = (); | ||||||
| 
 | 
 | ||||||
| @ -34,6 +42,12 @@ impl FromStr for Id { | |||||||
|     } |     } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | impl From<String> for Id { | ||||||
|  |     fn from(value: String) -> Self { | ||||||
|  |         Self { value } | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
| impl std::fmt::Display for Id { | impl std::fmt::Display for Id { | ||||||
|     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { |     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { | ||||||
|         f.write_str(&self.value) |         f.write_str(&self.value) | ||||||
|  | |||||||
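A quick sketch of the two additions to Id shown above: Id::empty() as a placeholder value and From<String> for values read back from storage (module path as used elsewhere in this change set):

use harmony_types::id::Id;

fn main() {
    // Placeholder id, e.g. for PhysicalHost::empty().
    let placeholder = Id::empty();
    assert_eq!(placeholder.to_string(), "");

    // Conversion from an owned String, e.g. a database column.
    let from_db: Id = "host-42".to_string().into();
    assert_eq!(from_db.to_string(), "host-42");
}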
| @ -1,4 +1,4 @@ | |||||||
| mod id; | mod file; | ||||||
| mod version; | mod version; | ||||||
| pub use id::*; | pub use file::*; | ||||||
| pub use version::*; | pub use version::*; | ||||||
|  | |||||||
| @ -1,8 +1,7 @@ | |||||||
| use std::fmt; | use std::fmt; | ||||||
| 
 | 
 | ||||||
| use async_trait::async_trait; | use async_trait::async_trait; | ||||||
| 
 | use harmony_types::net::IpAddress; | ||||||
| use super::topology::IpAddress; |  | ||||||
| 
 | 
 | ||||||
| #[derive(Debug)] | #[derive(Debug)] | ||||||
| pub enum ExecutorError { | pub enum ExecutorError { | ||||||
|  | |||||||
| @ -1,38 +1,156 @@ | |||||||
| use std::sync::Arc; | use std::sync::Arc; | ||||||
| 
 | 
 | ||||||
| use derive_new::new; | use derive_new::new; | ||||||
|  | use harmony_inventory_agent::hwinfo::{CPU, MemoryModule, NetworkInterface, StorageDrive}; | ||||||
| use harmony_types::net::MacAddress; | use harmony_types::net::MacAddress; | ||||||
| use serde::{Serialize, Serializer, ser::SerializeStruct}; | use serde::{Deserialize, Serialize}; | ||||||
| use serde_value::Value; | use serde_value::Value; | ||||||
| 
 | 
 | ||||||
| pub type HostGroup = Vec<PhysicalHost>; | pub type HostGroup = Vec<PhysicalHost>; | ||||||
| pub type SwitchGroup = Vec<Switch>; | pub type SwitchGroup = Vec<Switch>; | ||||||
| pub type FirewallGroup = Vec<PhysicalHost>; | pub type FirewallGroup = Vec<PhysicalHost>; | ||||||
| 
 | 
 | ||||||
| #[derive(Debug, Clone)] | #[derive(Debug, Clone, Serialize)] | ||||||
| pub struct PhysicalHost { | pub struct PhysicalHost { | ||||||
|  |     pub id: Id, | ||||||
|     pub category: HostCategory, |     pub category: HostCategory, | ||||||
|     pub network: Vec<NetworkInterface>, |     pub network: Vec<NetworkInterface>, | ||||||
|     pub management: Arc<dyn ManagementInterface>, |     pub storage: Vec<StorageDrive>, | ||||||
|     pub storage: Vec<Storage>, |  | ||||||
|     pub labels: Vec<Label>, |     pub labels: Vec<Label>, | ||||||
|     pub memory_size: Option<u64>, |     pub memory_modules: Vec<MemoryModule>, | ||||||
|     pub cpu_count: Option<u64>, |     pub cpus: Vec<CPU>, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| impl PhysicalHost { | impl PhysicalHost { | ||||||
|     pub fn empty(category: HostCategory) -> Self { |     pub fn empty(category: HostCategory) -> Self { | ||||||
|         Self { |         Self { | ||||||
|  |             id: Id::empty(), | ||||||
|             category, |             category, | ||||||
|             network: vec![], |             network: vec![], | ||||||
|             storage: vec![], |             storage: vec![], | ||||||
|             labels: vec![], |             labels: vec![], | ||||||
|             management: Arc::new(ManualManagementInterface {}), |             memory_modules: vec![], | ||||||
|             memory_size: None, |             cpus: vec![], | ||||||
|             cpu_count: None, |  | ||||||
|         } |         } | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|  |     pub fn summary(&self) -> String { | ||||||
|  |         let mut parts = Vec::new(); | ||||||
|  | 
 | ||||||
|  |         // Part 1: System Model (from labels) or Category as a fallback
 | ||||||
|  |         let model = self | ||||||
|  |             .labels | ||||||
|  |             .iter() | ||||||
|  |             .find(|l| l.name == "system-product-name" || l.name == "model") | ||||||
|  |             .map(|l| l.value.clone()) | ||||||
|  |             .unwrap_or_else(|| self.category.to_string()); | ||||||
|  |         parts.push(model); | ||||||
|  | 
 | ||||||
|  |         // Part 2: CPU Information
 | ||||||
|  |         if !self.cpus.is_empty() { | ||||||
|  |             let cpu_count = self.cpus.len(); | ||||||
|  |             let total_cores = self.cpus.iter().map(|c| c.cores).sum::<u32>(); | ||||||
|  |             let total_threads = self.cpus.iter().map(|c| c.threads).sum::<u32>(); | ||||||
|  |             let model_name = &self.cpus[0].model; | ||||||
|  | 
 | ||||||
|  |             let cpu_summary = if cpu_count > 1 { | ||||||
|  |                 format!( | ||||||
|  |                     "{}x {} ({}c/{}t)", | ||||||
|  |                     cpu_count, model_name, total_cores, total_threads | ||||||
|  |                 ) | ||||||
|  |             } else { | ||||||
|  |                 format!("{} ({}c/{}t)", model_name, total_cores, total_threads) | ||||||
|  |             }; | ||||||
|  |             parts.push(cpu_summary); | ||||||
|  |         } | ||||||
|  | 
 | ||||||
|  |         // Part 3: Memory Information
 | ||||||
|  |         if !self.memory_modules.is_empty() { | ||||||
|  |             let total_mem_bytes = self | ||||||
|  |                 .memory_modules | ||||||
|  |                 .iter() | ||||||
|  |                 .map(|m| m.size_bytes) | ||||||
|  |                 .sum::<u64>(); | ||||||
|  |             let total_mem_gb = (total_mem_bytes as f64 / (1024.0 * 1024.0 * 1024.0)).round() as u64; | ||||||
|  | 
 | ||||||
|  |             // Find the most common speed among modules
 | ||||||
|  |             let mut speeds = std::collections::HashMap::new(); | ||||||
|  |             for module in &self.memory_modules { | ||||||
|  |                 if let Some(speed) = module.speed_mhz { | ||||||
|  |                     *speeds.entry(speed).or_insert(0) += 1; | ||||||
|  |                 } | ||||||
|  |             } | ||||||
|  |             let common_speed = speeds | ||||||
|  |                 .into_iter() | ||||||
|  |                 .max_by_key(|&(_, count)| count) | ||||||
|  |                 .map(|(speed, _)| speed); | ||||||
|  | 
 | ||||||
|  |             if let Some(speed) = common_speed { | ||||||
|  |                 parts.push(format!("{} GB RAM @ {}MHz", total_mem_gb, speed)); | ||||||
|  |             } else { | ||||||
|  |                 parts.push(format!("{} GB RAM", total_mem_gb)); | ||||||
|  |             } | ||||||
|  |         } | ||||||
|  | 
 | ||||||
|  |         // Part 4: Storage Information
 | ||||||
|  |         if !self.storage.is_empty() { | ||||||
|  |             let total_storage_bytes = self.storage.iter().map(|d| d.size_bytes).sum::<u64>(); | ||||||
|  |             let drive_count = self.storage.len(); | ||||||
|  |             let first_drive_model = &self.storage[0].model; | ||||||
|  | 
 | ||||||
|  |             // Helper to format bytes into TB or GB
 | ||||||
|  |             let format_storage = |bytes: u64| { | ||||||
|  |                 let tb = bytes as f64 / (1024.0 * 1024.0 * 1024.0 * 1024.0); | ||||||
|  |                 if tb >= 1.0 { | ||||||
|  |                     format!("{:.2} TB", tb) | ||||||
|  |                 } else { | ||||||
|  |                     let gb = bytes as f64 / (1024.0 * 1024.0 * 1024.0); | ||||||
|  |                     format!("{:.0} GB", gb) | ||||||
|  |                 } | ||||||
|  |             }; | ||||||
|  | 
 | ||||||
|  |             let storage_summary = if drive_count > 1 { | ||||||
|  |                 format!( | ||||||
|  |                     "{} Storage ({}x {})", | ||||||
|  |                     format_storage(total_storage_bytes), | ||||||
|  |                     drive_count, | ||||||
|  |                     first_drive_model | ||||||
|  |                 ) | ||||||
|  |             } else { | ||||||
|  |                 format!( | ||||||
|  |                     "{} Storage ({})", | ||||||
|  |                     format_storage(total_storage_bytes), | ||||||
|  |                     first_drive_model | ||||||
|  |                 ) | ||||||
|  |             }; | ||||||
|  |             parts.push(storage_summary); | ||||||
|  |         } | ||||||
|  | 
 | ||||||
|  |         // Part 5: Network Information
 | ||||||
|  |         // Prioritize an "up" interface with an IPv4 address
 | ||||||
|  |         let best_nic = self | ||||||
|  |             .network | ||||||
|  |             .iter() | ||||||
|  |             .find(|n| n.is_up && !n.ipv4_addresses.is_empty()) | ||||||
|  |             .or_else(|| self.network.first()); | ||||||
|  | 
 | ||||||
|  |         if let Some(nic) = best_nic { | ||||||
|  |             let speed = nic | ||||||
|  |                 .speed_mbps | ||||||
|  |                 .map(|s| format!("{}Gbps", s / 1000)) | ||||||
|  |                 .unwrap_or_else(|| "N/A".to_string()); | ||||||
|  |             let mac = nic.mac_address.to_string(); | ||||||
|  |             let nic_summary = if let Some(ip) = nic.ipv4_addresses.first() { | ||||||
|  |                 format!("NIC: {} ({}, {})", speed, ip, mac) | ||||||
|  |             } else { | ||||||
|  |                 format!("NIC: {} ({})", speed, mac) | ||||||
|  |             }; | ||||||
|  |             parts.push(nic_summary); | ||||||
|  |         } | ||||||
|  | 
 | ||||||
|  |         parts.join(" | ") | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|     pub fn cluster_mac(&self) -> MacAddress { |     pub fn cluster_mac(&self) -> MacAddress { | ||||||
|         self.network |         self.network | ||||||
|             .first() |             .first() | ||||||
| @ -40,37 +158,17 @@ impl PhysicalHost { | |||||||
|             .mac_address |             .mac_address | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     pub fn cpu(mut self, cpu_count: Option<u64>) -> Self { |  | ||||||
|         self.cpu_count = cpu_count; |  | ||||||
|         self |  | ||||||
|     } |  | ||||||
| 
 |  | ||||||
|     pub fn memory_size(mut self, memory_size: Option<u64>) -> Self { |  | ||||||
|         self.memory_size = memory_size; |  | ||||||
|         self |  | ||||||
|     } |  | ||||||
| 
 |  | ||||||
|     pub fn storage( |  | ||||||
|         mut self, |  | ||||||
|         connection: StorageConnectionType, |  | ||||||
|         kind: StorageKind, |  | ||||||
|         size: u64, |  | ||||||
|         serial: String, |  | ||||||
|     ) -> Self { |  | ||||||
|         self.storage.push(Storage { |  | ||||||
|             connection, |  | ||||||
|             kind, |  | ||||||
|             size, |  | ||||||
|             serial, |  | ||||||
|         }); |  | ||||||
|         self |  | ||||||
|     } |  | ||||||
| 
 |  | ||||||
|     pub fn mac_address(mut self, mac_address: MacAddress) -> Self { |     pub fn mac_address(mut self, mac_address: MacAddress) -> Self { | ||||||
|         self.network.push(NetworkInterface { |         self.network.push(NetworkInterface { | ||||||
|             name: None, |             name: String::new(), | ||||||
|             mac_address, |             mac_address, | ||||||
|             speed: None, |             speed_mbps: None, | ||||||
|  |             is_up: false, | ||||||
|  |             mtu: 0, | ||||||
|  |             ipv4_addresses: vec![], | ||||||
|  |             ipv6_addresses: vec![], | ||||||
|  |             driver: String::new(), | ||||||
|  |             firmware_version: None, | ||||||
|         }); |         }); | ||||||
|         self |         self | ||||||
|     } |     } | ||||||
| @ -79,52 +177,56 @@ impl PhysicalHost { | |||||||
|         self.labels.push(Label { name, value }); |         self.labels.push(Label { name, value }); | ||||||
|         self |         self | ||||||
|     } |     } | ||||||
| 
 |  | ||||||
|     pub fn management(mut self, management: Arc<dyn ManagementInterface>) -> Self { |  | ||||||
|         self.management = management; |  | ||||||
|         self |  | ||||||
|     } |  | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // Custom Serialize implementation for PhysicalHost
 | // Custom Serialize implementation for PhysicalHost
 | ||||||
| impl Serialize for PhysicalHost { | // impl Serialize for PhysicalHost {
 | ||||||
|     fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> | //     fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
 | ||||||
|  | //     where
 | ||||||
|  | //         S: Serializer,
 | ||||||
|  | //     {
 | ||||||
|  | //         // Determine the number of fields
 | ||||||
|  | //         let mut num_fields = 5; // category, network, storage, labels, management
 | ||||||
|  | //         if self.memory_modules.is_some() {
 | ||||||
|  | //             num_fields += 1;
 | ||||||
|  | //         }
 | ||||||
|  | //         if self.cpus.is_some() {
 | ||||||
|  | //             num_fields += 1;
 | ||||||
|  | //         }
 | ||||||
|  | //
 | ||||||
|  | //         // Create a serialization structure
 | ||||||
|  | //         let mut state = serializer.serialize_struct("PhysicalHost", num_fields)?;
 | ||||||
|  | //
 | ||||||
|  | //         // Serialize the standard fields
 | ||||||
|  | //         state.serialize_field("category", &self.category)?;
 | ||||||
|  | //         state.serialize_field("network", &self.network)?;
 | ||||||
|  | //         state.serialize_field("storage", &self.storage)?;
 | ||||||
|  | //         state.serialize_field("labels", &self.labels)?;
 | ||||||
|  | //
 | ||||||
|  | //         // Serialize optional fields
 | ||||||
|  | //         if let Some(memory) = self.memory_modules {
 | ||||||
|  | //             state.serialize_field("memory_size", &memory)?;
 | ||||||
|  | //         }
 | ||||||
|  | //         if let Some(cpu) = self.cpus {
 | ||||||
|  | //             state.serialize_field("cpu_count", &cpu)?;
 | ||||||
|  | //         }
 | ||||||
|  | //
 | ||||||
|  | //         let mgmt_data = self.management.serialize_management();
 | ||||||
|  | //         // pub management: Arc<dyn ManagementInterface>,
 | ||||||
|  | //
 | ||||||
|  | //         // Handle management interface - either as a field or flattened
 | ||||||
|  | //         state.serialize_field("management", &mgmt_data)?;
 | ||||||
|  | //
 | ||||||
|  | //         state.end()
 | ||||||
|  | //     }
 | ||||||
|  | // }
 | ||||||
|  | 
 | ||||||
|  | impl<'de> Deserialize<'de> for PhysicalHost { | ||||||
|  |     fn deserialize<D>(_deserializer: D) -> Result<Self, D::Error> | ||||||
|     where |     where | ||||||
|         S: Serializer, |         D: serde::Deserializer<'de>, | ||||||
|     { |     { | ||||||
|         // Determine the number of fields
 |         todo!() | ||||||
|         let mut num_fields = 5; // category, network, storage, labels, management
 |  | ||||||
|         if self.memory_size.is_some() { |  | ||||||
|             num_fields += 1; |  | ||||||
|         } |  | ||||||
|         if self.cpu_count.is_some() { |  | ||||||
|             num_fields += 1; |  | ||||||
|         } |  | ||||||
| 
 |  | ||||||
|         // Create a serialization structure
 |  | ||||||
|         let mut state = serializer.serialize_struct("PhysicalHost", num_fields)?; |  | ||||||
| 
 |  | ||||||
|         // Serialize the standard fields
 |  | ||||||
|         state.serialize_field("category", &self.category)?; |  | ||||||
|         state.serialize_field("network", &self.network)?; |  | ||||||
|         state.serialize_field("storage", &self.storage)?; |  | ||||||
|         state.serialize_field("labels", &self.labels)?; |  | ||||||
| 
 |  | ||||||
|         // Serialize optional fields
 |  | ||||||
|         if let Some(memory) = self.memory_size { |  | ||||||
|             state.serialize_field("memory_size", &memory)?; |  | ||||||
|         } |  | ||||||
|         if let Some(cpu) = self.cpu_count { |  | ||||||
|             state.serialize_field("cpu_count", &cpu)?; |  | ||||||
|         } |  | ||||||
| 
 |  | ||||||
|         let mgmt_data = self.management.serialize_management(); |  | ||||||
|         // pub management: Arc<dyn ManagementInterface>,
 |  | ||||||
| 
 |  | ||||||
|         // Handle management interface - either as a field or flattened
 |  | ||||||
|         state.serialize_field("management", &mgmt_data)?; |  | ||||||
| 
 |  | ||||||
|         state.end() |  | ||||||
|     } |     } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| @ -178,59 +280,10 @@ pub enum HostCategory { | |||||||
|     Switch, |     Switch, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| #[derive(Debug, new, Clone, Serialize)] |  | ||||||
| pub struct NetworkInterface { |  | ||||||
|     pub name: Option<String>, |  | ||||||
|     pub mac_address: MacAddress, |  | ||||||
|     pub speed: Option<u64>, |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| #[cfg(test)] | #[cfg(test)] | ||||||
| use harmony_macros::mac_address; | use harmony_macros::mac_address; | ||||||
| #[cfg(test)] |  | ||||||
| impl NetworkInterface { |  | ||||||
|     pub fn dummy() -> Self { |  | ||||||
|         Self { |  | ||||||
|             name: Some(String::new()), |  | ||||||
|             mac_address: mac_address!("00:00:00:00:00:00"), |  | ||||||
|             speed: Some(0), |  | ||||||
|         } |  | ||||||
|     } |  | ||||||
| } |  | ||||||
| 
 | 
 | ||||||
| #[derive(Debug, new, Clone, Serialize)] | use harmony_types::id::Id; | ||||||
| pub enum StorageConnectionType { |  | ||||||
|     Sata3g, |  | ||||||
|     Sata6g, |  | ||||||
|     Sas6g, |  | ||||||
|     Sas12g, |  | ||||||
|     PCIE, |  | ||||||
| } |  | ||||||
| #[derive(Debug, Clone, Serialize)] |  | ||||||
| pub enum StorageKind { |  | ||||||
|     SSD, |  | ||||||
|     NVME, |  | ||||||
|     HDD, |  | ||||||
| } |  | ||||||
| #[derive(Debug, new, Clone, Serialize)] |  | ||||||
| pub struct Storage { |  | ||||||
|     pub connection: StorageConnectionType, |  | ||||||
|     pub kind: StorageKind, |  | ||||||
|     pub size: u64, |  | ||||||
|     pub serial: String, |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| #[cfg(test)] |  | ||||||
| impl Storage { |  | ||||||
|     pub fn dummy() -> Self { |  | ||||||
|         Self { |  | ||||||
|             connection: StorageConnectionType::Sata3g, |  | ||||||
|             kind: StorageKind::SSD, |  | ||||||
|             size: 0, |  | ||||||
|             serial: String::new(), |  | ||||||
|         } |  | ||||||
|     } |  | ||||||
| } |  | ||||||
| 
 | 
 | ||||||
| #[derive(Debug, Clone, Serialize)] | #[derive(Debug, Clone, Serialize)] | ||||||
| pub struct Switch { | pub struct Switch { | ||||||
| @ -261,146 +314,65 @@ impl Location { | |||||||
|     } |     } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | impl std::fmt::Display for HostCategory { | ||||||
|  |     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { | ||||||
|  |         match self { | ||||||
|  |             HostCategory::Server => write!(f, "Server"), | ||||||
|  |             HostCategory::Firewall => write!(f, "Firewall"), | ||||||
|  |             HostCategory::Switch => write!(f, "Switch"), | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | impl std::fmt::Display for Label { | ||||||
|  |     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { | ||||||
|  |         write!(f, "{}: {}", self.name, self.value) | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | impl std::fmt::Display for Location { | ||||||
|  |     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { | ||||||
|  |         write!(f, "Address: {}, Name: {}", self.address, self.name) | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | impl std::fmt::Display for PhysicalHost { | ||||||
|  |     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { | ||||||
|  |         write!(f, "{}", self.summary()) | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | impl std::fmt::Display for Switch { | ||||||
|  |     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { | ||||||
|  |         write!(f, "Switch with {} interfaces", self._interface.len()) | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
| #[cfg(test)] | #[cfg(test)] | ||||||
| mod tests { | mod tests { | ||||||
|     use super::*; |     use super::*; | ||||||
|     use serde::{Deserialize, Serialize}; |  | ||||||
|     use std::sync::Arc; |  | ||||||
| 
 |  | ||||||
|     // Mock implementation of ManagementInterface
 |  | ||||||
|     #[derive(Debug, Clone, Serialize, Deserialize)] |  | ||||||
|     struct MockHPIlo { |  | ||||||
|         ip: String, |  | ||||||
|         username: String, |  | ||||||
|         password: String, |  | ||||||
|         firmware_version: String, |  | ||||||
|     } |  | ||||||
| 
 |  | ||||||
|     impl ManagementInterface for MockHPIlo { |  | ||||||
|         fn boot_to_pxe(&self) {} |  | ||||||
| 
 |  | ||||||
|         fn get_supported_protocol_names(&self) -> String { |  | ||||||
|             String::new() |  | ||||||
|         } |  | ||||||
|     } |  | ||||||
| 
 |  | ||||||
|     // Another mock implementation
 |  | ||||||
|     #[derive(Debug, Clone, Serialize, Deserialize)] |  | ||||||
|     struct MockDellIdrac { |  | ||||||
|         hostname: String, |  | ||||||
|         port: u16, |  | ||||||
|         api_token: String, |  | ||||||
|     } |  | ||||||
| 
 |  | ||||||
|     impl ManagementInterface for MockDellIdrac { |  | ||||||
|         fn boot_to_pxe(&self) {} |  | ||||||
| 
 |  | ||||||
|         fn get_supported_protocol_names(&self) -> String { |  | ||||||
|             String::new() |  | ||||||
|         } |  | ||||||
|     } |  | ||||||
| 
 |  | ||||||
|     #[test] |  | ||||||
|     fn test_serialize_physical_host_with_hp_ilo() { |  | ||||||
|         // Create a PhysicalHost with HP iLO management
 |  | ||||||
|         let host = PhysicalHost { |  | ||||||
|             category: HostCategory::Server, |  | ||||||
|             network: vec![NetworkInterface::dummy()], |  | ||||||
|             management: Arc::new(MockHPIlo { |  | ||||||
|                 ip: "192.168.1.100".to_string(), |  | ||||||
|                 username: "admin".to_string(), |  | ||||||
|                 password: "password123".to_string(), |  | ||||||
|                 firmware_version: "2.5.0".to_string(), |  | ||||||
|             }), |  | ||||||
|             storage: vec![Storage::dummy()], |  | ||||||
|             labels: vec![Label::new("datacenter".to_string(), "us-east".to_string())], |  | ||||||
|             memory_size: Some(64_000_000), |  | ||||||
|             cpu_count: Some(16), |  | ||||||
|         }; |  | ||||||
| 
 |  | ||||||
|         // Serialize to JSON
 |  | ||||||
|         let json = serde_json::to_string(&host).expect("Failed to serialize host"); |  | ||||||
| 
 |  | ||||||
|         // Check that the serialized JSON contains the HP iLO details
 |  | ||||||
|         assert!(json.contains("192.168.1.100")); |  | ||||||
|         assert!(json.contains("admin")); |  | ||||||
|         assert!(json.contains("password123")); |  | ||||||
|         assert!(json.contains("firmware_version")); |  | ||||||
|         assert!(json.contains("2.5.0")); |  | ||||||
| 
 |  | ||||||
|         // Parse back to verify structure (not the exact management interface)
 |  | ||||||
|         let parsed: serde_json::Value = serde_json::from_str(&json).expect("Failed to parse JSON"); |  | ||||||
| 
 |  | ||||||
|         // Verify basic structure
 |  | ||||||
|         assert_eq!(parsed["cpu_count"], 16); |  | ||||||
|         assert_eq!(parsed["memory_size"], 64_000_000); |  | ||||||
|         assert_eq!(parsed["network"][0]["name"], ""); |  | ||||||
|     } |  | ||||||
| 
 |  | ||||||
|     #[test] |  | ||||||
|     fn test_serialize_physical_host_with_dell_idrac() { |  | ||||||
|         // Create a PhysicalHost with Dell iDRAC management
 |  | ||||||
|         let host = PhysicalHost { |  | ||||||
|             category: HostCategory::Server, |  | ||||||
|             network: vec![NetworkInterface::dummy()], |  | ||||||
|             management: Arc::new(MockDellIdrac { |  | ||||||
|                 hostname: "idrac-server01".to_string(), |  | ||||||
|                 port: 443, |  | ||||||
|                 api_token: "abcdef123456".to_string(), |  | ||||||
|             }), |  | ||||||
|             storage: vec![Storage::dummy()], |  | ||||||
|             labels: vec![Label::new("env".to_string(), "production".to_string())], |  | ||||||
|             memory_size: Some(128_000_000), |  | ||||||
|             cpu_count: Some(32), |  | ||||||
|         }; |  | ||||||
| 
 |  | ||||||
|         // Serialize to JSON
 |  | ||||||
|         let json = serde_json::to_string(&host).expect("Failed to serialize host"); |  | ||||||
| 
 |  | ||||||
|         // Check that the serialized JSON contains the Dell iDRAC details
 |  | ||||||
|         assert!(json.contains("idrac-server01")); |  | ||||||
|         assert!(json.contains("443")); |  | ||||||
|         assert!(json.contains("abcdef123456")); |  | ||||||
| 
 |  | ||||||
|         // Parse back to verify structure
 |  | ||||||
|         let parsed: serde_json::Value = serde_json::from_str(&json).expect("Failed to parse JSON"); |  | ||||||
| 
 |  | ||||||
|         // Verify basic structure
 |  | ||||||
|         assert_eq!(parsed["cpu_count"], 32); |  | ||||||
|         assert_eq!(parsed["memory_size"], 128_000_000); |  | ||||||
|         assert_eq!(parsed["storage"][0]["path"], serde_json::Value::Null); |  | ||||||
|     } |  | ||||||
| 
 | 
 | ||||||
|     #[test] |     #[test] | ||||||
|     fn test_different_management_implementations_produce_valid_json() { |     fn test_different_management_implementations_produce_valid_json() { | ||||||
|         // Create hosts with different management implementations
 |         // Create hosts with different management implementations
 | ||||||
|         let host1 = PhysicalHost { |         let host1 = PhysicalHost { | ||||||
|  |             id: Id::empty(), | ||||||
|             category: HostCategory::Server, |             category: HostCategory::Server, | ||||||
|             network: vec![], |             network: vec![], | ||||||
|             management: Arc::new(MockHPIlo { |  | ||||||
|                 ip: "10.0.0.1".to_string(), |  | ||||||
|                 username: "root".to_string(), |  | ||||||
|                 password: "secret".to_string(), |  | ||||||
|                 firmware_version: "3.0.0".to_string(), |  | ||||||
|             }), |  | ||||||
|             storage: vec![], |             storage: vec![], | ||||||
|             labels: vec![], |             labels: vec![], | ||||||
|             memory_size: None, |             memory_modules: vec![], | ||||||
|             cpu_count: None, |             cpus: vec![], | ||||||
|         }; |         }; | ||||||
| 
 | 
 | ||||||
|         let host2 = PhysicalHost { |         let host2 = PhysicalHost { | ||||||
|  |             id: Id::empty(), | ||||||
|             category: HostCategory::Server, |             category: HostCategory::Server, | ||||||
|             network: vec![], |             network: vec![], | ||||||
|             management: Arc::new(MockDellIdrac { |  | ||||||
|                 hostname: "server02-idrac".to_string(), |  | ||||||
|                 port: 8443, |  | ||||||
|                 api_token: "token123".to_string(), |  | ||||||
|             }), |  | ||||||
|             storage: vec![], |             storage: vec![], | ||||||
|             labels: vec![], |             labels: vec![], | ||||||
|             memory_size: None, |             memory_modules: vec![], | ||||||
|             cpu_count: None, |             cpus: vec![], | ||||||
|         }; |         }; | ||||||
| 
 | 
 | ||||||
|         // Both should serialize successfully
 |         // Both should serialize successfully
 | ||||||
| @ -410,8 +382,5 @@ mod tests { | |||||||
|         // Both JSONs should be valid and parseable
 |         // Both JSONs should be valid and parseable
 | ||||||
|         let _: serde_json::Value = serde_json::from_str(&json1).expect("Invalid JSON for host1"); |         let _: serde_json::Value = serde_json::from_str(&json1).expect("Invalid JSON for host1"); | ||||||
|         let _: serde_json::Value = serde_json::from_str(&json2).expect("Invalid JSON for host2"); |         let _: serde_json::Value = serde_json::from_str(&json2).expect("Invalid JSON for host2"); | ||||||
| 
 |  | ||||||
|         // The JSONs should be different because they contain different management interfaces
 |  | ||||||
|         assert_ne!(json1, json2); |  | ||||||
|     } |     } | ||||||
| } | } | ||||||
|  | |||||||
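A hedged sketch of the remaining builder flow after this refactor: start from PhysicalHost::empty, attach a MAC address, and print the one-line summary via the new Display impl. The import paths and the exact MAC formatting in the output are assumptions:

use harmony::hardware::{HostCategory, PhysicalHost};
use harmony_macros::mac_address;

fn main() {
    let host = PhysicalHost::empty(HostCategory::Server)
        .mac_address(mac_address!("aa:bb:cc:dd:ee:ff"));

    // With no CPU, memory or storage probed yet, summary() falls back to the
    // category and the NIC line, roughly: "Server | NIC: N/A (aa:bb:cc:dd:ee:ff)".
    println!("{host}");
}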
| @ -1,6 +1,5 @@ | |||||||
| use log::debug; |  | ||||||
| use once_cell::sync::Lazy; | use once_cell::sync::Lazy; | ||||||
| use tokio::sync::broadcast; | use std::{collections::HashMap, sync::Mutex}; | ||||||
| 
 | 
 | ||||||
| use crate::modules::application::ApplicationFeatureStatus; | use crate::modules::application::ApplicationFeatureStatus; | ||||||
| 
 | 
 | ||||||
| @ -40,43 +39,46 @@ pub enum HarmonyEvent { | |||||||
|     }, |     }, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static HARMONY_EVENT_BUS: Lazy<broadcast::Sender<HarmonyEvent>> = Lazy::new(|| { | type Subscriber = Box<dyn Fn(&HarmonyEvent) + Send + Sync>; | ||||||
|     // TODO: Adjust channel capacity
 |  | ||||||
|     let (tx, _rx) = broadcast::channel(100); |  | ||||||
|     tx |  | ||||||
| }); |  | ||||||
| 
 | 
 | ||||||
| pub fn instrument(event: HarmonyEvent) -> Result<(), &'static str> { | static SUBSCRIBERS: Lazy<Mutex<HashMap<String, Subscriber>>> = | ||||||
|     if cfg!(any(test, feature = "testing")) { |     Lazy::new(|| Mutex::new(HashMap::new())); | ||||||
|         let _ = event; // Suppress the "unused variable" warning for `event`
 |  | ||||||
|         Ok(()) |  | ||||||
|     } else { |  | ||||||
|         match HARMONY_EVENT_BUS.send(event) { |  | ||||||
|             Ok(_) => Ok(()), |  | ||||||
|             Err(_) => Err("send error: no subscribers"), |  | ||||||
|         } |  | ||||||
|     } |  | ||||||
| } |  | ||||||
| 
 | 
 | ||||||
| pub async fn subscribe<F, Fut>(name: &str, mut handler: F) | /// Subscribes a listener to all instrumentation events.
 | ||||||
|  | ///
 | ||||||
|  | /// Simply provide a unique name and a closure to run when an event happens.
 | ||||||
|  | ///
 | ||||||
|  | /// # Example
 | ||||||
|  | /// ```
 | ||||||
|  | /// use harmony::instrumentation;
 | ||||||
|  | /// instrumentation::subscribe("my_logger", |event| {
 | ||||||
|  | ///   println!("Event occurred: {:?}", event);
 | ||||||
|  | /// });
 | ||||||
|  | /// ```
 | ||||||
|  | pub fn subscribe<F>(name: &str, callback: F) | ||||||
| where | where | ||||||
|     F: FnMut(HarmonyEvent) -> Fut + Send + 'static, |     F: Fn(&HarmonyEvent) + Send + Sync + 'static, | ||||||
|     Fut: Future<Output = bool> + Send, |  | ||||||
| { | { | ||||||
|     let mut rx = HARMONY_EVENT_BUS.subscribe(); |     let mut subs = SUBSCRIBERS.lock().unwrap(); | ||||||
|     debug!("[{name}] Service started. Listening for events..."); |     subs.insert(name.to_string(), Box::new(callback)); | ||||||
|     loop { | } | ||||||
|         match rx.recv().await { | 
 | ||||||
|             Ok(event) => { | /// Instruments an event, notifying all subscribers.
 | ||||||
|                 if !handler(event).await { | ///
 | ||||||
|                     debug!("[{name}] Handler requested exit."); | /// This will call every closure that was registered with `subscribe`.
 | ||||||
|                     break; | ///
 | ||||||
|                 } | /// # Example
 | ||||||
|             } | /// ```
 | ||||||
|             Err(broadcast::error::RecvError::Lagged(n)) => { | /// use harmony::instrumentation;
 | ||||||
|                 debug!("[{name}] Lagged behind by {n} messages."); | /// use harmony::instrumentation::HarmonyEvent;
 | ||||||
|             } | /// instrumentation::instrument(HarmonyEvent::HarmonyStarted);
 | ||||||
|             Err(_) => break, | /// ```
 | ||||||
|         } | pub fn instrument(event: HarmonyEvent) -> Result<(), &'static str> { | ||||||
|     } |     let subs = SUBSCRIBERS.lock().unwrap(); | ||||||
|  | 
 | ||||||
|  |     for callback in subs.values() { | ||||||
|  |         callback(&event); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     Ok(()) | ||||||
| } | } | ||||||
|  | |||||||
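Putting the two new functions together, a small sketch of a subscriber that collects events for later inspection (for example in a test); HarmonyEvent is assumed to derive Debug, as the doc examples above suggest:

use std::sync::{Arc, Mutex};

use harmony::instrumentation::{self, HarmonyEvent};

fn main() {
    let seen: Arc<Mutex<Vec<String>>> = Arc::new(Mutex::new(Vec::new()));
    let sink = seen.clone();

    // Register a named subscriber; it stays registered for the process lifetime.
    instrumentation::subscribe("event_collector", move |event| {
        sink.lock().unwrap().push(format!("{event:?}"));
    });

    // Every instrument() call now fans out to the collector synchronously.
    let _ = instrumentation::instrument(HarmonyEvent::HarmonyStarted);
    assert_eq!(seen.lock().unwrap().len(), 1);
}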
| @ -1,13 +1,11 @@ | |||||||
|  | use harmony_types::id::Id; | ||||||
| use std::error::Error; | use std::error::Error; | ||||||
| 
 | 
 | ||||||
| use async_trait::async_trait; | use async_trait::async_trait; | ||||||
| use derive_new::new; | use derive_new::new; | ||||||
| 
 | 
 | ||||||
| use super::{ | use super::{ | ||||||
|     data::{Id, Version}, |     data::Version, executors::ExecutorError, inventory::Inventory, topology::PreparationError, | ||||||
|     executors::ExecutorError, |  | ||||||
|     inventory::Inventory, |  | ||||||
|     topology::PreparationError, |  | ||||||
| }; | }; | ||||||
| 
 | 
 | ||||||
| pub enum InterpretName { | pub enum InterpretName { | ||||||
| @ -32,6 +30,7 @@ pub enum InterpretName { | |||||||
|     Lamp, |     Lamp, | ||||||
|     ApplicationMonitoring, |     ApplicationMonitoring, | ||||||
|     K8sPrometheusCrdAlerting, |     K8sPrometheusCrdAlerting, | ||||||
|  |     DiscoverInventoryAgent, | ||||||
|     CephClusterHealth, |     CephClusterHealth, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| @ -59,6 +58,7 @@ impl std::fmt::Display for InterpretName { | |||||||
|             InterpretName::Lamp => f.write_str("LAMP"), |             InterpretName::Lamp => f.write_str("LAMP"), | ||||||
|             InterpretName::ApplicationMonitoring => f.write_str("ApplicationMonitoring"), |             InterpretName::ApplicationMonitoring => f.write_str("ApplicationMonitoring"), | ||||||
|             InterpretName::K8sPrometheusCrdAlerting => f.write_str("K8sPrometheusCrdAlerting"), |             InterpretName::K8sPrometheusCrdAlerting => f.write_str("K8sPrometheusCrdAlerting"), | ||||||
|  |             InterpretName::DiscoverInventoryAgent => f.write_str("DiscoverInventoryAgent"), | ||||||
|             InterpretName::CephClusterHealth => f.write_str("CephClusterHealth"), |             InterpretName::CephClusterHealth => f.write_str("CephClusterHealth"), | ||||||
|         } |         } | ||||||
|     } |     } | ||||||
|  | |||||||
| @ -1,3 +1,6 @@ | |||||||
|  | mod repository; | ||||||
|  | pub use repository::*; | ||||||
|  | 
 | ||||||
| #[derive(Debug, new, Clone)] | #[derive(Debug, new, Clone)] | ||||||
| pub struct InventoryFilter { | pub struct InventoryFilter { | ||||||
|     target: Vec<Filter>, |     target: Vec<Filter>, | ||||||
| @ -15,6 +18,8 @@ impl InventoryFilter { | |||||||
| use derive_new::new; | use derive_new::new; | ||||||
| use log::info; | use log::info; | ||||||
| 
 | 
 | ||||||
|  | use crate::hardware::{ManagementInterface, ManualManagementInterface}; | ||||||
|  | 
 | ||||||
| use super::{ | use super::{ | ||||||
|     filter::Filter, |     filter::Filter, | ||||||
|     hardware::{FirewallGroup, HostGroup, Location, SwitchGroup}, |     hardware::{FirewallGroup, HostGroup, Location, SwitchGroup}, | ||||||
| @ -27,7 +32,7 @@ pub struct Inventory { | |||||||
|     // Firewall is really just a host but with somewhat specialized hardware
 |     // Firewall is really just a host but with somewhat specialized hardware
 | ||||||
|     // I'm not entirely sure it belongs to its own category but it helps make things easier and
 |     // I'm not entirely sure it belongs to its own category but it helps make things easier and
 | ||||||
|     // clearer for now so let's try it this way.
 |     // clearer for now so let's try it this way.
 | ||||||
|     pub firewall: FirewallGroup, |     pub firewall_mgmt: Box<dyn ManagementInterface>, | ||||||
|     pub worker_host: HostGroup, |     pub worker_host: HostGroup, | ||||||
|     pub storage_host: HostGroup, |     pub storage_host: HostGroup, | ||||||
|     pub control_plane_host: HostGroup, |     pub control_plane_host: HostGroup, | ||||||
| @ -38,7 +43,7 @@ impl Inventory { | |||||||
|         Self { |         Self { | ||||||
|             location: Location::new("Empty".to_string(), "location".to_string()), |             location: Location::new("Empty".to_string(), "location".to_string()), | ||||||
|             switch: vec![], |             switch: vec![], | ||||||
|             firewall: vec![], |             firewall_mgmt: Box::new(ManualManagementInterface {}), | ||||||
|             worker_host: vec![], |             worker_host: vec![], | ||||||
|             storage_host: vec![], |             storage_host: vec![], | ||||||
|             control_plane_host: vec![], |             control_plane_host: vec![], | ||||||
| @ -49,7 +54,7 @@ impl Inventory { | |||||||
|         Self { |         Self { | ||||||
|             location: Location::test_building(), |             location: Location::test_building(), | ||||||
|             switch: SwitchGroup::new(), |             switch: SwitchGroup::new(), | ||||||
|             firewall: FirewallGroup::new(), |             firewall_mgmt: Box::new(ManualManagementInterface {}), | ||||||
|             worker_host: HostGroup::new(), |             worker_host: HostGroup::new(), | ||||||
|             storage_host: HostGroup::new(), |             storage_host: HostGroup::new(), | ||||||
|             control_plane_host: HostGroup::new(), |             control_plane_host: HostGroup::new(), | ||||||
|  | |||||||
harmony/src/domain/inventory/repository.rs (new file, 25 lines)
							| @ -0,0 +1,25 @@ | |||||||
|  | use async_trait::async_trait; | ||||||
|  | 
 | ||||||
|  | use crate::hardware::PhysicalHost; | ||||||
|  | 
 | ||||||
|  | /// Errors that can occur within the repository layer.
 | ||||||
|  | #[derive(thiserror::Error, Debug)] | ||||||
|  | pub enum RepoError { | ||||||
|  |     #[error("Database query failed: {0}")] | ||||||
|  |     QueryFailed(String), | ||||||
|  |     #[error("Data serialization failed: {0}")] | ||||||
|  |     Serialization(String), | ||||||
|  |     #[error("Data deserialization failed: {0}")] | ||||||
|  |     Deserialization(String), | ||||||
|  |     #[error("Could not connect to the database: {0}")] | ||||||
|  |     ConnectionFailed(String), | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // --- Trait and Implementation ---
 | ||||||
|  | 
 | ||||||
|  | /// Defines the contract for inventory persistence.
 | ||||||
|  | #[async_trait] | ||||||
|  | pub trait InventoryRepository: Send + Sync + 'static { | ||||||
|  |     async fn save(&self, host: &PhysicalHost) -> Result<(), RepoError>; | ||||||
|  |     async fn get_latest_by_id(&self, host_id: &str) -> Result<Option<PhysicalHost>, RepoError>; | ||||||
|  | } | ||||||
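The trait above only pins down the persistence contract. Below is a hedged sketch of the simplest possible implementor, an in-memory map keyed by host id, useful for tests; module paths are assumptions, and the real repository is presumably SQLite-backed given the sqlx dependency and DATABASE_URL default added in this change set:

use std::{collections::HashMap, sync::Mutex};

use async_trait::async_trait;

use crate::hardware::PhysicalHost;
use crate::inventory::{InventoryRepository, RepoError};

#[derive(Default)]
pub struct InMemoryInventoryRepository {
    hosts: Mutex<HashMap<String, PhysicalHost>>,
}

#[async_trait]
impl InventoryRepository for InMemoryInventoryRepository {
    async fn save(&self, host: &PhysicalHost) -> Result<(), RepoError> {
        let mut hosts = self
            .hosts
            .lock()
            .map_err(|e| RepoError::QueryFailed(e.to_string()))?;
        // Keep only the latest record per host id, mirroring get_latest_by_id.
        hosts.insert(host.id.to_string(), host.clone());
        Ok(())
    }

    async fn get_latest_by_id(&self, host_id: &str) -> Result<Option<PhysicalHost>, RepoError> {
        let hosts = self
            .hosts
            .lock()
            .map_err(|e| RepoError::QueryFailed(e.to_string()))?;
        Ok(hosts.get(host_id).cloned())
    }
}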
| @ -74,6 +74,7 @@ impl<T: Topology> Maestro<T> { | |||||||
| 
 | 
 | ||||||
|     fn is_topology_initialized(&self) -> bool { |     fn is_topology_initialized(&self) -> bool { | ||||||
|         self.topology_state.status == TopologyStatus::Success |         self.topology_state.status == TopologyStatus::Success | ||||||
|  |             || self.topology_state.status == TopologyStatus::Noop | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     pub async fn interpret(&self, score: Box<dyn Score<T>>) -> Result<Outcome, InterpretError> { |     pub async fn interpret(&self, score: Box<dyn Score<T>>) -> Result<Outcome, InterpretError> { | ||||||
|  | |||||||
| @ -1,3 +1,4 @@ | |||||||
|  | use harmony_types::id::Id; | ||||||
| use std::collections::BTreeMap; | use std::collections::BTreeMap; | ||||||
| 
 | 
 | ||||||
| use async_trait::async_trait; | use async_trait::async_trait; | ||||||
| @ -5,7 +6,6 @@ use serde::Serialize; | |||||||
| use serde_value::Value; | use serde_value::Value; | ||||||
| 
 | 
 | ||||||
| use super::{ | use super::{ | ||||||
|     data::Id, |  | ||||||
|     instrumentation::{self, HarmonyEvent}, |     instrumentation::{self, HarmonyEvent}, | ||||||
|     interpret::{Interpret, InterpretError, Outcome}, |     interpret::{Interpret, InterpretError, Outcome}, | ||||||
|     inventory::Inventory, |     inventory::Inventory, | ||||||
|  | |||||||
| @ -1,9 +1,13 @@ | |||||||
| use async_trait::async_trait; | use async_trait::async_trait; | ||||||
| use harmony_macros::ip; | use harmony_macros::ip; | ||||||
| use harmony_types::net::MacAddress; | use harmony_types::net::MacAddress; | ||||||
|  | use harmony_types::net::Url; | ||||||
|  | use log::debug; | ||||||
| use log::info; | use log::info; | ||||||
| 
 | 
 | ||||||
|  | use crate::data::FileContent; | ||||||
| use crate::executors::ExecutorError; | use crate::executors::ExecutorError; | ||||||
|  | use crate::topology::PxeOptions; | ||||||
| 
 | 
 | ||||||
| use super::DHCPStaticEntry; | use super::DHCPStaticEntry; | ||||||
| use super::DhcpServer; | use super::DhcpServer; | ||||||
| @ -23,7 +27,6 @@ use super::Router; | |||||||
| use super::TftpServer; | use super::TftpServer; | ||||||
| 
 | 
 | ||||||
| use super::Topology; | use super::Topology; | ||||||
| use super::Url; |  | ||||||
| use super::k8s::K8sClient; | use super::k8s::K8sClient; | ||||||
| use std::sync::Arc; | use std::sync::Arc; | ||||||
| 
 | 
 | ||||||
| @ -49,9 +52,10 @@ impl Topology for HAClusterTopology { | |||||||
|         "HAClusterTopology" |         "HAClusterTopology" | ||||||
|     } |     } | ||||||
|     async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> { |     async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> { | ||||||
|         todo!( |         debug!( | ||||||
|             "ensure_ready, not entirely sure what it should do here, probably something like verify that the hosts are reachable and all services are up and ready." |             "ensure_ready, not entirely sure what it should do here, probably something like verify that the hosts are reachable and all services are up and ready." | ||||||
|         ) |         ); | ||||||
|  |         Ok(PreparationOutcome::Noop) | ||||||
|     } |     } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| @ -153,12 +157,10 @@ impl DhcpServer for HAClusterTopology { | |||||||
|     async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)> { |     async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)> { | ||||||
|         self.dhcp_server.list_static_mappings().await |         self.dhcp_server.list_static_mappings().await | ||||||
|     } |     } | ||||||
|     async fn set_next_server(&self, ip: IpAddress) -> Result<(), ExecutorError> { |     async fn set_pxe_options(&self, options: PxeOptions) -> Result<(), ExecutorError> { | ||||||
|         self.dhcp_server.set_next_server(ip).await |         self.dhcp_server.set_pxe_options(options).await | ||||||
|     } |  | ||||||
|     async fn set_boot_filename(&self, boot_filename: &str) -> Result<(), ExecutorError> { |  | ||||||
|         self.dhcp_server.set_boot_filename(boot_filename).await |  | ||||||
|     } |     } | ||||||
|  | 
 | ||||||
|     fn get_ip(&self) -> IpAddress { |     fn get_ip(&self) -> IpAddress { | ||||||
|         self.dhcp_server.get_ip() |         self.dhcp_server.get_ip() | ||||||
|     } |     } | ||||||
| @ -168,16 +170,6 @@ impl DhcpServer for HAClusterTopology { | |||||||
|     async fn commit_config(&self) -> Result<(), ExecutorError> { |     async fn commit_config(&self) -> Result<(), ExecutorError> { | ||||||
|         self.dhcp_server.commit_config().await |         self.dhcp_server.commit_config().await | ||||||
|     } |     } | ||||||
| 
 |  | ||||||
|     async fn set_filename(&self, filename: &str) -> Result<(), ExecutorError> { |  | ||||||
|         self.dhcp_server.set_filename(filename).await |  | ||||||
|     } |  | ||||||
|     async fn set_filename64(&self, filename64: &str) -> Result<(), ExecutorError> { |  | ||||||
|         self.dhcp_server.set_filename64(filename64).await |  | ||||||
|     } |  | ||||||
|     async fn set_filenameipxe(&self, filenameipxe: &str) -> Result<(), ExecutorError> { |  | ||||||
|         self.dhcp_server.set_filenameipxe(filenameipxe).await |  | ||||||
|     } |  | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| #[async_trait] | #[async_trait] | ||||||
| @ -221,17 +213,21 @@ impl HttpServer for HAClusterTopology { | |||||||
|         self.http_server.serve_files(url).await |         self.http_server.serve_files(url).await | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|  |     async fn serve_file_content(&self, file: &FileContent) -> Result<(), ExecutorError> { | ||||||
|  |         self.http_server.serve_file_content(file).await | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|     fn get_ip(&self) -> IpAddress { |     fn get_ip(&self) -> IpAddress { | ||||||
|         unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) |         self.http_server.get_ip() | ||||||
|     } |     } | ||||||
|     async fn ensure_initialized(&self) -> Result<(), ExecutorError> { |     async fn ensure_initialized(&self) -> Result<(), ExecutorError> { | ||||||
|         unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) |         self.http_server.ensure_initialized().await | ||||||
|     } |     } | ||||||
|     async fn commit_config(&self) -> Result<(), ExecutorError> { |     async fn commit_config(&self) -> Result<(), ExecutorError> { | ||||||
|         unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) |         self.http_server.commit_config().await | ||||||
|     } |     } | ||||||
|     async fn reload_restart(&self) -> Result<(), ExecutorError> { |     async fn reload_restart(&self) -> Result<(), ExecutorError> { | ||||||
|         unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) |         self.http_server.reload_restart().await | ||||||
|     } |     } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| @ -299,19 +295,7 @@ impl DhcpServer for DummyInfra { | |||||||
|     async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)> { |     async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)> { | ||||||
|         unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) |         unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) | ||||||
|     } |     } | ||||||
|     async fn set_next_server(&self, _ip: IpAddress) -> Result<(), ExecutorError> { |     async fn set_pxe_options(&self, _options: PxeOptions) -> Result<(), ExecutorError> { | ||||||
|         unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) |  | ||||||
|     } |  | ||||||
|     async fn set_boot_filename(&self, _boot_filename: &str) -> Result<(), ExecutorError> { |  | ||||||
|         unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) |  | ||||||
|     } |  | ||||||
|     async fn set_filename(&self, _filename: &str) -> Result<(), ExecutorError> { |  | ||||||
|         unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) |  | ||||||
|     } |  | ||||||
|     async fn set_filename64(&self, _filename: &str) -> Result<(), ExecutorError> { |  | ||||||
|         unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) |  | ||||||
|     } |  | ||||||
|     async fn set_filenameipxe(&self, _filenameipxe: &str) -> Result<(), ExecutorError> { |  | ||||||
|         unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) |         unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) | ||||||
|     } |     } | ||||||
|     fn get_ip(&self) -> IpAddress { |     fn get_ip(&self) -> IpAddress { | ||||||
| @ -381,6 +365,9 @@ impl HttpServer for DummyInfra { | |||||||
|     async fn serve_files(&self, _url: &Url) -> Result<(), ExecutorError> { |     async fn serve_files(&self, _url: &Url) -> Result<(), ExecutorError> { | ||||||
|         unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) |         unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) | ||||||
|     } |     } | ||||||
|  |     async fn serve_file_content(&self, _file: &FileContent) -> Result<(), ExecutorError> { | ||||||
|  |         unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) | ||||||
|  |     } | ||||||
|     fn get_ip(&self) -> IpAddress { |     fn get_ip(&self) -> IpAddress { | ||||||
|         unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) |         unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) | ||||||
|     } |     } | ||||||
|  | |||||||
| @ -1,11 +1,12 @@ | |||||||
| use crate::executors::ExecutorError; | use crate::{data::FileContent, executors::ExecutorError}; | ||||||
| use async_trait::async_trait; | use async_trait::async_trait; | ||||||
| 
 | 
 | ||||||
| use super::{IpAddress, Url}; | use harmony_types::net::IpAddress; | ||||||
| 
 | use harmony_types::net::Url; | ||||||
| #[async_trait] | #[async_trait] | ||||||
| pub trait HttpServer: Send + Sync { | pub trait HttpServer: Send + Sync { | ||||||
|     async fn serve_files(&self, url: &Url) -> Result<(), ExecutorError>; |     async fn serve_files(&self, url: &Url) -> Result<(), ExecutorError>; | ||||||
|  |     async fn serve_file_content(&self, file: &FileContent) -> Result<(), ExecutorError>; | ||||||
|     fn get_ip(&self) -> IpAddress; |     fn get_ip(&self) -> IpAddress; | ||||||
| 
 | 
 | ||||||
|     // async fn set_ip(&self, ip: IpAddress) -> Result<(), ExecutorError>;
 |     // async fn set_ip(&self, ip: IpAddress) -> Result<(), ExecutorError>;
 | ||||||
|  | |||||||
| @ -185,7 +185,10 @@ impl K8sClient { | |||||||
|                 if let Some(s) = status.status { |                 if let Some(s) = status.status { | ||||||
|                     let mut stdout_buf = String::new(); |                     let mut stdout_buf = String::new(); | ||||||
|                     if let Some(mut stdout) = process.stdout().take() { |                     if let Some(mut stdout) = process.stdout().take() { | ||||||
|                         stdout.read_to_string(&mut stdout_buf).await; |                         stdout | ||||||
|  |                             .read_to_string(&mut stdout_buf) | ||||||
|  |                             .await | ||||||
|  |                             .map_err(|e| format!("Failed to get status stdout {e}"))?; | ||||||
|                     } |                     } | ||||||
|                     debug!("Status: {} - {:?}", s, status.details); |                     debug!("Status: {} - {:?}", s, status.details); | ||||||
|                     if s == "Success" { |                     if s == "Success" { | ||||||
|  | |||||||
| @ -4,8 +4,9 @@ use async_trait::async_trait; | |||||||
| use log::debug; | use log::debug; | ||||||
| use serde::Serialize; | use serde::Serialize; | ||||||
| 
 | 
 | ||||||
| use super::{IpAddress, LogicalHost}; | use super::LogicalHost; | ||||||
| use crate::executors::ExecutorError; | use crate::executors::ExecutorError; | ||||||
|  | use harmony_types::net::IpAddress; | ||||||
| 
 | 
 | ||||||
| impl std::fmt::Debug for dyn LoadBalancer { | impl std::fmt::Debug for dyn LoadBalancer { | ||||||
|     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { |     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { | ||||||
|  | |||||||
| @ -1,4 +1,5 @@ | |||||||
| mod ha_cluster; | mod ha_cluster; | ||||||
|  | use harmony_types::net::IpAddress; | ||||||
| mod host_binding; | mod host_binding; | ||||||
| mod http; | mod http; | ||||||
| pub mod installable; | pub mod installable; | ||||||
| @ -32,7 +33,6 @@ use super::{ | |||||||
|     instrumentation::{self, HarmonyEvent}, |     instrumentation::{self, HarmonyEvent}, | ||||||
| }; | }; | ||||||
| use std::error::Error; | use std::error::Error; | ||||||
| use std::net::IpAddr; |  | ||||||
| 
 | 
 | ||||||
| /// Represents a logical view of an infrastructure environment providing specific capabilities.
 | /// Represents a logical view of an infrastructure environment providing specific capabilities.
 | ||||||
| ///
 | ///
 | ||||||
| @ -196,35 +196,6 @@ pub trait MultiTargetTopology: Topology { | |||||||
|     fn current_target(&self) -> DeploymentTarget; |     fn current_target(&self) -> DeploymentTarget; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| pub type IpAddress = IpAddr; |  | ||||||
| 
 |  | ||||||
| #[derive(Debug, Clone)] |  | ||||||
| pub enum Url { |  | ||||||
|     LocalFolder(String), |  | ||||||
|     Url(url::Url), |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| impl Serialize for Url { |  | ||||||
|     fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> |  | ||||||
|     where |  | ||||||
|         S: serde::Serializer, |  | ||||||
|     { |  | ||||||
|         match self { |  | ||||||
|             Url::LocalFolder(path) => serializer.serialize_str(path), |  | ||||||
|             Url::Url(url) => serializer.serialize_str(url.as_str()), |  | ||||||
|         } |  | ||||||
|     } |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| impl std::fmt::Display for Url { |  | ||||||
|     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { |  | ||||||
|         match self { |  | ||||||
|             Url::LocalFolder(path) => write!(f, "{}", path), |  | ||||||
|             Url::Url(url) => write!(f, "{}", url), |  | ||||||
|         } |  | ||||||
|     } |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| /// Represents a logical member of a cluster that provides one or more services.
 | /// Represents a logical member of a cluster that provides one or more services.
 | ||||||
| ///
 | ///
 | ||||||
| /// A LogicalHost can represent various roles within the infrastructure, such as:
 | /// A LogicalHost can represent various roles within the infrastructure, such as:
 | ||||||
| @ -263,7 +234,8 @@ impl LogicalHost { | |||||||
|     ///
 |     ///
 | ||||||
|     /// ```
 |     /// ```
 | ||||||
|     /// use std::str::FromStr;
 |     /// use std::str::FromStr;
 | ||||||
|     /// use harmony::topology::{IpAddress, LogicalHost};
 |     /// use harmony::topology::{LogicalHost};
 | ||||||
|  |     /// use harmony_types::net::IpAddress;
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// let start_ip = IpAddress::from_str("192.168.0.20").unwrap();
 |     /// let start_ip = IpAddress::from_str("192.168.0.20").unwrap();
 | ||||||
|     /// let hosts = LogicalHost::create_hosts(3, start_ip, "worker");
 |     /// let hosts = LogicalHost::create_hosts(3, start_ip, "worker");
 | ||||||
| @ -319,7 +291,7 @@ fn increment_ip(ip: IpAddress, increment: u32) -> Option<IpAddress> { | |||||||
| 
 | 
 | ||||||
| #[cfg(test)] | #[cfg(test)] | ||||||
| mod tests { | mod tests { | ||||||
|     use super::*; |     use harmony_types::net::Url; | ||||||
|     use serde_json; |     use serde_json; | ||||||
| 
 | 
 | ||||||
|     #[test] |     #[test] | ||||||
|  | |||||||
| @ -1,12 +1,12 @@ | |||||||
| use std::{net::Ipv4Addr, str::FromStr, sync::Arc}; | use std::{net::Ipv4Addr, str::FromStr, sync::Arc}; | ||||||
| 
 | 
 | ||||||
| use async_trait::async_trait; | use async_trait::async_trait; | ||||||
| use harmony_types::net::MacAddress; | use harmony_types::net::{IpAddress, MacAddress}; | ||||||
| use serde::Serialize; | use serde::Serialize; | ||||||
| 
 | 
 | ||||||
| use crate::executors::ExecutorError; | use crate::executors::ExecutorError; | ||||||
| 
 | 
 | ||||||
| use super::{IpAddress, LogicalHost, k8s::K8sClient}; | use super::{LogicalHost, k8s::K8sClient}; | ||||||
| 
 | 
 | ||||||
| #[derive(Debug)] | #[derive(Debug)] | ||||||
| pub struct DHCPStaticEntry { | pub struct DHCPStaticEntry { | ||||||
| @ -46,16 +46,19 @@ pub trait K8sclient: Send + Sync { | |||||||
|     async fn k8s_client(&self) -> Result<Arc<K8sClient>, String>; |     async fn k8s_client(&self) -> Result<Arc<K8sClient>, String>; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | pub struct PxeOptions { | ||||||
|  |     pub ipxe_filename: String, | ||||||
|  |     pub bios_filename: String, | ||||||
|  |     pub efi_filename: String, | ||||||
|  |     pub tftp_ip: Option<IpAddress>, | ||||||
|  | } | ||||||
|  | 
 | ||||||
| #[async_trait] | #[async_trait] | ||||||
| pub trait DhcpServer: Send + Sync + std::fmt::Debug { | pub trait DhcpServer: Send + Sync + std::fmt::Debug { | ||||||
|     async fn add_static_mapping(&self, entry: &DHCPStaticEntry) -> Result<(), ExecutorError>; |     async fn add_static_mapping(&self, entry: &DHCPStaticEntry) -> Result<(), ExecutorError>; | ||||||
|     async fn remove_static_mapping(&self, mac: &MacAddress) -> Result<(), ExecutorError>; |     async fn remove_static_mapping(&self, mac: &MacAddress) -> Result<(), ExecutorError>; | ||||||
|     async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)>; |     async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)>; | ||||||
|     async fn set_next_server(&self, ip: IpAddress) -> Result<(), ExecutorError>; |     async fn set_pxe_options(&self, pxe_options: PxeOptions) -> Result<(), ExecutorError>; | ||||||
|     async fn set_boot_filename(&self, boot_filename: &str) -> Result<(), ExecutorError>; |  | ||||||
|     async fn set_filename(&self, filename: &str) -> Result<(), ExecutorError>; |  | ||||||
|     async fn set_filename64(&self, filename64: &str) -> Result<(), ExecutorError>; |  | ||||||
|     async fn set_filenameipxe(&self, filenameipxe: &str) -> Result<(), ExecutorError>; |  | ||||||
|     fn get_ip(&self) -> IpAddress; |     fn get_ip(&self) -> IpAddress; | ||||||
|     fn get_host(&self) -> LogicalHost; |     fn get_host(&self) -> LogicalHost; | ||||||
|     async fn commit_config(&self) -> Result<(), ExecutorError>; |     async fn commit_config(&self) -> Result<(), ExecutorError>; | ||||||
|  | |||||||
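For context, a minimal sketch of how a caller might drive the consolidated PXE API introduced above, assuming PxeOptions and DhcpServer are reachable at the paths used elsewhere in this diff; the configure_pxe helper and the filenames are illustrative, not part of the changeset:

    use std::str::FromStr;

    use harmony_types::net::IpAddress;

    use crate::executors::ExecutorError;
    use crate::topology::{DhcpServer, PxeOptions};

    // Hypothetical helper: set the usual iPXE chain-loading filenames in one call,
    // then persist the change on the DHCP server.
    async fn configure_pxe<D: DhcpServer>(server: &D) -> Result<(), ExecutorError> {
        let options = PxeOptions {
            ipxe_filename: "boot.ipxe".to_string(),
            bios_filename: "undionly.kpxe".to_string(),
            efi_filename: "ipxe.efi".to_string(),
            // None is expected to make the implementation fall back to its own address.
            tftp_ip: Some(IpAddress::from_str("192.168.1.1").unwrap()),
        };
        server.set_pxe_options(options).await?;
        server.commit_config().await
    }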
| @ -4,11 +4,12 @@ use async_trait::async_trait; | |||||||
| use log::debug; | use log::debug; | ||||||
| 
 | 
 | ||||||
| use crate::{ | use crate::{ | ||||||
|     data::{Id, Version}, |     data::Version, | ||||||
|     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, |     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, | ||||||
|     inventory::Inventory, |     inventory::Inventory, | ||||||
|     topology::{Topology, installable::Installable}, |     topology::{Topology, installable::Installable}, | ||||||
| }; | }; | ||||||
|  | use harmony_types::id::Id; | ||||||
| 
 | 
 | ||||||
| #[async_trait] | #[async_trait] | ||||||
| pub trait AlertSender: Send + Sync + std::fmt::Debug { | pub trait AlertSender: Send + Sync + std::fmt::Debug { | ||||||
|  | |||||||
| @ -2,7 +2,7 @@ pub mod k8s; | |||||||
| mod manager; | mod manager; | ||||||
| pub mod network_policy; | pub mod network_policy; | ||||||
| 
 | 
 | ||||||
| use crate::data::Id; | use harmony_types::id::Id; | ||||||
| pub use manager::*; | pub use manager::*; | ||||||
| use serde::{Deserialize, Serialize}; | use serde::{Deserialize, Serialize}; | ||||||
| use std::str::FromStr; | use std::str::FromStr; | ||||||
|  | |||||||
| @ -1,7 +1,7 @@ | |||||||
| use crate::executors::ExecutorError; | use crate::executors::ExecutorError; | ||||||
| use async_trait::async_trait; | use async_trait::async_trait; | ||||||
| 
 | 
 | ||||||
| use super::{IpAddress, Url}; | use harmony_types::net::{IpAddress, Url}; | ||||||
| 
 | 
 | ||||||
| #[async_trait] | #[async_trait] | ||||||
| pub trait TftpServer: Send + Sync { | pub trait TftpServer: Send + Sync { | ||||||
|  | |||||||
| @ -3,11 +3,9 @@ use std::sync::Arc; | |||||||
| 
 | 
 | ||||||
| use russh::{client, keys::key}; | use russh::{client, keys::key}; | ||||||
| 
 | 
 | ||||||
| use crate::{ | use crate::domain::executors::{ExecutorError, SshClient}; | ||||||
|     domain::executors::{ExecutorError, SshClient}, |  | ||||||
|     topology::IpAddress, |  | ||||||
| }; |  | ||||||
| 
 | 
 | ||||||
|  | use harmony_types::net::IpAddress; | ||||||
| pub struct RusshClient; | pub struct RusshClient; | ||||||
| 
 | 
 | ||||||
| #[async_trait] | #[async_trait] | ||||||
|  | |||||||
| @ -1,6 +1,6 @@ | |||||||
| use crate::hardware::ManagementInterface; | use crate::hardware::ManagementInterface; | ||||||
| use crate::topology::IpAddress; |  | ||||||
| use derive_new::new; | use derive_new::new; | ||||||
|  | use harmony_types::net::IpAddress; | ||||||
| use harmony_types::net::MacAddress; | use harmony_types::net::MacAddress; | ||||||
| use log::info; | use log::info; | ||||||
| use serde::Serialize; | use serde::Serialize; | ||||||
|  | |||||||
							
								
								
									
17  harmony/src/infra/inventory/mod.rs  (new file)
							| @ -0,0 +1,17 @@ | |||||||
|  | use crate::{ | ||||||
|  |     config::DATABASE_URL, | ||||||
|  |     infra::inventory::sqlite::SqliteInventoryRepository, | ||||||
|  |     inventory::{InventoryRepository, RepoError}, | ||||||
|  | }; | ||||||
|  | 
 | ||||||
|  | pub mod sqlite; | ||||||
|  | 
 | ||||||
|  | pub struct InventoryRepositoryFactory; | ||||||
|  | 
 | ||||||
|  | impl InventoryRepositoryFactory { | ||||||
|  |     pub async fn build() -> Result<Box<dyn InventoryRepository>, RepoError> { | ||||||
|  |         Ok(Box::new( | ||||||
|  |             SqliteInventoryRepository::new(&(*DATABASE_URL)).await?, | ||||||
|  |         )) | ||||||
|  |     } | ||||||
|  | } | ||||||
							
								
								
									
65  harmony/src/infra/inventory/sqlite.rs  (new file)
							| @ -0,0 +1,65 @@ | |||||||
|  | use crate::{ | ||||||
|  |     hardware::PhysicalHost, | ||||||
|  |     inventory::{InventoryRepository, RepoError}, | ||||||
|  | }; | ||||||
|  | use async_trait::async_trait; | ||||||
|  | use harmony_types::id::Id; | ||||||
|  | use log::info; | ||||||
|  | use sqlx::{Pool, Sqlite, SqlitePool}; | ||||||
|  | 
 | ||||||
|  | /// A thread-safe, connection-pooled repository using SQLite.
 | ||||||
|  | #[derive(Debug)] | ||||||
|  | pub struct SqliteInventoryRepository { | ||||||
|  |     pool: Pool<Sqlite>, | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | impl SqliteInventoryRepository { | ||||||
|  |     pub async fn new(database_url: &str) -> Result<Self, RepoError> { | ||||||
|  |         let pool = SqlitePool::connect(database_url) | ||||||
|  |             .await | ||||||
|  |             .map_err(|e| RepoError::ConnectionFailed(e.to_string()))?; | ||||||
|  | 
 | ||||||
|  |         info!("SQLite inventory repository initialized at '{database_url}'"); | ||||||
|  |         Ok(Self { pool }) | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | #[async_trait] | ||||||
|  | impl InventoryRepository for SqliteInventoryRepository { | ||||||
|  |     async fn save(&self, host: &PhysicalHost) -> Result<(), RepoError> { | ||||||
|  |         let data = serde_json::to_vec(host).map_err(|e| RepoError::Serialization(e.to_string()))?; | ||||||
|  | 
 | ||||||
|  |         let id = Id::default().to_string(); | ||||||
|  |         let host_id = host.id.to_string(); | ||||||
|  | 
 | ||||||
|  |         sqlx::query!( | ||||||
|  |             "INSERT INTO physical_hosts (id, version_id, data) VALUES (?, ?, ?)", | ||||||
|  |             host_id, | ||||||
|  |             id, | ||||||
|  |             data, | ||||||
|  |         ) | ||||||
|  |         .execute(&self.pool) | ||||||
|  |         .await?; | ||||||
|  | 
 | ||||||
|  |         info!("Saved new inventory version for host '{}'", host.id); | ||||||
|  |         Ok(()) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     async fn get_latest_by_id(&self, host_id: &str) -> Result<Option<PhysicalHost>, RepoError> { | ||||||
|  |         let _row = sqlx::query_as!( | ||||||
|  |             DbHost, | ||||||
|  |             r#"SELECT id, version_id, data as "data: Json<PhysicalHost>" FROM physical_hosts WHERE id = ? ORDER BY version_id DESC LIMIT 1"#, | ||||||
|  |             host_id | ||||||
|  |         ) | ||||||
|  |         .fetch_optional(&self.pool) | ||||||
|  |         .await?; | ||||||
|  |         todo!() | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | use sqlx::types::Json; | ||||||
|  | struct DbHost { | ||||||
|  |     data: Json<PhysicalHost>, | ||||||
|  |     id: Id, | ||||||
|  |     version_id: Id, | ||||||
|  | } | ||||||
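The new repository leaves get_latest_by_id at todo!(); one possible completion, assuming the Json<PhysicalHost> wrapper held by DbHost can simply be unwrapped, is sketched below (this is not what the commit ships):

    async fn get_latest_by_id(&self, host_id: &str) -> Result<Option<PhysicalHost>, RepoError> {
        // Same query as above, but keep the row instead of discarding it.
        let row = sqlx::query_as!(
            DbHost,
            r#"SELECT id, version_id, data as "data: Json<PhysicalHost>" FROM physical_hosts WHERE id = ? ORDER BY version_id DESC LIMIT 1"#,
            host_id
        )
        .fetch_optional(&self.pool)
        .await?;

        // sqlx::types::Json is a newtype wrapper; .0 yields the deserialized PhysicalHost.
        Ok(row.map(|db_host| db_host.data.0))
    }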
| @ -1,4 +1,6 @@ | |||||||
| pub mod executors; | pub mod executors; | ||||||
| pub mod hp_ilo; | pub mod hp_ilo; | ||||||
| pub mod intel_amt; | pub mod intel_amt; | ||||||
|  | pub mod inventory; | ||||||
| pub mod opnsense; | pub mod opnsense; | ||||||
|  | mod sqlx; | ||||||
|  | |||||||
| @ -1,13 +1,14 @@ | |||||||
| use async_trait::async_trait; | use async_trait::async_trait; | ||||||
| use harmony_types::net::MacAddress; | use harmony_types::net::MacAddress; | ||||||
| use log::debug; | use log::info; | ||||||
| 
 | 
 | ||||||
| use crate::{ | use crate::{ | ||||||
|     executors::ExecutorError, |     executors::ExecutorError, | ||||||
|     topology::{DHCPStaticEntry, DhcpServer, IpAddress, LogicalHost}, |     topology::{DHCPStaticEntry, DhcpServer, LogicalHost, PxeOptions}, | ||||||
| }; | }; | ||||||
| 
 | 
 | ||||||
| use super::OPNSenseFirewall; | use super::OPNSenseFirewall; | ||||||
|  | use harmony_types::net::IpAddress; | ||||||
| 
 | 
 | ||||||
| #[async_trait] | #[async_trait] | ||||||
| impl DhcpServer for OPNSenseFirewall { | impl DhcpServer for OPNSenseFirewall { | ||||||
| @ -26,7 +27,7 @@ impl DhcpServer for OPNSenseFirewall { | |||||||
|                 .unwrap(); |                 .unwrap(); | ||||||
|         } |         } | ||||||
| 
 | 
 | ||||||
|         debug!("Registered {:?}", entry); |         info!("Registered {:?}", entry); | ||||||
|         Ok(()) |         Ok(()) | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
| @ -46,57 +47,25 @@ impl DhcpServer for OPNSenseFirewall { | |||||||
|         self.host.clone() |         self.host.clone() | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     async fn set_next_server(&self, ip: IpAddress) -> Result<(), ExecutorError> { |     async fn set_pxe_options(&self, options: PxeOptions) -> Result<(), ExecutorError> { | ||||||
|         let ipv4 = match ip { |  | ||||||
|             std::net::IpAddr::V4(ipv4_addr) => ipv4_addr, |  | ||||||
|             std::net::IpAddr::V6(_) => todo!("ipv6 not supported yet"), |  | ||||||
|         }; |  | ||||||
|         { |  | ||||||
|         let mut writable_opnsense = self.opnsense_config.write().await; |         let mut writable_opnsense = self.opnsense_config.write().await; | ||||||
|             writable_opnsense.dhcp().set_next_server(ipv4); |         let PxeOptions { | ||||||
|             debug!("OPNsense dhcp server set next server {ipv4}"); |             ipxe_filename, | ||||||
|         } |             bios_filename, | ||||||
| 
 |             efi_filename, | ||||||
|         Ok(()) |             tftp_ip, | ||||||
|     } |         } = options; | ||||||
| 
 |         writable_opnsense | ||||||
|     async fn set_boot_filename(&self, boot_filename: &str) -> Result<(), ExecutorError> { |             .dhcp() | ||||||
|         { |             .set_pxe_options( | ||||||
|             let mut writable_opnsense = self.opnsense_config.write().await; |                 tftp_ip.map(|i| i.to_string()), | ||||||
|             writable_opnsense.dhcp().set_boot_filename(boot_filename); |                 bios_filename, | ||||||
|             debug!("OPNsense dhcp server set boot filename {boot_filename}"); |                 efi_filename, | ||||||
|         } |                 ipxe_filename, | ||||||
| 
 |             ) | ||||||
|         Ok(()) |             .await | ||||||
|     } |             .map_err(|dhcp_error| { | ||||||
| 
 |                 ExecutorError::UnexpectedError(format!("Failed to set_pxe_options : {dhcp_error}")) | ||||||
|     async fn set_filename(&self, filename: &str) -> Result<(), ExecutorError> { |             }) | ||||||
|         { |  | ||||||
|             let mut writable_opnsense = self.opnsense_config.write().await; |  | ||||||
|             writable_opnsense.dhcp().set_filename(filename); |  | ||||||
|             debug!("OPNsense dhcp server set filename {filename}"); |  | ||||||
|         } |  | ||||||
| 
 |  | ||||||
|         Ok(()) |  | ||||||
|     } |  | ||||||
| 
 |  | ||||||
|     async fn set_filename64(&self, filename: &str) -> Result<(), ExecutorError> { |  | ||||||
|         { |  | ||||||
|             let mut writable_opnsense = self.opnsense_config.write().await; |  | ||||||
|             writable_opnsense.dhcp().set_filename64(filename); |  | ||||||
|             debug!("OPNsense dhcp server set filename {filename}"); |  | ||||||
|         } |  | ||||||
| 
 |  | ||||||
|         Ok(()) |  | ||||||
|     } |  | ||||||
| 
 |  | ||||||
|     async fn set_filenameipxe(&self, filenameipxe: &str) -> Result<(), ExecutorError> { |  | ||||||
|         { |  | ||||||
|             let mut writable_opnsense = self.opnsense_config.write().await; |  | ||||||
|             writable_opnsense.dhcp().set_filenameipxe(filenameipxe); |  | ||||||
|             debug!("OPNsense dhcp server set filenameipxe {filenameipxe}"); |  | ||||||
|         } |  | ||||||
| 
 |  | ||||||
|         Ok(()) |  | ||||||
|     } |     } | ||||||
| } | } | ||||||
|  | |||||||
| @ -1,11 +1,11 @@ | |||||||
| use crate::infra::opnsense::Host; | use crate::infra::opnsense::Host; | ||||||
| use crate::infra::opnsense::IpAddress; |  | ||||||
| use crate::infra::opnsense::LogicalHost; | use crate::infra::opnsense::LogicalHost; | ||||||
| use crate::{ | use crate::{ | ||||||
|     executors::ExecutorError, |     executors::ExecutorError, | ||||||
|     topology::{DnsRecord, DnsServer}, |     topology::{DnsRecord, DnsServer}, | ||||||
| }; | }; | ||||||
| use async_trait::async_trait; | use async_trait::async_trait; | ||||||
|  | use harmony_types::net::IpAddress; | ||||||
| 
 | 
 | ||||||
| use super::OPNSenseFirewall; | use super::OPNSenseFirewall; | ||||||
| 
 | 
 | ||||||
|  | |||||||
| @ -1,9 +1,10 @@ | |||||||
| use crate::{ | use crate::{ | ||||||
|     executors::ExecutorError, |     executors::ExecutorError, | ||||||
|     topology::{Firewall, FirewallRule, IpAddress, LogicalHost}, |     topology::{Firewall, FirewallRule, LogicalHost}, | ||||||
| }; | }; | ||||||
| 
 | 
 | ||||||
| use super::OPNSenseFirewall; | use super::OPNSenseFirewall; | ||||||
|  | use harmony_types::net::IpAddress; | ||||||
| 
 | 
 | ||||||
| impl Firewall for OPNSenseFirewall { | impl Firewall for OPNSenseFirewall { | ||||||
|     fn add_rule(&mut self, _rule: FirewallRule) -> Result<(), ExecutorError> { |     fn add_rule(&mut self, _rule: FirewallRule) -> Result<(), ExecutorError> { | ||||||
|  | |||||||
| @ -1,24 +1,22 @@ | |||||||
| use async_trait::async_trait; | use async_trait::async_trait; | ||||||
| use log::info; | use log::info; | ||||||
| 
 | 
 | ||||||
| use crate::{ | use crate::{data::FileContent, executors::ExecutorError, topology::HttpServer}; | ||||||
|     executors::ExecutorError, |  | ||||||
|     topology::{HttpServer, IpAddress, Url}, |  | ||||||
| }; |  | ||||||
| 
 | 
 | ||||||
| use super::OPNSenseFirewall; | use super::OPNSenseFirewall; | ||||||
|  | use harmony_types::net::IpAddress; | ||||||
|  | use harmony_types::net::Url; | ||||||
|  | const OPNSENSE_HTTP_ROOT_PATH: &str = "/usr/local/http"; | ||||||
| 
 | 
 | ||||||
| #[async_trait] | #[async_trait] | ||||||
| impl HttpServer for OPNSenseFirewall { | impl HttpServer for OPNSenseFirewall { | ||||||
|     async fn serve_files(&self, url: &Url) -> Result<(), ExecutorError> { |     async fn serve_files(&self, url: &Url) -> Result<(), ExecutorError> { | ||||||
|         let http_root_path = "/usr/local/http"; |  | ||||||
| 
 |  | ||||||
|         let config = self.opnsense_config.read().await; |         let config = self.opnsense_config.read().await; | ||||||
|         info!("Uploading files from url {url} to {http_root_path}"); |         info!("Uploading files from url {url} to {OPNSENSE_HTTP_ROOT_PATH}"); | ||||||
|         match url { |         match url { | ||||||
|             Url::LocalFolder(path) => { |             Url::LocalFolder(path) => { | ||||||
|                 config |                 config | ||||||
|                     .upload_files(path, http_root_path) |                     .upload_files(path, OPNSENSE_HTTP_ROOT_PATH) | ||||||
|                     .await |                     .await | ||||||
|                     .map_err(|e| ExecutorError::UnexpectedError(e.to_string()))?; |                     .map_err(|e| ExecutorError::UnexpectedError(e.to_string()))?; | ||||||
|             } |             } | ||||||
| @ -27,8 +25,29 @@ impl HttpServer for OPNSenseFirewall { | |||||||
|         Ok(()) |         Ok(()) | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|  |     async fn serve_file_content(&self, file: &FileContent) -> Result<(), ExecutorError> { | ||||||
|  |         let path = match &file.path { | ||||||
|  |             crate::data::FilePath::Relative(path) => { | ||||||
|  |                 format!("{OPNSENSE_HTTP_ROOT_PATH}/{}", path.to_string()) | ||||||
|  |             } | ||||||
|  |             crate::data::FilePath::Absolute(path) => { | ||||||
|  |                 return Err(ExecutorError::ConfigurationError(format!( | ||||||
|  |                     "Cannot serve file from http server with absolute path : {path}" | ||||||
|  |                 ))); | ||||||
|  |             } | ||||||
|  |         }; | ||||||
|  | 
 | ||||||
|  |         let config = self.opnsense_config.read().await; | ||||||
|  |         info!("Uploading file content to {}", path); | ||||||
|  |         config | ||||||
|  |             .upload_file_content(&path, &file.content) | ||||||
|  |             .await | ||||||
|  |             .map_err(|e| ExecutorError::UnexpectedError(e.to_string()))?; | ||||||
|  |         Ok(()) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|     fn get_ip(&self) -> IpAddress { |     fn get_ip(&self) -> IpAddress { | ||||||
|         todo!(); |         OPNSenseFirewall::get_ip(self) | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     async fn commit_config(&self) -> Result<(), ExecutorError> { |     async fn commit_config(&self) -> Result<(), ExecutorError> { | ||||||
|  | |||||||
| @ -6,10 +6,11 @@ use uuid::Uuid; | |||||||
| use crate::{ | use crate::{ | ||||||
|     executors::ExecutorError, |     executors::ExecutorError, | ||||||
|     topology::{ |     topology::{ | ||||||
|         BackendServer, HealthCheck, HttpMethod, HttpStatusCode, IpAddress, LoadBalancer, |         BackendServer, HealthCheck, HttpMethod, HttpStatusCode, LoadBalancer, LoadBalancerService, | ||||||
|         LoadBalancerService, LogicalHost, |         LogicalHost, | ||||||
|     }, |     }, | ||||||
| }; | }; | ||||||
|  | use harmony_types::net::IpAddress; | ||||||
| 
 | 
 | ||||||
| use super::OPNSenseFirewall; | use super::OPNSenseFirewall; | ||||||
| 
 | 
 | ||||||
|  | |||||||
| @ -11,10 +11,8 @@ pub use management::*; | |||||||
| use opnsense_config_xml::Host; | use opnsense_config_xml::Host; | ||||||
| use tokio::sync::RwLock; | use tokio::sync::RwLock; | ||||||
| 
 | 
 | ||||||
| use crate::{ | use crate::{executors::ExecutorError, topology::LogicalHost}; | ||||||
|     executors::ExecutorError, | use harmony_types::net::IpAddress; | ||||||
|     topology::{IpAddress, LogicalHost}, |  | ||||||
| }; |  | ||||||
| 
 | 
 | ||||||
| #[derive(Debug, Clone)] | #[derive(Debug, Clone)] | ||||||
| pub struct OPNSenseFirewall { | pub struct OPNSenseFirewall { | ||||||
|  | |||||||
| @ -1,10 +1,9 @@ | |||||||
| use async_trait::async_trait; | use async_trait::async_trait; | ||||||
| use log::info; | use log::info; | ||||||
| 
 | 
 | ||||||
| use crate::{ | use crate::{executors::ExecutorError, topology::TftpServer}; | ||||||
|     executors::ExecutorError, | use harmony_types::net::IpAddress; | ||||||
|     topology::{IpAddress, TftpServer, Url}, | use harmony_types::net::Url; | ||||||
| }; |  | ||||||
| 
 | 
 | ||||||
| use super::OPNSenseFirewall; | use super::OPNSenseFirewall; | ||||||
| 
 | 
 | ||||||
| @ -28,7 +27,7 @@ impl TftpServer for OPNSenseFirewall { | |||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     fn get_ip(&self) -> IpAddress { |     fn get_ip(&self) -> IpAddress { | ||||||
|         todo!() |         OPNSenseFirewall::get_ip(self) | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     async fn set_ip(&self, ip: IpAddress) -> Result<(), ExecutorError> { |     async fn set_ip(&self, ip: IpAddress) -> Result<(), ExecutorError> { | ||||||
|  | |||||||
							
								
								
									
36  harmony/src/infra/sqlx.rs  (new file)
							| @ -0,0 +1,36 @@ | |||||||
|  | use crate::inventory::RepoError; | ||||||
|  | 
 | ||||||
|  | impl From<sqlx::Error> for RepoError { | ||||||
|  |     fn from(value: sqlx::Error) -> Self { | ||||||
|  |         match value { | ||||||
|  |             sqlx::Error::Configuration(_) | ||||||
|  |             | sqlx::Error::Io(_) | ||||||
|  |             | sqlx::Error::Tls(_) | ||||||
|  |             | sqlx::Error::Protocol(_) | ||||||
|  |             | sqlx::Error::PoolTimedOut | ||||||
|  |             | sqlx::Error::PoolClosed | ||||||
|  |             | sqlx::Error::WorkerCrashed => RepoError::ConnectionFailed(value.to_string()), | ||||||
|  |             sqlx::Error::InvalidArgument(_) | ||||||
|  |             | sqlx::Error::Database(_) | ||||||
|  |             | sqlx::Error::RowNotFound | ||||||
|  |             | sqlx::Error::TypeNotFound { .. } | ||||||
|  |             | sqlx::Error::ColumnIndexOutOfBounds { .. } | ||||||
|  |             | sqlx::Error::ColumnNotFound(_) | ||||||
|  |             | sqlx::Error::AnyDriverError(_) | ||||||
|  |             | sqlx::Error::Migrate(_) | ||||||
|  |             | sqlx::Error::InvalidSavePointStatement | ||||||
|  |             | sqlx::Error::BeginFailed => RepoError::QueryFailed(value.to_string()), | ||||||
|  |             sqlx::Error::Encode(_) => RepoError::Serialization(value.to_string()), | ||||||
|  |             sqlx::Error::Decode(_) | sqlx::Error::ColumnDecode { .. } => { | ||||||
|  |                 RepoError::Deserialization(value.to_string()) | ||||||
|  |             } | ||||||
|  |             _ => RepoError::QueryFailed(value.to_string()), | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | impl From<serde_json::Error> for RepoError { | ||||||
|  |     fn from(value: serde_json::Error) -> Self { | ||||||
|  |         RepoError::Serialization(value.to_string()) | ||||||
|  |     } | ||||||
|  | } | ||||||
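With these From impls in place, repository code can propagate sqlx and serde_json failures with the ? operator instead of explicit map_err calls. A small hypothetical helper to illustrate (count_hosts is not part of the changeset):

    use crate::inventory::RepoError;

    // sqlx::Error converts into RepoError automatically through the impl above.
    async fn count_hosts(pool: &sqlx::SqlitePool) -> Result<i64, RepoError> {
        let (count,): (i64,) = sqlx::query_as("SELECT COUNT(*) FROM physical_hosts")
            .fetch_one(pool)
            .await?;
        Ok(count)
    }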
| @ -4,13 +4,14 @@ use serde::Serialize; | |||||||
| use std::str::FromStr; | use std::str::FromStr; | ||||||
| 
 | 
 | ||||||
| use crate::{ | use crate::{ | ||||||
|     data::{Id, Version}, |     data::Version, | ||||||
|     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, |     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, | ||||||
|     inventory::Inventory, |     inventory::Inventory, | ||||||
|     modules::helm::chart::{HelmChartScore, HelmRepository}, |     modules::helm::chart::{HelmChartScore, HelmRepository}, | ||||||
|     score::Score, |     score::Score, | ||||||
|     topology::{HelmCommand, K8sclient, Topology}, |     topology::{HelmCommand, K8sclient, Topology}, | ||||||
| }; | }; | ||||||
|  | use harmony_types::id::Id; | ||||||
| 
 | 
 | ||||||
| use super::ArgoApplication; | use super::ArgoApplication; | ||||||
| 
 | 
 | ||||||
|  | |||||||
| @ -11,7 +11,7 @@ use crate::{ | |||||||
|         alert_channel::webhook_receiver::WebhookReceiver, ntfy::ntfy::NtfyScore, |         alert_channel::webhook_receiver::WebhookReceiver, ntfy::ntfy::NtfyScore, | ||||||
|     }, |     }, | ||||||
|     score::Score, |     score::Score, | ||||||
|     topology::{HelmCommand, K8sclient, Topology, Url, tenant::TenantManager}, |     topology::{HelmCommand, K8sclient, Topology, tenant::TenantManager}, | ||||||
| }; | }; | ||||||
| use crate::{ | use crate::{ | ||||||
|     modules::prometheus::prometheus::PrometheusApplicationMonitoring, |     modules::prometheus::prometheus::PrometheusApplicationMonitoring, | ||||||
| @ -19,6 +19,7 @@ use crate::{ | |||||||
| }; | }; | ||||||
| use async_trait::async_trait; | use async_trait::async_trait; | ||||||
| use base64::{Engine as _, engine::general_purpose}; | use base64::{Engine as _, engine::general_purpose}; | ||||||
|  | use harmony_types::net::Url; | ||||||
| use log::{debug, info}; | use log::{debug, info}; | ||||||
| 
 | 
 | ||||||
| #[derive(Debug, Clone)] | #[derive(Debug, Clone)] | ||||||
|  | |||||||
| @ -13,12 +13,13 @@ use async_trait::async_trait; | |||||||
| use serde::Serialize; | use serde::Serialize; | ||||||
| 
 | 
 | ||||||
| use crate::{ | use crate::{ | ||||||
|     data::{Id, Version}, |     data::Version, | ||||||
|     instrumentation::{self, HarmonyEvent}, |     instrumentation::{self, HarmonyEvent}, | ||||||
|     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, |     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, | ||||||
|     inventory::Inventory, |     inventory::Inventory, | ||||||
|     topology::Topology, |     topology::Topology, | ||||||
| }; | }; | ||||||
|  | use harmony_types::id::Id; | ||||||
| 
 | 
 | ||||||
| #[derive(Clone, Debug)] | #[derive(Clone, Debug)] | ||||||
| pub enum ApplicationFeatureStatus { | pub enum ApplicationFeatureStatus { | ||||||
|  | |||||||
| @ -17,10 +17,8 @@ use tar::{Archive, Builder, Header}; | |||||||
| use walkdir::WalkDir; | use walkdir::WalkDir; | ||||||
| 
 | 
 | ||||||
| use crate::config::{REGISTRY_PROJECT, REGISTRY_URL}; | use crate::config::{REGISTRY_PROJECT, REGISTRY_URL}; | ||||||
| use crate::{ | use crate::{score::Score, topology::Topology}; | ||||||
|     score::Score, | use harmony_types::net::Url; | ||||||
|     topology::{Topology, Url}, |  | ||||||
| }; |  | ||||||
| 
 | 
 | ||||||
| use super::{Application, ApplicationFeature, ApplicationInterpret, HelmPackage, OCICompliant}; | use super::{Application, ApplicationFeature, ApplicationInterpret, HelmPackage, OCICompliant}; | ||||||
| 
 | 
 | ||||||
|  | |||||||
| @ -1,5 +1,6 @@ | |||||||
| use async_trait::async_trait; | use async_trait::async_trait; | ||||||
| use derive_new::new; | use derive_new::new; | ||||||
|  | use harmony_types::id::Id; | ||||||
| use log::info; | use log::info; | ||||||
| use serde::Serialize; | use serde::Serialize; | ||||||
| 
 | 
 | ||||||
| @ -7,10 +8,11 @@ use crate::{ | |||||||
|     domain::{data::Version, interpret::InterpretStatus}, |     domain::{data::Version, interpret::InterpretStatus}, | ||||||
|     interpret::{Interpret, InterpretError, InterpretName, Outcome}, |     interpret::{Interpret, InterpretError, InterpretName, Outcome}, | ||||||
|     inventory::Inventory, |     inventory::Inventory, | ||||||
|     topology::{DHCPStaticEntry, DhcpServer, HostBinding, IpAddress, Topology}, |     topology::{DHCPStaticEntry, DhcpServer, HostBinding, PxeOptions, Topology}, | ||||||
| }; | }; | ||||||
| 
 | 
 | ||||||
| use crate::domain::score::Score; | use crate::domain::score::Score; | ||||||
|  | use harmony_types::net::IpAddress; | ||||||
| 
 | 
 | ||||||
| #[derive(Debug, new, Clone, Serialize)] | #[derive(Debug, new, Clone, Serialize)] | ||||||
| pub struct DhcpScore { | pub struct DhcpScore { | ||||||
| @ -98,69 +100,14 @@ impl DhcpInterpret { | |||||||
|         _inventory: &Inventory, |         _inventory: &Inventory, | ||||||
|         dhcp_server: &D, |         dhcp_server: &D, | ||||||
|     ) -> Result<Outcome, InterpretError> { |     ) -> Result<Outcome, InterpretError> { | ||||||
|         let next_server_outcome = match self.score.next_server { |         let pxe_options = PxeOptions { | ||||||
|             Some(next_server) => { |             ipxe_filename: self.score.filenameipxe.clone().unwrap_or_default(), | ||||||
|                 dhcp_server.set_next_server(next_server).await?; |             bios_filename: self.score.filename.clone().unwrap_or_default(), | ||||||
|                 Outcome::new( |             efi_filename: self.score.filename64.clone().unwrap_or_default(), | ||||||
|                     InterpretStatus::SUCCESS, |             tftp_ip: self.score.next_server, | ||||||
|                     format!("Dhcp Interpret Set next boot to {next_server}"), |  | ||||||
|                 ) |  | ||||||
|             } |  | ||||||
|             None => Outcome::noop(), |  | ||||||
|         }; |         }; | ||||||
| 
 | 
 | ||||||
|         let boot_filename_outcome = match &self.score.boot_filename { |         dhcp_server.set_pxe_options(pxe_options).await?; | ||||||
|             Some(boot_filename) => { |  | ||||||
|                 dhcp_server.set_boot_filename(boot_filename).await?; |  | ||||||
|                 Outcome::new( |  | ||||||
|                     InterpretStatus::SUCCESS, |  | ||||||
|                     format!("Dhcp Interpret Set boot filename to {boot_filename}"), |  | ||||||
|                 ) |  | ||||||
|             } |  | ||||||
|             None => Outcome::noop(), |  | ||||||
|         }; |  | ||||||
| 
 |  | ||||||
|         let filename_outcome = match &self.score.filename { |  | ||||||
|             Some(filename) => { |  | ||||||
|                 dhcp_server.set_filename(filename).await?; |  | ||||||
|                 Outcome::new( |  | ||||||
|                     InterpretStatus::SUCCESS, |  | ||||||
|                     format!("Dhcp Interpret Set filename to {filename}"), |  | ||||||
|                 ) |  | ||||||
|             } |  | ||||||
|             None => Outcome::noop(), |  | ||||||
|         }; |  | ||||||
| 
 |  | ||||||
|         let filename64_outcome = match &self.score.filename64 { |  | ||||||
|             Some(filename64) => { |  | ||||||
|                 dhcp_server.set_filename64(filename64).await?; |  | ||||||
|                 Outcome::new( |  | ||||||
|                     InterpretStatus::SUCCESS, |  | ||||||
|                     format!("Dhcp Interpret Set filename64 to {filename64}"), |  | ||||||
|                 ) |  | ||||||
|             } |  | ||||||
|             None => Outcome::noop(), |  | ||||||
|         }; |  | ||||||
| 
 |  | ||||||
|         let filenameipxe_outcome = match &self.score.filenameipxe { |  | ||||||
|             Some(filenameipxe) => { |  | ||||||
|                 dhcp_server.set_filenameipxe(filenameipxe).await?; |  | ||||||
|                 Outcome::new( |  | ||||||
|                     InterpretStatus::SUCCESS, |  | ||||||
|                     format!("Dhcp Interpret Set filenameipxe to {filenameipxe}"), |  | ||||||
|                 ) |  | ||||||
|             } |  | ||||||
|             None => Outcome::noop(), |  | ||||||
|         }; |  | ||||||
| 
 |  | ||||||
|         if next_server_outcome.status == InterpretStatus::NOOP |  | ||||||
|             && boot_filename_outcome.status == InterpretStatus::NOOP |  | ||||||
|             && filename_outcome.status == InterpretStatus::NOOP |  | ||||||
|             && filename64_outcome.status == InterpretStatus::NOOP |  | ||||||
|             && filenameipxe_outcome.status == InterpretStatus::NOOP |  | ||||||
|         { |  | ||||||
|             return Ok(Outcome::noop()); |  | ||||||
|         } |  | ||||||
| 
 | 
 | ||||||
|         Ok(Outcome::new( |         Ok(Outcome::new( | ||||||
|             InterpretStatus::SUCCESS, |             InterpretStatus::SUCCESS, | ||||||
| @ -190,7 +137,7 @@ impl<T: DhcpServer> Interpret<T> for DhcpInterpret { | |||||||
|         self.status.clone() |         self.status.clone() | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     fn get_children(&self) -> Vec<crate::domain::data::Id> { |     fn get_children(&self) -> Vec<Id> { | ||||||
|         todo!() |         todo!() | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|  | |||||||
| @ -1,5 +1,6 @@ | |||||||
| use async_trait::async_trait; | use async_trait::async_trait; | ||||||
| use derive_new::new; | use derive_new::new; | ||||||
|  | use harmony_types::id::Id; | ||||||
| use log::info; | use log::info; | ||||||
| use serde::Serialize; | use serde::Serialize; | ||||||
| 
 | 
 | ||||||
| @ -91,7 +92,7 @@ impl<T: Topology + DnsServer> Interpret<T> for DnsInterpret { | |||||||
|         self.status.clone() |         self.status.clone() | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     fn get_children(&self) -> Vec<crate::domain::data::Id> { |     fn get_children(&self) -> Vec<Id> { | ||||||
|         todo!() |         todo!() | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|  | |||||||
| @ -1,4 +1,5 @@ | |||||||
| use async_trait::async_trait; | use async_trait::async_trait; | ||||||
|  | use harmony_types::id::Id; | ||||||
| use serde::Serialize; | use serde::Serialize; | ||||||
| 
 | 
 | ||||||
| use crate::{ | use crate::{ | ||||||
| @ -67,7 +68,7 @@ impl<T: Topology> Interpret<T> for DummyInterpret { | |||||||
|         self.status.clone() |         self.status.clone() | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     fn get_children(&self) -> Vec<crate::domain::data::Id> { |     fn get_children(&self) -> Vec<Id> { | ||||||
|         todo!() |         todo!() | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
| @ -113,7 +114,7 @@ impl<T: Topology> Interpret<T> for PanicInterpret { | |||||||
|         InterpretStatus::QUEUED |         InterpretStatus::QUEUED | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     fn get_children(&self) -> Vec<crate::domain::data::Id> { |     fn get_children(&self) -> Vec<Id> { | ||||||
|         todo!() |         todo!() | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|  | |||||||
| @ -1,9 +1,10 @@ | |||||||
| use crate::data::{Id, Version}; | use crate::data::Version; | ||||||
| use crate::interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}; | use crate::interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}; | ||||||
| use crate::inventory::Inventory; | use crate::inventory::Inventory; | ||||||
| use crate::score::Score; | use crate::score::Score; | ||||||
| use crate::topology::{HelmCommand, Topology}; | use crate::topology::{HelmCommand, Topology}; | ||||||
| use async_trait::async_trait; | use async_trait::async_trait; | ||||||
|  | use harmony_types::id::Id; | ||||||
| use helm_wrapper_rs; | use helm_wrapper_rs; | ||||||
| use helm_wrapper_rs::blocking::{DefaultHelmExecutor, HelmExecutor}; | use helm_wrapper_rs::blocking::{DefaultHelmExecutor, HelmExecutor}; | ||||||
| use log::{debug, info, warn}; | use log::{debug, info, warn}; | ||||||
|  | |||||||
| @ -8,11 +8,12 @@ use std::process::{Command, Output}; | |||||||
| use temp_dir::{self, TempDir}; | use temp_dir::{self, TempDir}; | ||||||
| use temp_file::TempFile; | use temp_file::TempFile; | ||||||
| 
 | 
 | ||||||
| use crate::data::{Id, Version}; | use crate::data::Version; | ||||||
| use crate::interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}; | use crate::interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}; | ||||||
| use crate::inventory::Inventory; | use crate::inventory::Inventory; | ||||||
| use crate::score::Score; | use crate::score::Score; | ||||||
| use crate::topology::{HelmCommand, K8sclient, Topology}; | use crate::topology::{HelmCommand, K8sclient, Topology}; | ||||||
|  | use harmony_types::id::Id; | ||||||
| 
 | 
 | ||||||
| #[derive(Clone)] | #[derive(Clone)] | ||||||
| pub struct HelmCommandExecutor { | pub struct HelmCommandExecutor { | ||||||
|  | |||||||
| @ -3,12 +3,14 @@ use derive_new::new; | |||||||
| use serde::Serialize; | use serde::Serialize; | ||||||
| 
 | 
 | ||||||
| use crate::{ | use crate::{ | ||||||
|     data::{Id, Version}, |     data::{FileContent, Version}, | ||||||
|     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, |     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, | ||||||
|     inventory::Inventory, |     inventory::Inventory, | ||||||
|     score::Score, |     score::Score, | ||||||
|     topology::{HttpServer, Topology, Url}, |     topology::{HttpServer, Topology}, | ||||||
| }; | }; | ||||||
|  | use harmony_types::id::Id; | ||||||
|  | use harmony_types::net::Url; | ||||||
| 
 | 
 | ||||||
| /// Configure an HTTP server that is provided by the Topology
 | /// Configure an HTTP server that is provided by the Topology
 | ||||||
| ///
 | ///
 | ||||||
| @ -23,7 +25,8 @@ use crate::{ | |||||||
| /// ```
 | /// ```
 | ||||||
| #[derive(Debug, new, Clone, Serialize)] | #[derive(Debug, new, Clone, Serialize)] | ||||||
| pub struct StaticFilesHttpScore { | pub struct StaticFilesHttpScore { | ||||||
|     files_to_serve: Url, |     pub folder_to_serve: Option<Url>, | ||||||
|  |     pub files: Vec<FileContent>, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| impl<T: Topology + HttpServer> Score<T> for StaticFilesHttpScore { | impl<T: Topology + HttpServer> Score<T> for StaticFilesHttpScore { | ||||||
| @ -50,12 +53,25 @@ impl<T: Topology + HttpServer> Interpret<T> for StaticFilesHttpInterpret { | |||||||
|     ) -> Result<Outcome, InterpretError> { |     ) -> Result<Outcome, InterpretError> { | ||||||
|         http_server.ensure_initialized().await?; |         http_server.ensure_initialized().await?; | ||||||
|         // http_server.set_ip(topology.router.get_gateway()).await?;
 |         // http_server.set_ip(topology.router.get_gateway()).await?;
 | ||||||
|         http_server.serve_files(&self.score.files_to_serve).await?; |         if let Some(folder) = self.score.folder_to_serve.as_ref() { | ||||||
|  |             http_server.serve_files(folder).await?; | ||||||
|  |         } | ||||||
|  | 
 | ||||||
|  |         for f in self.score.files.iter() { | ||||||
|  |             http_server.serve_file_content(&f).await? | ||||||
|  |         } | ||||||
|  | 
 | ||||||
|         http_server.commit_config().await?; |         http_server.commit_config().await?; | ||||||
|         http_server.reload_restart().await?; |         http_server.reload_restart().await?; | ||||||
|         Ok(Outcome::success(format!( |         Ok(Outcome::success(format!( | ||||||
|             "Http Server running and serving files from {}", |             "Http Server running and serving files from folder {:?} and content for {}", | ||||||
|             self.score.files_to_serve |             self.score.folder_to_serve, | ||||||
|  |             self.score | ||||||
|  |                 .files | ||||||
|  |                 .iter() | ||||||
|  |                 .map(|f| f.path.to_string()) | ||||||
|  |                 .collect::<Vec<String>>() | ||||||
|  |                 .join(",") | ||||||
|         ))) |         ))) | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|  | |||||||
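A sketch of constructing the reworked score with both a folder and inline file content, assuming FilePath::Relative and the content field hold plain Strings and that FileContent has exactly these two fields (neither detail is spelled out in this diff):

    // Hypothetical constructor for a PXE file set; FileContent, FilePath, Url and
    // StaticFilesHttpScore are assumed to be in scope as used elsewhere in this diff.
    fn pxe_http_score() -> StaticFilesHttpScore {
        StaticFilesHttpScore {
            folder_to_serve: Some(Url::LocalFolder("./pxe_files".to_string())),
            files: vec![FileContent {
                // Relative paths land under the server's HTTP root (/usr/local/http on OPNsense).
                path: FilePath::Relative("boot.ipxe".to_string()),
                content: "#!ipxe\nexit\n".to_string(),
            }],
        }
    }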
							
								
								
									
146  harmony/src/modules/inventory/mod.rs  (new file)
							| @ -0,0 +1,146 @@ | |||||||
|  | use async_trait::async_trait; | ||||||
|  | use harmony_inventory_agent::local_presence::DiscoveryEvent; | ||||||
|  | use log::{debug, info, trace}; | ||||||
|  | use serde::{Deserialize, Serialize}; | ||||||
|  | 
 | ||||||
|  | use crate::{ | ||||||
|  |     data::Version, | ||||||
|  |     hardware::{HostCategory, Label, PhysicalHost}, | ||||||
|  |     infra::inventory::InventoryRepositoryFactory, | ||||||
|  |     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, | ||||||
|  |     inventory::Inventory, | ||||||
|  |     score::Score, | ||||||
|  |     topology::Topology, | ||||||
|  | }; | ||||||
|  | use harmony_types::id::Id; | ||||||
|  | 
 | ||||||
 |  | /// This launches a harmony_inventory_agent discovery process
 | ||||||
|  | /// This will allow us to register/update hosts running harmony_inventory_agent
 | ||||||
|  | /// from LAN in the Harmony inventory
 | ||||||
|  | #[derive(Debug, Clone, Serialize, Deserialize)] | ||||||
|  | pub struct DiscoverInventoryAgentScore { | ||||||
|  |     pub discovery_timeout: Option<u64>, | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | impl<T: Topology> Score<T> for DiscoverInventoryAgentScore { | ||||||
|  |     fn name(&self) -> String { | ||||||
|  |         "DiscoverInventoryAgentScore".to_string() | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn create_interpret(&self) -> Box<dyn Interpret<T>> { | ||||||
|  |         Box::new(DiscoverInventoryAgentInterpret { | ||||||
|  |             score: self.clone(), | ||||||
|  |         }) | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | #[derive(Debug)] | ||||||
|  | struct DiscoverInventoryAgentInterpret { | ||||||
|  |     score: DiscoverInventoryAgentScore, | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | #[async_trait] | ||||||
|  | impl<T: Topology> Interpret<T> for DiscoverInventoryAgentInterpret { | ||||||
|  |     async fn execute( | ||||||
|  |         &self, | ||||||
|  |         _inventory: &Inventory, | ||||||
|  |         _topology: &T, | ||||||
|  |     ) -> Result<Outcome, InterpretError> { | ||||||
|  |         harmony_inventory_agent::local_presence::discover_agents( | ||||||
|  |             self.score.discovery_timeout, | ||||||
|  |             |event: DiscoveryEvent| -> Result<(), String> { | ||||||
|  |                 debug!("Discovery event {event:?}"); | ||||||
|  |                 match event { | ||||||
|  |                     DiscoveryEvent::ServiceResolved(service) => { | ||||||
|  |                         let service_name = service.fullname.clone(); | ||||||
|  |                         info!("Found service {service_name}"); | ||||||
|  | 
 | ||||||
|  |                         let address = match service.get_addresses().iter().next() { | ||||||
|  |                             Some(address) => address, | ||||||
|  |                             None => { | ||||||
|  |                                 return Err(format!( | ||||||
|  |                                     "Could not find address for service {service_name}" | ||||||
|  |                                 )); | ||||||
|  |                             } | ||||||
|  |                         }; | ||||||
|  | 
 | ||||||
|  |                         let address = address.to_string(); | ||||||
|  |                         let port = service.get_port(); | ||||||
|  | 
 | ||||||
|  |                         tokio::task::spawn(async move { | ||||||
|  |                             info!("Getting inventory for host {address} at port {port}"); | ||||||
|  |                             let host = | ||||||
|  |                                 harmony_inventory_agent::client::get_host_inventory(&address, port) | ||||||
|  |                                     .await | ||||||
|  |                                     .unwrap(); | ||||||
|  | 
 | ||||||
|  |                             trace!("Found host information {host:?}"); | ||||||
 |  |                             // TODO it's useless to have two distinct host types, but it requires a bit too much
 | ||||||
|  |                             // refactoring to do it now
 | ||||||
|  |                             let harmony_inventory_agent::hwinfo::PhysicalHost { | ||||||
|  |                                 storage_drives, | ||||||
|  |                                 storage_controller, | ||||||
|  |                                 memory_modules, | ||||||
|  |                                 cpus, | ||||||
|  |                                 chipset, | ||||||
|  |                                 network_interfaces, | ||||||
|  |                                 management_interface, | ||||||
|  |                                 host_uuid, | ||||||
|  |                             } = host; | ||||||
|  | 
 | ||||||
|  |                             let host = PhysicalHost { | ||||||
|  |                                 id: Id::from(host_uuid), | ||||||
|  |                                 category: HostCategory::Server, | ||||||
|  |                                 network: network_interfaces, | ||||||
|  |                                 storage: storage_drives, | ||||||
|  |                                 labels: vec![Label { | ||||||
|  |                                     name: "discovered-by".to_string(), | ||||||
|  |                                     value: "harmony-inventory-agent".to_string(), | ||||||
|  |                                 }], | ||||||
|  |                                 memory_modules, | ||||||
|  |                                 cpus, | ||||||
|  |                             }; | ||||||
|  | 
 | ||||||
|  |                             let repo = InventoryRepositoryFactory::build() | ||||||
|  |                                 .await | ||||||
|  |                                 .map_err(|e| format!("Could not build repository : {e}")) | ||||||
|  |                                 .unwrap(); | ||||||
|  |                             repo.save(&host) | ||||||
|  |                                 .await | ||||||
|  |                                 .map_err(|e| format!("Could not save host : {e}")) | ||||||
|  |                                 .unwrap(); | ||||||
|  |                             info!( | ||||||
|  |                                 "Saved new host id {}, summary : {}", | ||||||
|  |                                 host.id, | ||||||
|  |                                 host.summary() | ||||||
|  |                             ); | ||||||
|  |                         }); | ||||||
|  |                     } | ||||||
|  |                     _ => debug!("Unhandled event {event:?}"), | ||||||
|  |                 }; | ||||||
|  |                 Ok(()) | ||||||
|  |             }, | ||||||
|  |         ) | ||||||
|  |         .await; | ||||||
|  |         Ok(Outcome { | ||||||
|  |             status: InterpretStatus::SUCCESS, | ||||||
|  |             message: "Discovery process completed successfully".to_string(), | ||||||
|  |         }) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn get_name(&self) -> InterpretName { | ||||||
|  |         InterpretName::DiscoverInventoryAgent | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn get_version(&self) -> Version { | ||||||
|  |         todo!() | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn get_status(&self) -> InterpretStatus { | ||||||
|  |         todo!() | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn get_children(&self) -> Vec<Id> { | ||||||
|  |         todo!() | ||||||
|  |     } | ||||||
|  | } | ||||||
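Finally, a trivial sketch of instantiating the discovery score; the timeout unit is assumed to be seconds, since the value is passed straight through to discover_agents:

    // Hypothetical helper: run discovery with a bounded (assumed seconds) timeout.
    fn discovery_score() -> DiscoverInventoryAgentScore {
        DiscoverInventoryAgentScore {
            discovery_timeout: Some(30),
        }
    }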
| @ -3,12 +3,13 @@ use derive_new::new; | |||||||
| use serde::Serialize; | use serde::Serialize; | ||||||
| 
 | 
 | ||||||
| use crate::{ | use crate::{ | ||||||
|     data::{Id, Version}, |     data::Version, | ||||||
|     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, |     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, | ||||||
|     inventory::Inventory, |     inventory::Inventory, | ||||||
|     score::Score, |     score::Score, | ||||||
|     topology::Topology, |     topology::Topology, | ||||||
| }; | }; | ||||||
|  | use harmony_types::id::Id; | ||||||
| 
 | 
 | ||||||
| #[derive(Debug, new, Clone, Serialize)] | #[derive(Debug, new, Clone, Serialize)] | ||||||
| pub struct IpxeScore { | pub struct IpxeScore { | ||||||
|  | |||||||
| @ -6,12 +6,13 @@ use serde::Serialize; | |||||||
| 
 | 
 | ||||||
| use crate::{ | use crate::{ | ||||||
|     config::HARMONY_DATA_DIR, |     config::HARMONY_DATA_DIR, | ||||||
|     data::{Id, Version}, |     data::Version, | ||||||
|     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, |     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, | ||||||
|     inventory::Inventory, |     inventory::Inventory, | ||||||
|     score::Score, |     score::Score, | ||||||
|     topology::Topology, |     topology::Topology, | ||||||
| }; | }; | ||||||
|  | use harmony_types::id::Id; | ||||||
| 
 | 
 | ||||||
| #[derive(Debug, Clone, Serialize)] | #[derive(Debug, Clone, Serialize)] | ||||||
| pub struct K3DInstallationScore { | pub struct K3DInstallationScore { | ||||||
|  | |||||||
| @ -5,12 +5,13 @@ use log::info; | |||||||
| use serde::{Serialize, de::DeserializeOwned}; | use serde::{Serialize, de::DeserializeOwned}; | ||||||
| 
 | 
 | ||||||
| use crate::{ | use crate::{ | ||||||
|     data::{Id, Version}, |     data::Version, | ||||||
|     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, |     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, | ||||||
|     inventory::Inventory, |     inventory::Inventory, | ||||||
|     score::Score, |     score::Score, | ||||||
|     topology::{K8sclient, Topology}, |     topology::{K8sclient, Topology}, | ||||||
| }; | }; | ||||||
|  | use harmony_types::id::Id; | ||||||
| 
 | 
 | ||||||
| #[derive(Debug, Clone, Serialize)] | #[derive(Debug, Clone, Serialize)] | ||||||
| pub struct K8sResourceScore<K: Resource + std::fmt::Debug> { | pub struct K8sResourceScore<K: Resource + std::fmt::Debug> { | ||||||
|  | |||||||
| @ -3,6 +3,7 @@ use dockerfile_builder::instruction::{CMD, COPY, ENV, EXPOSE, FROM, RUN, WORKDIR | |||||||
| use dockerfile_builder::{Dockerfile, instruction_builder::EnvBuilder}; | use dockerfile_builder::{Dockerfile, instruction_builder::EnvBuilder}; | ||||||
| use fqdn::fqdn; | use fqdn::fqdn; | ||||||
| use harmony_macros::ingress_path; | use harmony_macros::ingress_path; | ||||||
|  | use harmony_types::net::Url; | ||||||
| use non_blank_string_rs::NonBlankString; | use non_blank_string_rs::NonBlankString; | ||||||
| use serde_json::json; | use serde_json::json; | ||||||
| use std::collections::HashMap; | use std::collections::HashMap; | ||||||
| @ -18,13 +19,14 @@ use crate::config::{REGISTRY_PROJECT, REGISTRY_URL}; | |||||||
| use crate::modules::k8s::ingress::K8sIngressScore; | use crate::modules::k8s::ingress::K8sIngressScore; | ||||||
| use crate::topology::HelmCommand; | use crate::topology::HelmCommand; | ||||||
| use crate::{ | use crate::{ | ||||||
|     data::{Id, Version}, |     data::Version, | ||||||
|     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, |     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, | ||||||
|     inventory::Inventory, |     inventory::Inventory, | ||||||
|     modules::k8s::deployment::K8sDeploymentScore, |     modules::k8s::deployment::K8sDeploymentScore, | ||||||
|     score::Score, |     score::Score, | ||||||
|     topology::{K8sclient, Topology, Url}, |     topology::{K8sclient, Topology}, | ||||||
| }; | }; | ||||||
|  | use harmony_types::id::Id; | ||||||
| 
 | 
 | ||||||
| use super::helm::chart::HelmChartScore; | use super::helm::chart::HelmChartScore; | ||||||
| 
 | 
 | ||||||
|  | |||||||
| @ -3,12 +3,13 @@ use log::info; | |||||||
| use serde::Serialize; | use serde::Serialize; | ||||||
| 
 | 
 | ||||||
| use crate::{ | use crate::{ | ||||||
|     data::{Id, Version}, |     data::Version, | ||||||
|     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, |     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, | ||||||
|     inventory::Inventory, |     inventory::Inventory, | ||||||
|     score::Score, |     score::Score, | ||||||
|     topology::{LoadBalancer, LoadBalancerService, Topology}, |     topology::{LoadBalancer, LoadBalancerService, Topology}, | ||||||
| }; | }; | ||||||
|  | use harmony_types::id::Id; | ||||||
| 
 | 
 | ||||||
| #[derive(Debug, Clone, Serialize)] | #[derive(Debug, Clone, Serialize)] | ||||||
| pub struct LoadBalancerScore { | pub struct LoadBalancerScore { | ||||||
|  | |||||||
| @ -5,6 +5,7 @@ pub mod dns; | |||||||
| pub mod dummy; | pub mod dummy; | ||||||
| pub mod helm; | pub mod helm; | ||||||
| pub mod http; | pub mod http; | ||||||
|  | pub mod inventory; | ||||||
| pub mod ipxe; | pub mod ipxe; | ||||||
| pub mod k3d; | pub mod k3d; | ||||||
| pub mod k8s; | pub mod k8s; | ||||||
|  | |||||||
| @ -20,8 +20,9 @@ use crate::{ | |||||||
|         }, |         }, | ||||||
|         prometheus::prometheus::{Prometheus, PrometheusReceiver}, |         prometheus::prometheus::{Prometheus, PrometheusReceiver}, | ||||||
|     }, |     }, | ||||||
|     topology::{Url, oberservability::monitoring::AlertReceiver}, |     topology::oberservability::monitoring::AlertReceiver, | ||||||
| }; | }; | ||||||
|  | use harmony_types::net::Url; | ||||||
| 
 | 
 | ||||||
| #[derive(Debug, Clone, Serialize)] | #[derive(Debug, Clone, Serialize)] | ||||||
| pub struct DiscordWebhook { | pub struct DiscordWebhook { | ||||||
|  | |||||||
| @ -19,8 +19,9 @@ use crate::{ | |||||||
|         }, |         }, | ||||||
|         prometheus::prometheus::{Prometheus, PrometheusReceiver}, |         prometheus::prometheus::{Prometheus, PrometheusReceiver}, | ||||||
|     }, |     }, | ||||||
|     topology::{Url, oberservability::monitoring::AlertReceiver}, |     topology::oberservability::monitoring::AlertReceiver, | ||||||
| }; | }; | ||||||
|  | use harmony_types::net::Url; | ||||||
| 
 | 
 | ||||||
| #[derive(Debug, Clone, Serialize)] | #[derive(Debug, Clone, Serialize)] | ||||||
| pub struct WebhookReceiver { | pub struct WebhookReceiver { | ||||||
|  | |||||||
| @ -4,7 +4,7 @@ use async_trait::async_trait; | |||||||
| use serde::Serialize; | use serde::Serialize; | ||||||
| 
 | 
 | ||||||
| use crate::{ | use crate::{ | ||||||
|     data::{Id, Version}, |     data::Version, | ||||||
|     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, |     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, | ||||||
|     inventory::Inventory, |     inventory::Inventory, | ||||||
|     modules::{ |     modules::{ | ||||||
| @ -15,6 +15,7 @@ use crate::{ | |||||||
|     score::Score, |     score::Score, | ||||||
|     topology::{PreparationOutcome, Topology, oberservability::monitoring::AlertReceiver}, |     topology::{PreparationOutcome, Topology, oberservability::monitoring::AlertReceiver}, | ||||||
| }; | }; | ||||||
|  | use harmony_types::id::Id; | ||||||
| 
 | 
 | ||||||
| #[derive(Debug, Clone, Serialize)] | #[derive(Debug, Clone, Serialize)] | ||||||
| pub struct ApplicationMonitoringScore { | pub struct ApplicationMonitoringScore { | ||||||
|  | |||||||
| @ -6,13 +6,14 @@ use serde::Serialize; | |||||||
| use strum::{Display, EnumString}; | use strum::{Display, EnumString}; | ||||||
| 
 | 
 | ||||||
| use crate::{ | use crate::{ | ||||||
|     data::{Id, Version}, |     data::Version, | ||||||
|     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, |     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, | ||||||
|     inventory::Inventory, |     inventory::Inventory, | ||||||
|     modules::monitoring::ntfy::helm::ntfy_helm_chart::ntfy_helm_chart_score, |     modules::monitoring::ntfy::helm::ntfy_helm_chart::ntfy_helm_chart_score, | ||||||
|     score::Score, |     score::Score, | ||||||
|     topology::{HelmCommand, K8sclient, MultiTargetTopology, Topology, k8s::K8sClient}, |     topology::{HelmCommand, K8sclient, MultiTargetTopology, Topology, k8s::K8sClient}, | ||||||
| }; | }; | ||||||
|  | use harmony_types::id::Id; | ||||||
| 
 | 
 | ||||||
| #[derive(Debug, Clone, Serialize)] | #[derive(Debug, Clone, Serialize)] | ||||||
| pub struct NtfyScore { | pub struct NtfyScore { | ||||||
| @ -37,18 +38,6 @@ pub struct NtfyInterpret { | |||||||
|     pub score: NtfyScore, |     pub score: NtfyScore, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| #[derive(Debug, EnumString, Display)] |  | ||||||
| enum NtfyAccessMode { |  | ||||||
|     #[strum(serialize = "read-write", serialize = "rw")] |  | ||||||
|     ReadWrite, |  | ||||||
|     #[strum(serialize = "read-only", serialize = "ro", serialize = "read")] |  | ||||||
|     ReadOnly, |  | ||||||
|     #[strum(serialize = "write-only", serialize = "wo", serialize = "write")] |  | ||||||
|     WriteOnly, |  | ||||||
|     #[strum(serialize = "deny", serialize = "none")] |  | ||||||
|     Deny, |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| #[derive(Debug, EnumString, Display)] | #[derive(Debug, EnumString, Display)] | ||||||
| enum NtfyRole { | enum NtfyRole { | ||||||
|     #[strum(serialize = "user")] |     #[strum(serialize = "user")] | ||||||
|  | |||||||
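Both the removed NtfyAccessMode enum and the surviving NtfyRole rely on strum's EnumString and Display derives to map between Rust variants and ntfy's string identifiers. A small self-contained sketch of that round-trip — the enum is redeclared here only for illustration because NtfyRole is private to the module, and the Admin variant is an assumption (only "user" appears in the hunk above):

    use std::str::FromStr;

    use strum::{Display, EnumString};

    // Illustrative stand-in for the private NtfyRole enum; "admin" is assumed.
    #[derive(Debug, PartialEq, EnumString, Display)]
    enum Role {
        #[strum(serialize = "user")]
        User,
        #[strum(serialize = "admin")]
        Admin,
    }

    fn main() {
        // EnumString provides FromStr; the serialize attribute defines the accepted input.
        assert_eq!(Role::from_str("user").unwrap(), Role::User);
        // Display formats the variant back to its serialize string.
        assert_eq!(Role::Admin.to_string(), "admin");
    }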
							
								
								
									
150  harmony/src/modules/okd/ipxe.rs  Normal file
							| @ -0,0 +1,150 @@ | |||||||
|  | use askama::Template; | ||||||
|  | use async_trait::async_trait; | ||||||
|  | use derive_new::new; | ||||||
|  | use harmony_types::net::Url; | ||||||
|  | use serde::Serialize; | ||||||
|  | use std::net::IpAddr; | ||||||
|  | 
 | ||||||
|  | use crate::{ | ||||||
|  |     data::{FileContent, FilePath, Version}, | ||||||
|  |     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, | ||||||
|  |     inventory::Inventory, | ||||||
|  |     modules::{dhcp::DhcpScore, http::StaticFilesHttpScore, tftp::TftpScore}, | ||||||
|  |     score::Score, | ||||||
|  |     topology::{DhcpServer, HttpServer, Router, TftpServer, Topology}, | ||||||
|  | }; | ||||||
|  | use harmony_types::id::Id; | ||||||
|  | 
 | ||||||
|  | #[derive(Debug, new, Clone, Serialize)] | ||||||
|  | pub struct OkdIpxeScore { | ||||||
|  |     pub kickstart_filename: String, | ||||||
|  |     pub harmony_inventory_agent: String, | ||||||
|  |     pub cluster_pubkey_filename: String, | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | impl<T: Topology + DhcpServer + TftpServer + HttpServer + Router> Score<T> for OkdIpxeScore { | ||||||
|  |     fn create_interpret(&self) -> Box<dyn Interpret<T>> { | ||||||
|  |         Box::new(IpxeInterpret::new(self.clone())) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn name(&self) -> String { | ||||||
|  |         "OkdIpxeScore".to_string() | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | #[derive(Debug, new, Clone)] | ||||||
|  | pub struct IpxeInterpret { | ||||||
|  |     score: OkdIpxeScore, | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | #[async_trait] | ||||||
|  | impl<T: Topology + DhcpServer + TftpServer + HttpServer + Router> Interpret<T> for IpxeInterpret { | ||||||
|  |     async fn execute( | ||||||
|  |         &self, | ||||||
|  |         inventory: &Inventory, | ||||||
|  |         topology: &T, | ||||||
|  |     ) -> Result<Outcome, InterpretError> { | ||||||
|  |         let gateway_ip = topology.get_gateway(); | ||||||
|  | 
 | ||||||
|  |         let scores: Vec<Box<dyn Score<T>>> = vec![ | ||||||
|  |             Box::new(DhcpScore { | ||||||
|  |                 host_binding: vec![], | ||||||
|  |                 next_server: Some(topology.get_gateway()), | ||||||
|  |                 boot_filename: None, | ||||||
|  |                 filename: Some("undionly.kpxe".to_string()), | ||||||
|  |                 filename64: Some("ipxe.efi".to_string()), | ||||||
|  |                 filenameipxe: Some(format!("http://{gateway_ip}:8080/boot.ipxe").to_string()), | ||||||
|  |             }), | ||||||
|  |             Box::new(TftpScore { | ||||||
|  |                 files_to_serve: Url::LocalFolder("./data/pxe/okd/tftpboot/".to_string()), | ||||||
|  |             }), | ||||||
|  |             Box::new(StaticFilesHttpScore { | ||||||
|  |                 // TODO The current russh based copy is way too slow, check for a lib update or use scp
 | ||||||
|  |                 // when available
 | ||||||
|  |                 //
 | ||||||
|  |                 // For now just run :
 | ||||||
|  |                 // scp -r data/pxe/okd/http_files/* root@192.168.1.1:/usr/local/http/
 | ||||||
|  |                 //
 | ||||||
|  |                 folder_to_serve: None, | ||||||
|  |                 // folder_to_serve: Some(Url::LocalFolder("./data/pxe/okd/http_files/".to_string())),
 | ||||||
|  |                 files: vec![ | ||||||
|  |                     FileContent { | ||||||
|  |                         path: FilePath::Relative("boot.ipxe".to_string()), | ||||||
|  |                         content: BootIpxeTpl { | ||||||
|  |                             gateway_ip: &gateway_ip, | ||||||
|  |                         } | ||||||
|  |                         .to_string(), | ||||||
|  |                     }, | ||||||
|  |                     FileContent { | ||||||
|  |                         path: FilePath::Relative(self.score.kickstart_filename.clone()), | ||||||
|  |                         content: InventoryKickstartTpl { | ||||||
|  |                             gateway_ip: &gateway_ip, | ||||||
|  |                             harmony_inventory_agent: &self.score.harmony_inventory_agent, | ||||||
|  |                             cluster_pubkey_filename: &self.score.cluster_pubkey_filename, | ||||||
|  |                         } | ||||||
|  |                         .to_string(), | ||||||
|  |                     }, | ||||||
|  |                     FileContent { | ||||||
|  |                         path: FilePath::Relative("fallback.ipxe".to_string()), | ||||||
|  |                         content: FallbackIpxeTpl { | ||||||
|  |                             gateway_ip: &gateway_ip, | ||||||
|  |                             kickstart_filename: &self.score.kickstart_filename, | ||||||
|  |                         } | ||||||
|  |                         .to_string(), | ||||||
|  |                     }, | ||||||
|  |                 ], | ||||||
|  |             }), | ||||||
|  |         ]; | ||||||
|  | 
 | ||||||
|  |         for score in scores { | ||||||
|  |             let result = score.interpret(inventory, topology).await; | ||||||
|  |             match result { | ||||||
|  |                 Ok(outcome) => match outcome.status { | ||||||
|  |                     InterpretStatus::SUCCESS => continue, | ||||||
|  |                     InterpretStatus::NOOP => continue, | ||||||
|  |                     _ => return Err(InterpretError::new(outcome.message)), | ||||||
|  |                 }, | ||||||
|  |                 Err(e) => return Err(e), | ||||||
|  |             }; | ||||||
|  |         } | ||||||
|  | 
 | ||||||
|  |         Ok(Outcome::success("Ipxe installed".to_string())) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn get_name(&self) -> InterpretName { | ||||||
|  |         InterpretName::Ipxe | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn get_version(&self) -> Version { | ||||||
|  |         todo!() | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn get_status(&self) -> InterpretStatus { | ||||||
|  |         todo!() | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn get_children(&self) -> Vec<Id> { | ||||||
|  |         todo!() | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | #[derive(Template)] | ||||||
|  | #[template(path = "boot.ipxe.j2")] | ||||||
|  | struct BootIpxeTpl<'a> { | ||||||
|  |     gateway_ip: &'a IpAddr, | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | #[derive(Template)] | ||||||
|  | #[template(path = "fallback.ipxe.j2")] | ||||||
|  | struct FallbackIpxeTpl<'a> { | ||||||
|  |     gateway_ip: &'a IpAddr, | ||||||
|  |     kickstart_filename: &'a str, | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | #[derive(Template)] | ||||||
|  | #[template(path = "inventory.kickstart.j2")] | ||||||
|  | struct InventoryKickstartTpl<'a> { | ||||||
|  |     gateway_ip: &'a IpAddr, | ||||||
|  |     cluster_pubkey_filename: &'a str, | ||||||
|  |     harmony_inventory_agent: &'a str, | ||||||
|  | } | ||||||
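A hedged sketch of how this new score might be driven from inside the crate, against any topology that satisfies all four bounds. The wrapper function and the field values are illustrative assumptions; the struct fields, trait bounds, and the create_interpret/execute flow come straight from the file above:

    use crate::{
        interpret::{InterpretError, Outcome},
        inventory::Inventory,
        modules::okd::ipxe::OkdIpxeScore,
        score::Score,
        topology::{DhcpServer, HttpServer, Router, TftpServer, Topology},
    };

    // Hypothetical helper: provisions the PXE boot assets for an OKD install.
    async fn provision_okd_ipxe<T>(
        inventory: &Inventory,
        topology: &T,
    ) -> Result<Outcome, InterpretError>
    where
        T: Topology + DhcpServer + TftpServer + HttpServer + Router,
    {
        let score = OkdIpxeScore {
            // Placeholder artifact names; real callers supply their own.
            kickstart_filename: "inventory.ks".to_string(),
            harmony_inventory_agent: "harmony_inventory_agent".to_string(),
            cluster_pubkey_filename: "cluster_ssh_key.pub".to_string(),
        };

        // create_interpret() boxes an IpxeInterpret, which in turn runs the
        // DhcpScore, TftpScore and StaticFilesHttpScore declared above.
        let interpret = score.create_interpret();
        interpret.execute(inventory, topology).await
    }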
| @ -2,5 +2,6 @@ pub mod bootstrap_dhcp; | |||||||
| pub mod bootstrap_load_balancer; | pub mod bootstrap_load_balancer; | ||||||
| pub mod dhcp; | pub mod dhcp; | ||||||
| pub mod dns; | pub mod dns; | ||||||
|  | pub mod ipxe; | ||||||
| pub mod load_balancer; | pub mod load_balancer; | ||||||
| pub mod upgrade; | pub mod upgrade; | ||||||
|  | |||||||
| @ -5,12 +5,13 @@ use serde::Serialize; | |||||||
| use tokio::sync::RwLock; | use tokio::sync::RwLock; | ||||||
| 
 | 
 | ||||||
| use crate::{ | use crate::{ | ||||||
|     data::{Id, Version}, |     data::Version, | ||||||
|     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, |     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, | ||||||
|     inventory::Inventory, |     inventory::Inventory, | ||||||
|     score::Score, |     score::Score, | ||||||
|     topology::Topology, |     topology::Topology, | ||||||
| }; | }; | ||||||
|  | use harmony_types::id::Id; | ||||||
| 
 | 
 | ||||||
| #[derive(Debug, Clone)] | #[derive(Debug, Clone)] | ||||||
| pub struct OPNsenseShellCommandScore { | pub struct OPNsenseShellCommandScore { | ||||||
|  | |||||||
| @ -24,7 +24,7 @@ use crate::modules::monitoring::kube_prometheus::crd::service_monitor::{ | |||||||
| use crate::topology::oberservability::monitoring::AlertReceiver; | use crate::topology::oberservability::monitoring::AlertReceiver; | ||||||
| use crate::topology::{K8sclient, Topology, k8s::K8sClient}; | use crate::topology::{K8sclient, Topology, k8s::K8sClient}; | ||||||
| use crate::{ | use crate::{ | ||||||
|     data::{Id, Version}, |     data::Version, | ||||||
|     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, |     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, | ||||||
|     inventory::Inventory, |     inventory::Inventory, | ||||||
|     modules::monitoring::kube_prometheus::crd::{ |     modules::monitoring::kube_prometheus::crd::{ | ||||||
| @ -37,6 +37,7 @@ use crate::{ | |||||||
|     }, |     }, | ||||||
|     score::Score, |     score::Score, | ||||||
| }; | }; | ||||||
|  | use harmony_types::id::Id; | ||||||
| 
 | 
 | ||||||
| use super::prometheus::PrometheusApplicationMonitoring; | use super::prometheus::PrometheusApplicationMonitoring; | ||||||
| 
 | 
 | ||||||
|  | |||||||
Some files were not shown because too many files have changed in this diff