Compare commits
	
		
			No commits in common. "665ed24f6514daf481d0d3b5861d45713c62cbd7" and "fedb3465484719bdd6b3c49ed1f213aa01e9c7fd" have entirely different histories.
		
	
	
		
			665ed24f65
			...
			fedb346548
		
	
		
							
								
								
									
										2
									
								
								.gitattributes
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										2
									
								
								.gitattributes
									
									
									
									
										vendored
									
									
								
							| @ -2,5 +2,3 @@ bootx64.efi filter=lfs diff=lfs merge=lfs -text | ||||
| grubx64.efi filter=lfs diff=lfs merge=lfs -text | ||||
| initrd filter=lfs diff=lfs merge=lfs -text | ||||
| linux filter=lfs diff=lfs merge=lfs -text | ||||
| data/okd/bin/* filter=lfs diff=lfs merge=lfs -text | ||||
| data/okd/installer_image/* filter=lfs diff=lfs merge=lfs -text | ||||
|  | ||||
							
								
								
									
										1
									
								
								.gitignore
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										1
									
								
								.gitignore
									
									
									
									
										vendored
									
									
								
							| @ -3,7 +3,6 @@ private_repos/ | ||||
| 
 | ||||
| ### Harmony ### | ||||
| harmony.log | ||||
| data/okd/installation_files* | ||||
| 
 | ||||
| ### Helm ### | ||||
| # Chart dependencies | ||||
|  | ||||
| @ -1,20 +0,0 @@ | ||||
| { | ||||
|   "db_name": "SQLite", | ||||
|   "query": "SELECT host_id FROM host_role_mapping WHERE role = ?", | ||||
|   "describe": { | ||||
|     "columns": [ | ||||
|       { | ||||
|         "name": "host_id", | ||||
|         "ordinal": 0, | ||||
|         "type_info": "Text" | ||||
|       } | ||||
|     ], | ||||
|     "parameters": { | ||||
|       "Right": 1 | ||||
|     }, | ||||
|     "nullable": [ | ||||
|       false | ||||
|     ] | ||||
|   }, | ||||
|   "hash": "2ea29df2326f7c84bd4100ad510a3fd4878dc2e217dc83f9bf45a402dfd62a91" | ||||
| } | ||||
| @ -1,32 +0,0 @@ | ||||
| { | ||||
|   "db_name": "SQLite", | ||||
|   "query": "\n        SELECT\n            p1.id,\n            p1.version_id,\n            p1.data as \"data: Json<PhysicalHost>\"\n        FROM\n            physical_hosts p1\n        INNER JOIN (\n            SELECT\n                id,\n                MAX(version_id) AS max_version\n            FROM\n                physical_hosts\n            GROUP BY\n                id\n        ) p2 ON p1.id = p2.id AND p1.version_id = p2.max_version\n        ", | ||||
|   "describe": { | ||||
|     "columns": [ | ||||
|       { | ||||
|         "name": "id", | ||||
|         "ordinal": 0, | ||||
|         "type_info": "Text" | ||||
|       }, | ||||
|       { | ||||
|         "name": "version_id", | ||||
|         "ordinal": 1, | ||||
|         "type_info": "Text" | ||||
|       }, | ||||
|       { | ||||
|         "name": "data: Json<PhysicalHost>", | ||||
|         "ordinal": 2, | ||||
|         "type_info": "Blob" | ||||
|       } | ||||
|     ], | ||||
|     "parameters": { | ||||
|       "Right": 0 | ||||
|     }, | ||||
|     "nullable": [ | ||||
|       false, | ||||
|       false, | ||||
|       false | ||||
|     ] | ||||
|   }, | ||||
|   "hash": "8d247918eca10a88b784ee353db090c94a222115c543231f2140cba27bd0f067" | ||||
| } | ||||
| @ -1,12 +0,0 @@ | ||||
| { | ||||
|   "db_name": "SQLite", | ||||
|   "query": "\n        INSERT INTO host_role_mapping (host_id, role)\n        VALUES (?, ?)\n        ", | ||||
|   "describe": { | ||||
|     "columns": [], | ||||
|     "parameters": { | ||||
|       "Right": 2 | ||||
|     }, | ||||
|     "nullable": [] | ||||
|   }, | ||||
|   "hash": "df7a7c9cfdd0972e2e0ce7ea444ba8bc9d708a4fb89d5593a0be2bbebde62aff" | ||||
| } | ||||
							
								
								
									
										688
									
								
								Cargo.lock
									
									
									
										generated
									
									
									
								
							
							
						
						
									
										688
									
								
								Cargo.lock
									
									
									
										generated
									
									
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							| @ -1,3 +1,4 @@ | ||||
| use log::debug; | ||||
| use mdns_sd::{ServiceDaemon, ServiceEvent}; | ||||
| 
 | ||||
| use crate::SERVICE_TYPE; | ||||
| @ -73,7 +74,7 @@ pub async fn discover() { | ||||
|     // }
 | ||||
| } | ||||
| 
 | ||||
| async fn _discover_example() { | ||||
| async fn discover_example() { | ||||
|     use mdns_sd::{ServiceDaemon, ServiceEvent}; | ||||
| 
 | ||||
|     // Create a daemon
 | ||||
|  | ||||
							
								
								
									
										
											BIN
										
									
								
								data/okd/bin/kubectl
									 (Stored with Git LFS)
									
									
									
									
								
							
							
						
						
									
										
											BIN
										
									
								
								data/okd/bin/kubectl
									 (Stored with Git LFS)
									
									
									
									
								
							
										
											Binary file not shown.
										
									
								
							
							
								
								
									
										
											BIN
										
									
								
								data/okd/bin/oc
									 (Stored with Git LFS)
									
									
									
									
								
							
							
						
						
									
										
											BIN
										
									
								
								data/okd/bin/oc
									 (Stored with Git LFS)
									
									
									
									
								
							
										
											Binary file not shown.
										
									
								
							
							
								
								
									
										
											BIN
										
									
								
								data/okd/bin/oc_README.md
									 (Stored with Git LFS)
									
									
									
									
								
							
							
						
						
									
										
											BIN
										
									
								
								data/okd/bin/oc_README.md
									 (Stored with Git LFS)
									
									
									
									
								
							
										
											Binary file not shown.
										
									
								
							
							
								
								
									
										
											BIN
										
									
								
								data/okd/bin/openshift-install
									 (Stored with Git LFS)
									
									
									
									
								
							
							
						
						
									
										
											BIN
										
									
								
								data/okd/bin/openshift-install
									 (Stored with Git LFS)
									
									
									
									
								
							
										
											Binary file not shown.
										
									
								
							
							
								
								
									
										
											BIN
										
									
								
								data/okd/bin/openshift-install_README.md
									 (Stored with Git LFS)
									
									
									
									
								
							
							
						
						
									
										
											BIN
										
									
								
								data/okd/bin/openshift-install_README.md
									 (Stored with Git LFS)
									
									
									
									
								
							
										
											Binary file not shown.
										
									
								
							
							
								
								
									
										
											BIN
										
									
								
								data/okd/installer_image/scos-9.0.20250510-0-live-initramfs.x86_64.img
									 (Stored with Git LFS)
									
									
									
									
								
							
							
						
						
									
										
											BIN
										
									
								
								data/okd/installer_image/scos-9.0.20250510-0-live-initramfs.x86_64.img
									 (Stored with Git LFS)
									
									
									
									
								
							
										
											Binary file not shown.
										
									
								
							
							
								
								
									
										
											BIN
										
									
								
								data/okd/installer_image/scos-9.0.20250510-0-live-kernel.x86_64
									 (Stored with Git LFS)
									
									
									
									
								
							
							
						
						
									
										
											BIN
										
									
								
								data/okd/installer_image/scos-9.0.20250510-0-live-kernel.x86_64
									 (Stored with Git LFS)
									
									
									
									
								
							
										
											Binary file not shown.
										
									
								
							
							
								
								
									
										
											BIN
										
									
								
								data/okd/installer_image/scos-9.0.20250510-0-live-rootfs.x86_64.img
									 (Stored with Git LFS)
									
									
									
									
								
							
							
						
						
									
										
											BIN
										
									
								
								data/okd/installer_image/scos-9.0.20250510-0-live-rootfs.x86_64.img
									 (Stored with Git LFS)
									
									
									
									
								
							
										
											Binary file not shown.
										
									
								
							| @ -1 +0,0 @@ | ||||
| scos-9.0.20250510-0-live-initramfs.x86_64.img | ||||
| @ -1 +0,0 @@ | ||||
| scos-9.0.20250510-0-live-kernel.x86_64 | ||||
| @ -1 +0,0 @@ | ||||
| scos-9.0.20250510-0-live-rootfs.x86_64.img | ||||
| @ -1,8 +0,0 @@ | ||||
| ## Bios settings | ||||
| 
 | ||||
| 1. CSM : Disabled (compatibility support to boot gpt formatted drives) | ||||
| 2. Secure boot : disabled | ||||
| 3. Boot order : | ||||
|     1. Local Hard drive | ||||
|     2. PXE IPv4 | ||||
| 4. System clock, make sure it is adjusted, otherwise you will get invalid certificates error | ||||
| @ -2,7 +2,7 @@ use harmony::{ | ||||
|     inventory::Inventory, | ||||
|     modules::{ | ||||
|         dummy::{ErrorScore, PanicScore, SuccessScore}, | ||||
|         inventory::LaunchDiscoverInventoryAgentScore, | ||||
|         inventory::DiscoverInventoryAgentScore, | ||||
|     }, | ||||
|     topology::LocalhostTopology, | ||||
| }; | ||||
| @ -16,7 +16,7 @@ async fn main() { | ||||
|             Box::new(SuccessScore {}), | ||||
|             Box::new(ErrorScore {}), | ||||
|             Box::new(PanicScore {}), | ||||
|             Box::new(LaunchDiscoverInventoryAgentScore { | ||||
|             Box::new(DiscoverInventoryAgentScore { | ||||
|                 discovery_timeout: Some(10), | ||||
|             }), | ||||
|         ], | ||||
|  | ||||
| @ -13,7 +13,6 @@ harmony_types = { path = "../../harmony_types" } | ||||
| cidr = { workspace = true } | ||||
| tokio = { workspace = true } | ||||
| harmony_macros = { path = "../../harmony_macros" } | ||||
| harmony_secret = { path = "../../harmony_secret" } | ||||
| log = { workspace = true } | ||||
| env_logger = { workspace = true } | ||||
| url = { workspace = true } | ||||
|  | ||||
| @ -5,24 +5,22 @@ use std::{ | ||||
| 
 | ||||
| use cidr::Ipv4Cidr; | ||||
| use harmony::{ | ||||
|     config::secret::SshKeyPair, | ||||
|     data::{FileContent, FilePath}, | ||||
|     hardware::{HostCategory, Location, PhysicalHost, SwitchGroup}, | ||||
|     hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup}, | ||||
|     infra::opnsense::OPNSenseManagementInterface, | ||||
|     inventory::Inventory, | ||||
|     modules::{ | ||||
|         http::StaticFilesHttpScore, | ||||
|         ipxe::IpxeScore, | ||||
|         okd::{ | ||||
|             bootstrap_dhcp::OKDBootstrapDhcpScore, | ||||
|             bootstrap_load_balancer::OKDBootstrapLoadBalancerScore, dhcp::OKDDhcpScore, | ||||
|             dns::OKDDnsScore, ipxe::OKDIpxeScore, | ||||
|             dns::OKDDnsScore, | ||||
|         }, | ||||
|         tftp::TftpScore, | ||||
|     }, | ||||
|     topology::{LogicalHost, UnmanagedRouter}, | ||||
| }; | ||||
| use harmony_macros::{ip, mac_address}; | ||||
| use harmony_secret::SecretManager; | ||||
| use harmony_types::net::Url; | ||||
| 
 | ||||
| #[tokio::main] | ||||
| @ -126,28 +124,14 @@ async fn main() { | ||||
|     let load_balancer_score = | ||||
|         harmony::modules::okd::load_balancer::OKDLoadBalancerScore::new(&topology); | ||||
| 
 | ||||
|     let ssh_key = SecretManager::get_or_prompt::<SshKeyPair>().await.unwrap(); | ||||
| 
 | ||||
|     let tftp_score = TftpScore::new(Url::LocalFolder("./data/watchguard/tftpboot".to_string())); | ||||
|     let http_score = StaticFilesHttpScore { | ||||
|         folder_to_serve: Some(Url::LocalFolder( | ||||
|             "./data/watchguard/pxe-http-files".to_string(), | ||||
|         )), | ||||
|         files: vec![], | ||||
|         remote_path: None, | ||||
|     }; | ||||
| 
 | ||||
|     let kickstart_filename = "inventory.kickstart".to_string(); | ||||
|     let harmony_inventory_agent = "harmony_inventory_agent".to_string(); | ||||
| 
 | ||||
|     let ipxe_score = OKDIpxeScore { | ||||
|         kickstart_filename, | ||||
|         harmony_inventory_agent, | ||||
|         cluster_pubkey: FileContent { | ||||
|             path: FilePath::Relative("cluster_ssh_key.pub".to_string()), | ||||
|             content: ssh_key.public, | ||||
|         }, | ||||
|     }; | ||||
|     let ipxe_score = IpxeScore::new(); | ||||
| 
 | ||||
|     harmony_tui::run( | ||||
|         inventory, | ||||
|  | ||||
| @ -1,21 +0,0 @@ | ||||
| [package] | ||||
| name = "example-okd-install" | ||||
| edition = "2024" | ||||
| version.workspace = true | ||||
| readme.workspace = true | ||||
| license.workspace = true | ||||
| publish = false | ||||
| 
 | ||||
| [dependencies] | ||||
| harmony = { path = "../../harmony" } | ||||
| harmony_cli = { path = "../../harmony_cli" } | ||||
| harmony_types = { path = "../../harmony_types" } | ||||
| harmony_secret = { path = "../../harmony_secret" } | ||||
| harmony_secret_derive = { path = "../../harmony_secret_derive" } | ||||
| cidr = { workspace = true } | ||||
| tokio = { workspace = true } | ||||
| harmony_macros = { path = "../../harmony_macros" } | ||||
| log = { workspace = true } | ||||
| env_logger = { workspace = true } | ||||
| url = { workspace = true } | ||||
| serde.workspace = true | ||||
| @ -1,4 +0,0 @@ | ||||
| export HARMONY_SECRET_NAMESPACE=example-vms | ||||
| export HARMONY_SECRET_STORE=file | ||||
| export HARMONY_DATABASE_URL=sqlite://harmony_vms.sqlite RUST_LOG=info  | ||||
| export RUST_LOG=info | ||||
| @ -1,34 +0,0 @@ | ||||
| mod topology; | ||||
| 
 | ||||
| use crate::topology::{get_inventory, get_topology}; | ||||
| use harmony::{ | ||||
|     config::secret::SshKeyPair, | ||||
|     data::{FileContent, FilePath}, | ||||
|     modules::okd::{installation::OKDInstallationPipeline, ipxe::OKDIpxeScore}, | ||||
|     score::Score, | ||||
|     topology::HAClusterTopology, | ||||
| }; | ||||
| use harmony_secret::SecretManager; | ||||
| 
 | ||||
| #[tokio::main] | ||||
| async fn main() { | ||||
|     let inventory = get_inventory(); | ||||
|     let topology = get_topology().await; | ||||
| 
 | ||||
|     let ssh_key = SecretManager::get_or_prompt::<SshKeyPair>().await.unwrap(); | ||||
| 
 | ||||
|     let mut scores: Vec<Box<dyn Score<HAClusterTopology>>> = vec![Box::new(OKDIpxeScore { | ||||
|         kickstart_filename: "inventory.kickstart".to_string(), | ||||
|         harmony_inventory_agent: "harmony_inventory_agent".to_string(), | ||||
|         cluster_pubkey: FileContent { | ||||
|             path: FilePath::Relative("cluster_ssh_key.pub".to_string()), | ||||
|             content: ssh_key.public, | ||||
|         }, | ||||
|     })]; | ||||
| 
 | ||||
|     scores.append(&mut OKDInstallationPipeline::get_all_scores().await); | ||||
| 
 | ||||
|     harmony_cli::run(inventory, topology, scores, None) | ||||
|         .await | ||||
|         .unwrap(); | ||||
| } | ||||
| @ -1,77 +0,0 @@ | ||||
| use cidr::Ipv4Cidr; | ||||
| use harmony::{ | ||||
|     hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup}, | ||||
|     infra::opnsense::OPNSenseManagementInterface, | ||||
|     inventory::Inventory, | ||||
|     topology::{HAClusterTopology, LogicalHost, UnmanagedRouter}, | ||||
| }; | ||||
| use harmony_macros::{ip, ipv4}; | ||||
| use harmony_secret::{Secret, SecretManager}; | ||||
| use serde::{Deserialize, Serialize}; | ||||
| use std::{net::IpAddr, sync::Arc}; | ||||
| 
 | ||||
| #[derive(Secret, Serialize, Deserialize, Debug, PartialEq)] | ||||
| struct OPNSenseFirewallConfig { | ||||
|     username: String, | ||||
|     password: String, | ||||
| } | ||||
| 
 | ||||
| pub async fn get_topology() -> HAClusterTopology { | ||||
|     let firewall = harmony::topology::LogicalHost { | ||||
|         ip: ip!("192.168.1.1"), | ||||
|         name: String::from("opnsense-1"), | ||||
|     }; | ||||
| 
 | ||||
|     let config = SecretManager::get_or_prompt::<OPNSenseFirewallConfig>().await; | ||||
|     let config = config.unwrap(); | ||||
| 
 | ||||
|     let opnsense = Arc::new( | ||||
|         harmony::infra::opnsense::OPNSenseFirewall::new( | ||||
|             firewall, | ||||
|             None, | ||||
|             &config.username, | ||||
|             &config.password, | ||||
|         ) | ||||
|         .await, | ||||
|     ); | ||||
|     let lan_subnet = ipv4!("192.168.1.0"); | ||||
|     let gateway_ipv4 = ipv4!("192.168.1.1"); | ||||
|     let gateway_ip = IpAddr::V4(gateway_ipv4); | ||||
|     harmony::topology::HAClusterTopology { | ||||
|         domain_name: "demo.harmony.mcd".to_string(), | ||||
|         router: Arc::new(UnmanagedRouter::new( | ||||
|             gateway_ip, | ||||
|             Ipv4Cidr::new(lan_subnet, 24).unwrap(), | ||||
|         )), | ||||
|         load_balancer: opnsense.clone(), | ||||
|         firewall: opnsense.clone(), | ||||
|         tftp_server: opnsense.clone(), | ||||
|         http_server: opnsense.clone(), | ||||
|         dhcp_server: opnsense.clone(), | ||||
|         dns_server: opnsense.clone(), | ||||
|         control_plane: vec![LogicalHost { | ||||
|             ip: ip!("192.168.1.20"), | ||||
|             name: "master".to_string(), | ||||
|         }], | ||||
|         bootstrap_host: LogicalHost { | ||||
|             ip: ip!("192.168.1.10"), | ||||
|             name: "bootstrap".to_string(), | ||||
|         }, | ||||
|         workers: vec![], | ||||
|         switch: vec![], | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| pub fn get_inventory() -> Inventory { | ||||
|     Inventory { | ||||
|         location: Location::new( | ||||
|             "Some virtual machine or maybe a physical machine if you're cool".to_string(), | ||||
|             "testopnsense".to_string(), | ||||
|         ), | ||||
|         switch: SwitchGroup::from([]), | ||||
|         firewall_mgmt: Box::new(OPNSenseManagementInterface::new()), | ||||
|         storage_host: vec![], | ||||
|         worker_host: vec![], | ||||
|         control_plane_host: vec![], | ||||
|     } | ||||
| } | ||||
| @ -1,7 +0,0 @@ | ||||
| -----BEGIN OPENSSH PRIVATE KEY----- | ||||
| b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW | ||||
| QyNTUxOQAAACAcemw8pbwuvHFaYynxBbS0Cf3ThYuj1Utr7CDqjwySHAAAAJikacCNpGnA | ||||
| jQAAAAtzc2gtZWQyNTUxOQAAACAcemw8pbwuvHFaYynxBbS0Cf3ThYuj1Utr7CDqjwySHA | ||||
| AAAECiiKk4V6Q5cVs6axDM4sjAzZn/QCZLQekmYQXS9XbEYxx6bDylvC68cVpjKfEFtLQJ | ||||
| /dOFi6PVS2vsIOqPDJIcAAAAEGplYW5nYWJAbGlsaWFuZTIBAgMEBQ== | ||||
| -----END OPENSSH PRIVATE KEY----- | ||||
| @ -1 +0,0 @@ | ||||
| ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBx6bDylvC68cVpjKfEFtLQJ/dOFi6PVS2vsIOqPDJIc jeangab@liliane2 | ||||
| @ -1,12 +1,7 @@ | ||||
| mod topology; | ||||
| 
 | ||||
| use crate::topology::{get_inventory, get_topology}; | ||||
| use harmony::{ | ||||
|     config::secret::SshKeyPair, | ||||
|     data::{FileContent, FilePath}, | ||||
|     modules::okd::ipxe::OKDIpxeScore, | ||||
| }; | ||||
| use harmony_secret::SecretManager; | ||||
| use harmony::modules::okd::ipxe::OkdIpxeScore; | ||||
| 
 | ||||
| #[tokio::main] | ||||
| async fn main() { | ||||
| @ -14,16 +9,13 @@ async fn main() { | ||||
|     let topology = get_topology().await; | ||||
| 
 | ||||
|     let kickstart_filename = "inventory.kickstart".to_string(); | ||||
|     let cluster_pubkey_filename = "cluster_ssh_key.pub".to_string(); | ||||
|     let harmony_inventory_agent = "harmony_inventory_agent".to_string(); | ||||
|     let ssh_key = SecretManager::get_or_prompt::<SshKeyPair>().await.unwrap(); | ||||
| 
 | ||||
|     let ipxe_score = OKDIpxeScore { | ||||
|     let ipxe_score = OkdIpxeScore { | ||||
|         kickstart_filename, | ||||
|         harmony_inventory_agent, | ||||
|         cluster_pubkey: FileContent { | ||||
|             path: FilePath::Relative("cluster_ssh_key.pub".to_string()), | ||||
|             content: ssh_key.public, | ||||
|         }, | ||||
|         cluster_pubkey_filename, | ||||
|     }; | ||||
| 
 | ||||
|     harmony_cli::run(inventory, topology, vec![Box::new(ipxe_score)], None) | ||||
|  | ||||
| @ -1,22 +1,28 @@ | ||||
| use cidr::Ipv4Cidr; | ||||
| use harmony::{ | ||||
|     config::secret::OPNSenseFirewallCredentials, | ||||
|     hardware::{Location, SwitchGroup}, | ||||
|     hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup}, | ||||
|     infra::opnsense::OPNSenseManagementInterface, | ||||
|     inventory::Inventory, | ||||
|     topology::{HAClusterTopology, LogicalHost, UnmanagedRouter}, | ||||
| }; | ||||
| use harmony_macros::{ip, ipv4}; | ||||
| use harmony_secret::SecretManager; | ||||
| use harmony_secret::{Secret, SecretManager}; | ||||
| use serde::{Deserialize, Serialize}; | ||||
| use std::{net::IpAddr, sync::Arc}; | ||||
| 
 | ||||
| #[derive(Secret, Serialize, Deserialize, Debug, PartialEq)] | ||||
| struct OPNSenseFirewallConfig { | ||||
|     username: String, | ||||
|     password: String, | ||||
| } | ||||
| 
 | ||||
| pub async fn get_topology() -> HAClusterTopology { | ||||
|     let firewall = harmony::topology::LogicalHost { | ||||
|         ip: ip!("192.168.1.1"), | ||||
|         name: String::from("opnsense-1"), | ||||
|     }; | ||||
| 
 | ||||
|     let config = SecretManager::get_or_prompt::<OPNSenseFirewallCredentials>().await; | ||||
|     let config = SecretManager::get::<OPNSenseFirewallConfig>().await; | ||||
|     let config = config.unwrap(); | ||||
| 
 | ||||
|     let opnsense = Arc::new( | ||||
|  | ||||
| @ -5,7 +5,7 @@ use std::{ | ||||
| 
 | ||||
| use cidr::Ipv4Cidr; | ||||
| use harmony::{ | ||||
|     hardware::{HostCategory, Location, PhysicalHost, SwitchGroup}, | ||||
|     hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup}, | ||||
|     infra::opnsense::OPNSenseManagementInterface, | ||||
|     inventory::Inventory, | ||||
|     modules::{ | ||||
| @ -85,7 +85,6 @@ async fn main() { | ||||
|             "./data/watchguard/pxe-http-files".to_string(), | ||||
|         )), | ||||
|         files: vec![], | ||||
|         remote_path: None, | ||||
|     }; | ||||
| 
 | ||||
|     harmony_tui::run( | ||||
|  | ||||
| @ -9,7 +9,6 @@ use harmony::{ | ||||
|     }, | ||||
|     topology::{ | ||||
|         BackendServer, DummyInfra, HealthCheck, HttpMethod, HttpStatusCode, LoadBalancerService, | ||||
|         SSL, | ||||
|     }, | ||||
| }; | ||||
| use harmony_macros::ipv4; | ||||
| @ -48,7 +47,6 @@ fn build_large_score() -> LoadBalancerScore { | ||||
|                 .to_string(), | ||||
|             HttpMethod::GET, | ||||
|             HttpStatusCode::Success2xx, | ||||
|             SSL::Disabled, | ||||
|         )), | ||||
|     }; | ||||
|     LoadBalancerScore { | ||||
|  | ||||
| @ -68,11 +68,9 @@ thiserror.workspace = true | ||||
| once_cell = "1.21.3" | ||||
| walkdir = "2.5.0" | ||||
| harmony_inventory_agent = { path = "../harmony_inventory_agent" } | ||||
| harmony_secret_derive = { path = "../harmony_secret_derive" } | ||||
| harmony_secret = { path = "../harmony_secret" } | ||||
| harmony_secret_derive = { version = "0.1.0", path = "../harmony_secret_derive" } | ||||
| askama.workspace = true | ||||
| sqlx.workspace = true | ||||
| inquire.workspace = true | ||||
| 
 | ||||
| [dev-dependencies] | ||||
| pretty_assertions.workspace = true | ||||
|  | ||||
| @ -1,5 +1,3 @@ | ||||
| pub mod secret; | ||||
| 
 | ||||
| use lazy_static::lazy_static; | ||||
| use std::path::PathBuf; | ||||
| 
 | ||||
| @ -1,20 +0,0 @@ | ||||
| use harmony_secret_derive::Secret; | ||||
| use serde::{Deserialize, Serialize}; | ||||
| 
 | ||||
| #[derive(Secret, Serialize, Deserialize, Debug, PartialEq)] | ||||
| pub struct OPNSenseFirewallCredentials { | ||||
|     pub username: String, | ||||
|     pub password: String, | ||||
| } | ||||
| 
 | ||||
| // TODO we need a better way to handle multiple "instances" of the same secret structure.
 | ||||
| #[derive(Secret, Serialize, Deserialize, Debug, PartialEq)] | ||||
| pub struct SshKeyPair { | ||||
|     pub private: String, | ||||
|     pub public: String, | ||||
| } | ||||
| 
 | ||||
| #[derive(Secret, Serialize, Deserialize, Debug, PartialEq)] | ||||
| pub struct RedhatSecret { | ||||
|     pub pull_secret: String, | ||||
| } | ||||
| @ -1,3 +1,5 @@ | ||||
| use std::sync::Arc; | ||||
| 
 | ||||
| use derive_new::new; | ||||
| use harmony_inventory_agent::hwinfo::{CPU, MemoryModule, NetworkInterface, StorageDrive}; | ||||
| use harmony_types::net::MacAddress; | ||||
| @ -8,7 +10,7 @@ pub type HostGroup = Vec<PhysicalHost>; | ||||
| pub type SwitchGroup = Vec<Switch>; | ||||
| pub type FirewallGroup = Vec<PhysicalHost>; | ||||
| 
 | ||||
| #[derive(Debug, Clone, Serialize, Deserialize)] | ||||
| #[derive(Debug, Clone, Serialize)] | ||||
| pub struct PhysicalHost { | ||||
|     pub id: Id, | ||||
|     pub category: HostCategory, | ||||
| @ -149,98 +151,6 @@ impl PhysicalHost { | ||||
|         parts.join(" | ") | ||||
|     } | ||||
| 
 | ||||
|     pub fn parts_list(&self) -> String { | ||||
|         let PhysicalHost { | ||||
|             id, | ||||
|             category, | ||||
|             network, | ||||
|             storage, | ||||
|             labels, | ||||
|             memory_modules, | ||||
|             cpus, | ||||
|         } = self; | ||||
| 
 | ||||
|         let mut parts_list = String::new(); | ||||
|         parts_list.push_str("\n\n====================="); | ||||
|         parts_list.push_str(&format!("\nHost ID {id}")); | ||||
|         parts_list.push_str("\n====================="); | ||||
|         parts_list.push_str("\n\n====================="); | ||||
|         parts_list.push_str(&format!("\nCPU count {}", cpus.len())); | ||||
|         parts_list.push_str("\n====================="); | ||||
|         cpus.iter().for_each(|c| { | ||||
|             let CPU { | ||||
|                 model, | ||||
|                 vendor, | ||||
|                 cores, | ||||
|                 threads, | ||||
|                 frequency_mhz, | ||||
|             } = c; | ||||
|             parts_list.push_str(&format!( | ||||
|                 "\n{vendor} {model}, {cores}/{threads} {}Ghz", | ||||
|                 *frequency_mhz as f64 / 1000.0 | ||||
|             )); | ||||
|         }); | ||||
| 
 | ||||
|         parts_list.push_str("\n\n====================="); | ||||
|         parts_list.push_str(&format!("\nNetwork Interfaces count {}", network.len())); | ||||
|         parts_list.push_str("\n====================="); | ||||
|         network.iter().for_each(|nic| { | ||||
|             parts_list.push_str(&format!( | ||||
|                 "\nNic({} {}Gbps mac({}) ipv4({}), ipv6({})", | ||||
|                 nic.name, | ||||
|                 nic.speed_mbps.unwrap_or(0) / 1000, | ||||
|                 nic.mac_address, | ||||
|                 nic.ipv4_addresses.join(","), | ||||
|                 nic.ipv6_addresses.join(",") | ||||
|             )); | ||||
|         }); | ||||
| 
 | ||||
|         parts_list.push_str("\n\n====================="); | ||||
|         parts_list.push_str(&format!("\nStorage drives count {}", storage.len())); | ||||
|         parts_list.push_str("\n====================="); | ||||
|         storage.iter().for_each(|drive| { | ||||
|             let StorageDrive { | ||||
|                 name, | ||||
|                 model, | ||||
|                 serial, | ||||
|                 size_bytes, | ||||
|                 logical_block_size: _, | ||||
|                 physical_block_size: _, | ||||
|                 rotational: _, | ||||
|                 wwn: _, | ||||
|                 interface_type, | ||||
|                 smart_status, | ||||
|             } = drive; | ||||
|             parts_list.push_str(&format!( | ||||
|                 "\n{name} {}Gb {model} {interface_type} smart({smart_status:?}) {serial}", | ||||
|                 size_bytes / 1000 / 1000 / 1000 | ||||
|             )); | ||||
|         }); | ||||
| 
 | ||||
|         parts_list.push_str("\n\n====================="); | ||||
|         parts_list.push_str(&format!("\nMemory modules count {}", memory_modules.len())); | ||||
|         parts_list.push_str("\n====================="); | ||||
|         memory_modules.iter().for_each(|mem| { | ||||
|             let MemoryModule { | ||||
|                 size_bytes, | ||||
|                 speed_mhz, | ||||
|                 manufacturer, | ||||
|                 part_number, | ||||
|                 serial_number, | ||||
|                 rank, | ||||
|             } = mem; | ||||
|             parts_list.push_str(&format!( | ||||
|                 "\n{}Gb, {}Mhz, Manufacturer ({}), Part Number ({})", | ||||
|                 size_bytes / 1000 / 1000 / 1000, | ||||
|                 speed_mhz.unwrap_or(0), | ||||
|                 manufacturer.as_ref().unwrap_or(&String::new()), | ||||
|                 part_number.as_ref().unwrap_or(&String::new()), | ||||
|             )); | ||||
|         }); | ||||
| 
 | ||||
|         parts_list | ||||
|     } | ||||
| 
 | ||||
|     pub fn cluster_mac(&self) -> MacAddress { | ||||
|         self.network | ||||
|             .first() | ||||
| @ -263,10 +173,6 @@ impl PhysicalHost { | ||||
|         self | ||||
|     } | ||||
| 
 | ||||
|     pub fn get_mac_address(&self) -> Vec<MacAddress> { | ||||
|         self.network.iter().map(|nic| nic.mac_address).collect() | ||||
|     } | ||||
| 
 | ||||
|     pub fn label(mut self, name: String, value: String) -> Self { | ||||
|         self.labels.push(Label { name, value }); | ||||
|         self | ||||
| @ -315,6 +221,15 @@ impl PhysicalHost { | ||||
| //     }
 | ||||
| // }
 | ||||
| 
 | ||||
| impl<'de> Deserialize<'de> for PhysicalHost { | ||||
|     fn deserialize<D>(_deserializer: D) -> Result<Self, D::Error> | ||||
|     where | ||||
|         D: serde::Deserializer<'de>, | ||||
|     { | ||||
|         todo!() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[derive(new, Serialize)] | ||||
| pub struct ManualManagementInterface; | ||||
| 
 | ||||
| @ -358,13 +273,16 @@ where | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[derive(Debug, Clone, Serialize, Deserialize)] | ||||
| #[derive(Debug, Clone, Serialize)] | ||||
| pub enum HostCategory { | ||||
|     Server, | ||||
|     Firewall, | ||||
|     Switch, | ||||
| } | ||||
| 
 | ||||
| #[cfg(test)] | ||||
| use harmony_macros::mac_address; | ||||
| 
 | ||||
| use harmony_types::id::Id; | ||||
| 
 | ||||
| #[derive(Debug, Clone, Serialize)] | ||||
| @ -373,7 +291,7 @@ pub struct Switch { | ||||
|     _management_interface: NetworkInterface, | ||||
| } | ||||
| 
 | ||||
| #[derive(Debug, new, Clone, Serialize, Deserialize)] | ||||
| #[derive(Debug, new, Clone, Serialize)] | ||||
| pub struct Label { | ||||
|     pub name: String, | ||||
|     pub value: String, | ||||
|  | ||||
| @ -32,7 +32,6 @@ pub enum InterpretName { | ||||
|     K8sPrometheusCrdAlerting, | ||||
|     DiscoverInventoryAgent, | ||||
|     CephClusterHealth, | ||||
|     Custom(&'static str), | ||||
|     RHOBAlerting, | ||||
| } | ||||
| 
 | ||||
| @ -62,7 +61,6 @@ impl std::fmt::Display for InterpretName { | ||||
|             InterpretName::K8sPrometheusCrdAlerting => f.write_str("K8sPrometheusCrdAlerting"), | ||||
|             InterpretName::DiscoverInventoryAgent => f.write_str("DiscoverInventoryAgent"), | ||||
|             InterpretName::CephClusterHealth => f.write_str("CephClusterHealth"), | ||||
|             InterpretName::Custom(name) => f.write_str(name), | ||||
|             InterpretName::RHOBAlerting => f.write_str("RHOBAlerting"), | ||||
|         } | ||||
|     } | ||||
| @ -144,12 +142,6 @@ impl From<PreparationError> for InterpretError { | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl From<harmony_secret::SecretStoreError> for InterpretError { | ||||
|     fn from(value: harmony_secret::SecretStoreError) -> Self { | ||||
|         InterpretError::new(format!("Interpret error : {value}")) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl From<ExecutorError> for InterpretError { | ||||
|     fn from(value: ExecutorError) -> Self { | ||||
|         Self { | ||||
|  | ||||
| @ -17,14 +17,12 @@ impl InventoryFilter { | ||||
| 
 | ||||
| use derive_new::new; | ||||
| use log::info; | ||||
| use serde::{Deserialize, Serialize}; | ||||
| use strum::EnumIter; | ||||
| 
 | ||||
| use crate::hardware::{ManagementInterface, ManualManagementInterface}; | ||||
| 
 | ||||
| use super::{ | ||||
|     filter::Filter, | ||||
|     hardware::{HostGroup, Location, SwitchGroup}, | ||||
|     hardware::{FirewallGroup, HostGroup, Location, SwitchGroup}, | ||||
| }; | ||||
| 
 | ||||
| #[derive(Debug)] | ||||
| @ -63,11 +61,3 @@ impl Inventory { | ||||
|         } | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[derive(Debug, Serialize, Deserialize, sqlx::Type, Clone, EnumIter)] | ||||
| pub enum HostRole { | ||||
|     Bootstrap, | ||||
|     ControlPlane, | ||||
|     Worker, | ||||
|     Storage, | ||||
| } | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| use async_trait::async_trait; | ||||
| 
 | ||||
| use crate::{hardware::PhysicalHost, interpret::InterpretError, inventory::HostRole}; | ||||
| use crate::hardware::PhysicalHost; | ||||
| 
 | ||||
| /// Errors that can occur within the repository layer.
 | ||||
| #[derive(thiserror::Error, Debug)] | ||||
| @ -15,12 +15,6 @@ pub enum RepoError { | ||||
|     ConnectionFailed(String), | ||||
| } | ||||
| 
 | ||||
| impl From<RepoError> for InterpretError { | ||||
|     fn from(value: RepoError) -> Self { | ||||
|         InterpretError::new(format!("Interpret error : {value}")) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| // --- Trait and Implementation ---
 | ||||
| 
 | ||||
| /// Defines the contract for inventory persistence.
 | ||||
| @ -28,11 +22,4 @@ impl From<RepoError> for InterpretError { | ||||
| pub trait InventoryRepository: Send + Sync + 'static { | ||||
|     async fn save(&self, host: &PhysicalHost) -> Result<(), RepoError>; | ||||
|     async fn get_latest_by_id(&self, host_id: &str) -> Result<Option<PhysicalHost>, RepoError>; | ||||
|     async fn get_all_hosts(&self) -> Result<Vec<PhysicalHost>, RepoError>; | ||||
|     async fn get_host_for_role(&self, role: &HostRole) -> Result<Vec<PhysicalHost>, RepoError>; | ||||
|     async fn save_role_mapping( | ||||
|         &self, | ||||
|         role: &HostRole, | ||||
|         host: &PhysicalHost, | ||||
|     ) -> Result<(), RepoError>; | ||||
| } | ||||
|  | ||||
| @ -69,26 +69,6 @@ impl K8sclient for HAClusterTopology { | ||||
| } | ||||
| 
 | ||||
| impl HAClusterTopology { | ||||
|     // TODO this is a hack to avoid refactoring
 | ||||
|     pub fn get_cluster_name(&self) -> String { | ||||
|         self.domain_name | ||||
|             .split(".") | ||||
|             .next() | ||||
|             .expect("Cluster domain name must not be empty") | ||||
|             .to_string() | ||||
|     } | ||||
| 
 | ||||
|     pub fn get_cluster_base_domain(&self) -> String { | ||||
|         let base_domain = self | ||||
|             .domain_name | ||||
|             .strip_prefix(&self.get_cluster_name()) | ||||
|             .expect("cluster domain must start with cluster name"); | ||||
|         base_domain | ||||
|             .strip_prefix(".") | ||||
|             .unwrap_or(base_domain) | ||||
|             .to_string() | ||||
|     } | ||||
| 
 | ||||
|     pub fn autoload() -> Self { | ||||
|         let dummy_infra = Arc::new(DummyInfra {}); | ||||
|         let dummy_host = LogicalHost { | ||||
| @ -181,14 +161,6 @@ impl DhcpServer for HAClusterTopology { | ||||
|         self.dhcp_server.set_pxe_options(options).await | ||||
|     } | ||||
| 
 | ||||
|     async fn set_dhcp_range( | ||||
|         &self, | ||||
|         start: &IpAddress, | ||||
|         end: &IpAddress, | ||||
|     ) -> Result<(), ExecutorError> { | ||||
|         self.dhcp_server.set_dhcp_range(start, end).await | ||||
|     } | ||||
| 
 | ||||
|     fn get_ip(&self) -> IpAddress { | ||||
|         self.dhcp_server.get_ip() | ||||
|     } | ||||
| @ -237,12 +209,8 @@ impl Router for HAClusterTopology { | ||||
| 
 | ||||
| #[async_trait] | ||||
| impl HttpServer for HAClusterTopology { | ||||
|     async fn serve_files( | ||||
|         &self, | ||||
|         url: &Url, | ||||
|         remote_path: &Option<String>, | ||||
|     ) -> Result<(), ExecutorError> { | ||||
|         self.http_server.serve_files(url, remote_path).await | ||||
|     async fn serve_files(&self, url: &Url) -> Result<(), ExecutorError> { | ||||
|         self.http_server.serve_files(url).await | ||||
|     } | ||||
| 
 | ||||
|     async fn serve_file_content(&self, file: &FileContent) -> Result<(), ExecutorError> { | ||||
| @ -330,13 +298,6 @@ impl DhcpServer for DummyInfra { | ||||
|     async fn set_pxe_options(&self, _options: PxeOptions) -> Result<(), ExecutorError> { | ||||
|         unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) | ||||
|     } | ||||
|     async fn set_dhcp_range( | ||||
|         &self, | ||||
|         start: &IpAddress, | ||||
|         end: &IpAddress, | ||||
|     ) -> Result<(), ExecutorError> { | ||||
|         unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) | ||||
|     } | ||||
|     fn get_ip(&self) -> IpAddress { | ||||
|         unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) | ||||
|     } | ||||
| @ -401,11 +362,7 @@ impl TftpServer for DummyInfra { | ||||
| 
 | ||||
| #[async_trait] | ||||
| impl HttpServer for DummyInfra { | ||||
|     async fn serve_files( | ||||
|         &self, | ||||
|         _url: &Url, | ||||
|         _remote_path: &Option<String>, | ||||
|     ) -> Result<(), ExecutorError> { | ||||
|     async fn serve_files(&self, _url: &Url) -> Result<(), ExecutorError> { | ||||
|         unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) | ||||
|     } | ||||
|     async fn serve_file_content(&self, _file: &FileContent) -> Result<(), ExecutorError> { | ||||
|  | ||||
| @ -5,11 +5,7 @@ use harmony_types::net::IpAddress; | ||||
| use harmony_types::net::Url; | ||||
| #[async_trait] | ||||
| pub trait HttpServer: Send + Sync { | ||||
|     async fn serve_files( | ||||
|         &self, | ||||
|         url: &Url, | ||||
|         remote_path: &Option<String>, | ||||
|     ) -> Result<(), ExecutorError>; | ||||
|     async fn serve_files(&self, url: &Url) -> Result<(), ExecutorError>; | ||||
|     async fn serve_file_content(&self, file: &FileContent) -> Result<(), ExecutorError>; | ||||
|     fn get_ip(&self) -> IpAddress; | ||||
| 
 | ||||
|  | ||||
| @ -102,17 +102,8 @@ pub enum HttpStatusCode { | ||||
|     ServerError5xx, | ||||
| } | ||||
| 
 | ||||
| #[derive(Debug, Clone, PartialEq, Serialize)] | ||||
| pub enum SSL { | ||||
|     SSL, | ||||
|     Disabled, | ||||
|     Default, | ||||
|     SNI, | ||||
|     Other(String), | ||||
| } | ||||
| 
 | ||||
| #[derive(Debug, Clone, PartialEq, Serialize)] | ||||
| pub enum HealthCheck { | ||||
|     HTTP(String, HttpMethod, HttpStatusCode, SSL), | ||||
|     HTTP(String, HttpMethod, HttpStatusCode), | ||||
|     TCP(Option<u16>), | ||||
| } | ||||
|  | ||||
| @ -11,21 +11,15 @@ use super::{LogicalHost, k8s::K8sClient}; | ||||
| #[derive(Debug)] | ||||
| pub struct DHCPStaticEntry { | ||||
|     pub name: String, | ||||
|     pub mac: Vec<MacAddress>, | ||||
|     pub mac: MacAddress, | ||||
|     pub ip: Ipv4Addr, | ||||
| } | ||||
| 
 | ||||
| impl std::fmt::Display for DHCPStaticEntry { | ||||
|     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { | ||||
|         let mac = self | ||||
|             .mac | ||||
|             .iter() | ||||
|             .map(|m| m.to_string()) | ||||
|             .collect::<Vec<String>>() | ||||
|             .join(","); | ||||
|         f.write_fmt(format_args!( | ||||
|             "DHCPStaticEntry : name {}, mac {}, ip {}", | ||||
|             self.name, mac, self.ip | ||||
|             self.name, self.mac, self.ip | ||||
|         )) | ||||
|     } | ||||
| } | ||||
| @ -47,7 +41,6 @@ impl std::fmt::Debug for dyn Firewall { | ||||
| pub struct NetworkDomain { | ||||
|     pub name: String, | ||||
| } | ||||
| 
 | ||||
| #[async_trait] | ||||
| pub trait K8sclient: Send + Sync { | ||||
|     async fn k8s_client(&self) -> Result<Arc<K8sClient>, String>; | ||||
| @ -66,8 +59,6 @@ pub trait DhcpServer: Send + Sync + std::fmt::Debug { | ||||
|     async fn remove_static_mapping(&self, mac: &MacAddress) -> Result<(), ExecutorError>; | ||||
|     async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)>; | ||||
|     async fn set_pxe_options(&self, pxe_options: PxeOptions) -> Result<(), ExecutorError>; | ||||
|     async fn set_dhcp_range(&self, start: &IpAddress, end: &IpAddress) | ||||
|     -> Result<(), ExecutorError>; | ||||
|     fn get_ip(&self) -> IpAddress; | ||||
|     fn get_host(&self) -> LogicalHost; | ||||
|     async fn commit_config(&self) -> Result<(), ExecutorError>; | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| use crate::{ | ||||
|     hardware::PhysicalHost, | ||||
|     inventory::{HostRole, InventoryRepository, RepoError}, | ||||
|     inventory::{InventoryRepository, RepoError}, | ||||
| }; | ||||
| use async_trait::async_trait; | ||||
| use harmony_types::id::Id; | ||||
| @ -46,104 +46,20 @@ impl InventoryRepository for SqliteInventoryRepository { | ||||
|     } | ||||
| 
 | ||||
|     async fn get_latest_by_id(&self, host_id: &str) -> Result<Option<PhysicalHost>, RepoError> { | ||||
|         let row = sqlx::query_as!( | ||||
|         let _row = sqlx::query_as!( | ||||
|             DbHost, | ||||
|             r#"SELECT id, version_id, data as "data: Json<PhysicalHost>" FROM physical_hosts WHERE id = ? ORDER BY version_id DESC LIMIT 1"#, | ||||
|             host_id | ||||
|         ) | ||||
|         .fetch_optional(&self.pool) | ||||
|         .await?; | ||||
| 
 | ||||
|         Ok(row.map(|r| r.data.0)) | ||||
|     } | ||||
| 
 | ||||
|     async fn get_all_hosts(&self) -> Result<Vec<PhysicalHost>, RepoError> { | ||||
|         let db_hosts = sqlx::query_as!( | ||||
|             DbHost, | ||||
|             r#" | ||||
|         SELECT | ||||
|             p1.id, | ||||
|             p1.version_id, | ||||
|             p1.data as "data: Json<PhysicalHost>" | ||||
|         FROM | ||||
|             physical_hosts p1 | ||||
|         INNER JOIN ( | ||||
|             SELECT | ||||
|                 id, | ||||
|                 MAX(version_id) AS max_version | ||||
|             FROM | ||||
|                 physical_hosts | ||||
|             GROUP BY | ||||
|                 id | ||||
|         ) p2 ON p1.id = p2.id AND p1.version_id = p2.max_version | ||||
|         "#
 | ||||
|         ) | ||||
|         .fetch_all(&self.pool) | ||||
|         .await?; | ||||
| 
 | ||||
|         let hosts = db_hosts.into_iter().map(|row| row.data.0).collect(); | ||||
| 
 | ||||
|         Ok(hosts) | ||||
|     } | ||||
| 
 | ||||
|     async fn save_role_mapping( | ||||
|         &self, | ||||
|         role: &HostRole, | ||||
|         host: &PhysicalHost, | ||||
|     ) -> Result<(), RepoError> { | ||||
|         let host_id = host.id.to_string(); | ||||
| 
 | ||||
|         sqlx::query!( | ||||
|             r#" | ||||
|         INSERT INTO host_role_mapping (host_id, role) | ||||
|         VALUES (?, ?) | ||||
|         "#,
 | ||||
|             host_id, | ||||
|             role | ||||
|         ) | ||||
|         .execute(&self.pool) | ||||
|         .await?; | ||||
| 
 | ||||
|         info!("Saved role mapping for host '{}' as '{:?}'", host.id, role); | ||||
| 
 | ||||
|         Ok(()) | ||||
|     } | ||||
| 
 | ||||
|     async fn get_host_for_role(&self, role: &HostRole) -> Result<Vec<PhysicalHost>, RepoError> { | ||||
|         struct HostIdRow { | ||||
|             host_id: String, | ||||
|         } | ||||
| 
 | ||||
|         let role_str = format!("{:?}", role); | ||||
| 
 | ||||
|         let host_id_rows = sqlx::query_as!( | ||||
|             HostIdRow, | ||||
|             "SELECT host_id FROM host_role_mapping WHERE role = ?", | ||||
|             role_str | ||||
|         ) | ||||
|         .fetch_all(&self.pool) | ||||
|         .await?; | ||||
| 
 | ||||
|         let mut hosts = Vec::with_capacity(host_id_rows.len()); | ||||
|         for row in host_id_rows { | ||||
|             match self.get_latest_by_id(&row.host_id).await? { | ||||
|                 Some(host) => hosts.push(host), | ||||
|                 None => { | ||||
|                     log::warn!( | ||||
|                         "Found a role mapping for host_id '{}', but the host does not exist in the physical_hosts table. This may indicate a data integrity issue.", | ||||
|                         row.host_id | ||||
|                     ); | ||||
|                 } | ||||
|             } | ||||
|         } | ||||
| 
 | ||||
|         Ok(hosts) | ||||
|         todo!() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| use sqlx::types::Json; | ||||
| struct DbHost { | ||||
|     data: Json<PhysicalHost>, | ||||
|     id: String, | ||||
|     version_id: String, | ||||
|     id: Id, | ||||
|     version_id: Id, | ||||
| } | ||||
|  | ||||
| @ -17,13 +17,13 @@ impl DhcpServer for OPNSenseFirewall { | ||||
|     } | ||||
| 
 | ||||
|     async fn add_static_mapping(&self, entry: &DHCPStaticEntry) -> Result<(), ExecutorError> { | ||||
|         let mac: Vec<String> = entry.mac.iter().map(MacAddress::to_string).collect(); | ||||
|         let mac: String = String::from(&entry.mac); | ||||
| 
 | ||||
|         { | ||||
|             let mut writable_opnsense = self.opnsense_config.write().await; | ||||
|             writable_opnsense | ||||
|                 .dhcp() | ||||
|                 .add_static_mapping(&mac, &entry.ip, &entry.name) | ||||
|                 .add_static_mapping(&mac, entry.ip, &entry.name) | ||||
|                 .unwrap(); | ||||
|         } | ||||
| 
 | ||||
| @ -68,19 +68,4 @@ impl DhcpServer for OPNSenseFirewall { | ||||
|                 ExecutorError::UnexpectedError(format!("Failed to set_pxe_options : {dhcp_error}")) | ||||
|             }) | ||||
|     } | ||||
| 
 | ||||
|     async fn set_dhcp_range( | ||||
|         &self, | ||||
|         start: &IpAddress, | ||||
|         end: &IpAddress, | ||||
|     ) -> Result<(), ExecutorError> { | ||||
|         let mut writable_opnsense = self.opnsense_config.write().await; | ||||
|         writable_opnsense | ||||
|             .dhcp() | ||||
|             .set_dhcp_range(&start.to_string(), &end.to_string()) | ||||
|             .await | ||||
|             .map_err(|dhcp_error| { | ||||
|                 ExecutorError::UnexpectedError(format!("Failed to set_dhcp_range : {dhcp_error}")) | ||||
|             }) | ||||
|     } | ||||
| } | ||||
|  | ||||
| @ -1,3 +1,4 @@ | ||||
| use crate::infra::opnsense::Host; | ||||
| use crate::infra::opnsense::LogicalHost; | ||||
| use crate::{ | ||||
|     executors::ExecutorError, | ||||
| @ -11,22 +12,21 @@ use super::OPNSenseFirewall; | ||||
| #[async_trait] | ||||
| impl DnsServer for OPNSenseFirewall { | ||||
|     async fn register_hosts(&self, hosts: Vec<DnsRecord>) -> Result<(), ExecutorError> { | ||||
|         todo!("Refactor this to use dnsmasq") | ||||
|         // let mut writable_opnsense = self.opnsense_config.write().await;
 | ||||
|         // let mut dns = writable_opnsense.dns();
 | ||||
|         // let hosts = hosts
 | ||||
|         //     .iter()
 | ||||
|         //     .map(|h| {
 | ||||
|         //         Host::new(
 | ||||
|         //             h.host.clone(),
 | ||||
|         //             h.domain.clone(),
 | ||||
|         //             h.record_type.to_string(),
 | ||||
|         //             h.value.to_string(),
 | ||||
|         //         )
 | ||||
|         //     })
 | ||||
|         //     .collect();
 | ||||
|         // dns.add_static_mapping(hosts);
 | ||||
|         // Ok(())
 | ||||
|         let mut writable_opnsense = self.opnsense_config.write().await; | ||||
|         let mut dns = writable_opnsense.dns(); | ||||
|         let hosts = hosts | ||||
|             .iter() | ||||
|             .map(|h| { | ||||
|                 Host::new( | ||||
|                     h.host.clone(), | ||||
|                     h.domain.clone(), | ||||
|                     h.record_type.to_string(), | ||||
|                     h.value.to_string(), | ||||
|                 ) | ||||
|             }) | ||||
|             .collect(); | ||||
|         dns.register_hosts(hosts); | ||||
|         Ok(()) | ||||
|     } | ||||
| 
 | ||||
|     fn remove_record( | ||||
| @ -38,26 +38,25 @@ impl DnsServer for OPNSenseFirewall { | ||||
|     } | ||||
| 
 | ||||
|     async fn list_records(&self) -> Vec<crate::topology::DnsRecord> { | ||||
|         todo!("Refactor this to use dnsmasq") | ||||
|         // self.opnsense_config
 | ||||
|         //     .write()
 | ||||
|         //     .await
 | ||||
|         //     .dns()
 | ||||
|         //     .get_hosts()
 | ||||
|         //     .iter()
 | ||||
|         //     .map(|h| DnsRecord {
 | ||||
|         //         host: h.hostname.clone(),
 | ||||
|         //         domain: h.domain.clone(),
 | ||||
|         //         record_type: h
 | ||||
|         //             .rr
 | ||||
|         //             .parse()
 | ||||
|         //             .expect("received invalid record type {h.rr} from opnsense"),
 | ||||
|         //         value: h
 | ||||
|         //             .server
 | ||||
|         //             .parse()
 | ||||
|         //             .expect("received invalid ipv4 record from opnsense {h.server}"),
 | ||||
|         //     })
 | ||||
|         //     .collect()
 | ||||
|         self.opnsense_config | ||||
|             .write() | ||||
|             .await | ||||
|             .dns() | ||||
|             .get_hosts() | ||||
|             .iter() | ||||
|             .map(|h| DnsRecord { | ||||
|                 host: h.hostname.clone(), | ||||
|                 domain: h.domain.clone(), | ||||
|                 record_type: h | ||||
|                     .rr | ||||
|                     .parse() | ||||
|                     .expect("received invalid record type {h.rr} from opnsense"), | ||||
|                 value: h | ||||
|                     .server | ||||
|                     .parse() | ||||
|                     .expect("received invalid ipv4 record from opnsense {h.server}"), | ||||
|             }) | ||||
|             .collect() | ||||
|     } | ||||
| 
 | ||||
|     fn get_ip(&self) -> IpAddress { | ||||
| @ -69,12 +68,11 @@ impl DnsServer for OPNSenseFirewall { | ||||
|     } | ||||
| 
 | ||||
|     async fn register_dhcp_leases(&self, register: bool) -> Result<(), ExecutorError> { | ||||
|         todo!("Refactor this to use dnsmasq") | ||||
|         // let mut writable_opnsense = self.opnsense_config.write().await;
 | ||||
|         // let mut dns = writable_opnsense.dns();
 | ||||
|         // dns.register_dhcp_leases(register);
 | ||||
|         //
 | ||||
|         // Ok(())
 | ||||
|         let mut writable_opnsense = self.opnsense_config.write().await; | ||||
|         let mut dns = writable_opnsense.dns(); | ||||
|         dns.register_dhcp_leases(register); | ||||
| 
 | ||||
|         Ok(()) | ||||
|     } | ||||
| 
 | ||||
|     async fn commit_config(&self) -> Result<(), ExecutorError> { | ||||
|  | ||||
| @ -10,21 +10,13 @@ const OPNSENSE_HTTP_ROOT_PATH: &str = "/usr/local/http"; | ||||
| 
 | ||||
| #[async_trait] | ||||
| impl HttpServer for OPNSenseFirewall { | ||||
|     async fn serve_files( | ||||
|         &self, | ||||
|         url: &Url, | ||||
|         remote_path: &Option<String>, | ||||
|     ) -> Result<(), ExecutorError> { | ||||
|     async fn serve_files(&self, url: &Url) -> Result<(), ExecutorError> { | ||||
|         let config = self.opnsense_config.read().await; | ||||
|         info!("Uploading files from url {url} to {OPNSENSE_HTTP_ROOT_PATH}"); | ||||
|         let remote_upload_path = remote_path | ||||
|             .clone() | ||||
|             .map(|r| format!("{OPNSENSE_HTTP_ROOT_PATH}/{r}")) | ||||
|             .unwrap_or(OPNSENSE_HTTP_ROOT_PATH.to_string()); | ||||
|         match url { | ||||
|             Url::LocalFolder(path) => { | ||||
|                 config | ||||
|                     .upload_files(path, &remote_upload_path) | ||||
|                     .upload_files(path, OPNSENSE_HTTP_ROOT_PATH) | ||||
|                     .await | ||||
|                     .map_err(|e| ExecutorError::UnexpectedError(e.to_string()))?; | ||||
|             } | ||||
|  | ||||
| @ -1,15 +1,13 @@ | ||||
| use async_trait::async_trait; | ||||
| use log::{debug, error, info, warn}; | ||||
| use opnsense_config_xml::{ | ||||
|     Frontend, HAProxy, HAProxyBackend, HAProxyHealthCheck, HAProxyServer, MaybeString, | ||||
| }; | ||||
| use log::{debug, info, warn}; | ||||
| use opnsense_config_xml::{Frontend, HAProxy, HAProxyBackend, HAProxyHealthCheck, HAProxyServer}; | ||||
| use uuid::Uuid; | ||||
| 
 | ||||
| use crate::{ | ||||
|     executors::ExecutorError, | ||||
|     topology::{ | ||||
|         BackendServer, HealthCheck, HttpMethod, HttpStatusCode, LoadBalancer, LoadBalancerService, | ||||
|         LogicalHost, SSL, | ||||
|         LogicalHost, | ||||
|     }, | ||||
| }; | ||||
| use harmony_types::net::IpAddress; | ||||
| @ -208,22 +206,7 @@ pub(crate) fn get_health_check_for_backend( | ||||
|                 .unwrap_or_default() | ||||
|                 .into(); | ||||
|             let status_code: HttpStatusCode = HttpStatusCode::Success2xx; | ||||
|             let ssl = match haproxy_health_check | ||||
|                 .ssl | ||||
|                 .content_string() | ||||
|                 .to_uppercase() | ||||
|                 .as_str() | ||||
|             { | ||||
|                 "SSL" => SSL::SSL, | ||||
|                 "SSLNI" => SSL::SNI, | ||||
|                 "NOSSL" => SSL::Disabled, | ||||
|                 "" => SSL::Default, | ||||
|                 other => { | ||||
|                     error!("Unknown haproxy health check ssl config {other}"); | ||||
|                     SSL::Other(other.to_string()) | ||||
|                 } | ||||
|             }; | ||||
|             Some(HealthCheck::HTTP(path, method, status_code, ssl)) | ||||
|             Some(HealthCheck::HTTP(path, method, status_code)) | ||||
|         } | ||||
|         _ => panic!("Received unsupported health check type {}", uppercase), | ||||
|     } | ||||
| @ -258,14 +241,7 @@ pub(crate) fn harmony_load_balancer_service_to_haproxy_xml( | ||||
|     // frontend points to backend
 | ||||
|     let healthcheck = if let Some(health_check) = &service.health_check { | ||||
|         match health_check { | ||||
|             HealthCheck::HTTP(path, http_method, _http_status_code, ssl) => { | ||||
|                 let ssl: MaybeString = match ssl { | ||||
|                     SSL::SSL => "ssl".into(), | ||||
|                     SSL::SNI => "sslni".into(), | ||||
|                     SSL::Disabled => "nossl".into(), | ||||
|                     SSL::Default => "".into(), | ||||
|                     SSL::Other(other) => other.as_str().into(), | ||||
|                 }; | ||||
|             HealthCheck::HTTP(path, http_method, _http_status_code) => { | ||||
|                 let haproxy_check = HAProxyHealthCheck { | ||||
|                     name: format!("HTTP_{http_method}_{path}"), | ||||
|                     uuid: Uuid::new_v4().to_string(), | ||||
| @ -273,7 +249,6 @@ pub(crate) fn harmony_load_balancer_service_to_haproxy_xml( | ||||
|                     health_check_type: "http".to_string(), | ||||
|                     http_uri: path.clone().into(), | ||||
|                     interval: "2s".to_string(), | ||||
|                     ssl, | ||||
|                     ..Default::default() | ||||
|                 }; | ||||
| 
 | ||||
|  | ||||
| @ -1,7 +1,7 @@ | ||||
| use async_trait::async_trait; | ||||
| use derive_new::new; | ||||
| use harmony_types::id::Id; | ||||
| use log::{info, trace}; | ||||
| use log::info; | ||||
| use serde::Serialize; | ||||
| 
 | ||||
| use crate::{ | ||||
| @ -22,8 +22,6 @@ pub struct DhcpScore { | ||||
|     pub filename: Option<String>, | ||||
|     pub filename64: Option<String>, | ||||
|     pub filenameipxe: Option<String>, | ||||
|     pub dhcp_range: (IpAddress, IpAddress), | ||||
|     pub domain: Option<String>, | ||||
| } | ||||
| 
 | ||||
| impl<T: Topology + DhcpServer> Score<T> for DhcpScore { | ||||
| @ -54,6 +52,48 @@ impl DhcpInterpret { | ||||
|             status: InterpretStatus::QUEUED, | ||||
|         } | ||||
|     } | ||||
|     async fn add_static_entries<D: DhcpServer>( | ||||
|         &self, | ||||
|         _inventory: &Inventory, | ||||
|         dhcp_server: &D, | ||||
|     ) -> Result<Outcome, InterpretError> { | ||||
|         let dhcp_entries: Vec<DHCPStaticEntry> = self | ||||
|             .score | ||||
|             .host_binding | ||||
|             .iter() | ||||
|             .map(|binding| { | ||||
|                 let ip = match binding.logical_host.ip { | ||||
|                     std::net::IpAddr::V4(ipv4) => ipv4, | ||||
|                     std::net::IpAddr::V6(_) => { | ||||
|                         unimplemented!("DHCPStaticEntry only supports ipv4 at the moment") | ||||
|                     } | ||||
|                 }; | ||||
| 
 | ||||
|                 DHCPStaticEntry { | ||||
|                     name: binding.logical_host.name.clone(), | ||||
|                     mac: binding.physical_host.cluster_mac(), | ||||
|                     ip, | ||||
|                 } | ||||
|             }) | ||||
|             .collect(); | ||||
|         info!("DHCPStaticEntry : {:?}", dhcp_entries); | ||||
| 
 | ||||
|         info!("DHCP server : {:?}", dhcp_server); | ||||
| 
 | ||||
|         let number_new_entries = dhcp_entries.len(); | ||||
| 
 | ||||
|         for entry in dhcp_entries.into_iter() { | ||||
|             match dhcp_server.add_static_mapping(&entry).await { | ||||
|                 Ok(_) => info!("Successfully registered DHCPStaticEntry {}", entry), | ||||
|                 Err(_) => todo!(), | ||||
|             } | ||||
|         } | ||||
| 
 | ||||
|         Ok(Outcome::new( | ||||
|             InterpretStatus::SUCCESS, | ||||
|             format!("Dhcp Interpret registered {} entries", number_new_entries), | ||||
|         )) | ||||
|     } | ||||
| 
 | ||||
|     async fn set_pxe_options<D: DhcpServer>( | ||||
|         &self, | ||||
| @ -84,7 +124,7 @@ impl DhcpInterpret { | ||||
| } | ||||
| 
 | ||||
| #[async_trait] | ||||
| impl<T: Topology + DhcpServer> Interpret<T> for DhcpInterpret { | ||||
| impl<T: DhcpServer> Interpret<T> for DhcpInterpret { | ||||
|     fn get_name(&self) -> InterpretName { | ||||
|         InterpretName::OPNSenseDHCP | ||||
|     } | ||||
| @ -109,16 +149,8 @@ impl<T: Topology + DhcpServer> Interpret<T> for DhcpInterpret { | ||||
|         info!("Executing DhcpInterpret on inventory {inventory:?}"); | ||||
| 
 | ||||
|         self.set_pxe_options(inventory, topology).await?; | ||||
|         topology | ||||
|             .set_dhcp_range(&self.score.dhcp_range.0, &self.score.dhcp_range.1) | ||||
|             .await?; | ||||
| 
 | ||||
|         DhcpHostBindingScore { | ||||
|             host_binding: self.score.host_binding.clone(), | ||||
|             domain: self.score.domain.clone(), | ||||
|         } | ||||
|         .interpret(inventory, topology) | ||||
|         .await?; | ||||
|         self.add_static_entries(inventory, topology).await?; | ||||
| 
 | ||||
|         topology.commit_config().await?; | ||||
| 
 | ||||
| @ -128,120 +160,3 @@ impl<T: Topology + DhcpServer> Interpret<T> for DhcpInterpret { | ||||
|         )) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[derive(Debug, new, Clone, Serialize)] | ||||
| pub struct DhcpHostBindingScore { | ||||
|     pub host_binding: Vec<HostBinding>, | ||||
|     pub domain: Option<String>, | ||||
| } | ||||
| 
 | ||||
| impl<T: Topology + DhcpServer> Score<T> for DhcpHostBindingScore { | ||||
|     fn create_interpret(&self) -> Box<dyn Interpret<T>> { | ||||
|         Box::new(DhcpHostBindingInterpret { | ||||
|             score: self.clone(), | ||||
|         }) | ||||
|     } | ||||
| 
 | ||||
|     fn name(&self) -> String { | ||||
|         "DhcpHostBindingScore".to_string() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| // https://docs.opnsense.org/manual/dhcp.html#advanced-settings
 | ||||
| #[derive(Debug, Clone)] | ||||
| pub struct DhcpHostBindingInterpret { | ||||
|     score: DhcpHostBindingScore, | ||||
| } | ||||
| 
 | ||||
| impl DhcpHostBindingInterpret { | ||||
|     async fn add_static_entries<D: DhcpServer>( | ||||
|         &self, | ||||
|         _inventory: &Inventory, | ||||
|         dhcp_server: &D, | ||||
|     ) -> Result<Outcome, InterpretError> { | ||||
|         let dhcp_entries: Vec<DHCPStaticEntry> = self | ||||
|             .score | ||||
|             .host_binding | ||||
|             .iter() | ||||
|             .map(|binding| { | ||||
|                 let ip = match binding.logical_host.ip { | ||||
|                     std::net::IpAddr::V4(ipv4) => ipv4, | ||||
|                     std::net::IpAddr::V6(_) => { | ||||
|                         unimplemented!("DHCPStaticEntry only supports ipv4 at the moment") | ||||
|                     } | ||||
|                 }; | ||||
| 
 | ||||
|                 let name = if let Some(domain) = self.score.domain.as_ref() { | ||||
|                     format!("{}.{}", binding.logical_host.name, domain) | ||||
|                 } else { | ||||
|                     binding.logical_host.name.clone() | ||||
|                 }; | ||||
| 
 | ||||
|                 DHCPStaticEntry { | ||||
|                     name, | ||||
|                     mac: binding.physical_host.get_mac_address(), | ||||
|                     ip, | ||||
|                 } | ||||
|             }) | ||||
|             .collect(); | ||||
|         info!("DHCPStaticEntry : {:?}", dhcp_entries); | ||||
| 
 | ||||
|         trace!("DHCP server : {:?}", dhcp_server); | ||||
| 
 | ||||
|         let number_new_entries = dhcp_entries.len(); | ||||
| 
 | ||||
|         for entry in dhcp_entries.into_iter() { | ||||
|             match dhcp_server.add_static_mapping(&entry).await { | ||||
|                 Ok(_) => info!("Successfully registered DHCPStaticEntry {}", entry), | ||||
|                 Err(_) => todo!(), | ||||
|             } | ||||
|         } | ||||
| 
 | ||||
|         Ok(Outcome::new( | ||||
|             InterpretStatus::SUCCESS, | ||||
|             format!("Dhcp Interpret registered {} entries", number_new_entries), | ||||
|         )) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[async_trait] | ||||
| impl<T: DhcpServer> Interpret<T> for DhcpHostBindingInterpret { | ||||
|     fn get_name(&self) -> InterpretName { | ||||
|         InterpretName::Custom("DhcpHostBindingInterpret") | ||||
|     } | ||||
| 
 | ||||
|     fn get_version(&self) -> crate::domain::data::Version { | ||||
|         Version::from("1.0.0").unwrap() | ||||
|     } | ||||
| 
 | ||||
|     fn get_status(&self) -> InterpretStatus { | ||||
|         todo!() | ||||
|     } | ||||
| 
 | ||||
|     fn get_children(&self) -> Vec<Id> { | ||||
|         todo!() | ||||
|     } | ||||
| 
 | ||||
|     async fn execute( | ||||
|         &self, | ||||
|         inventory: &Inventory, | ||||
|         topology: &T, | ||||
|     ) -> Result<Outcome, InterpretError> { | ||||
|         info!( | ||||
|             "Executing DhcpHostBindingInterpret on {} bindings", | ||||
|             self.score.host_binding.len() | ||||
|         ); | ||||
| 
 | ||||
|         self.add_static_entries(inventory, topology).await?; | ||||
| 
 | ||||
|         topology.commit_config().await?; | ||||
| 
 | ||||
|         Ok(Outcome::new( | ||||
|             InterpretStatus::SUCCESS, | ||||
|             format!( | ||||
|                 "Dhcp Host Binding Interpret execution successful on {} hosts", | ||||
|                 self.score.host_binding.len() | ||||
|             ), | ||||
|         )) | ||||
|     } | ||||
| } | ||||
|  | ||||
| @ -3,14 +3,14 @@ use derive_new::new; | ||||
| use serde::Serialize; | ||||
| 
 | ||||
| use crate::{ | ||||
|     data::{FileContent, FilePath, Version}, | ||||
|     data::{FileContent, Version}, | ||||
|     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, | ||||
|     inventory::Inventory, | ||||
|     score::Score, | ||||
|     topology::{HttpServer, Topology}, | ||||
| }; | ||||
| use harmony_types::id::Id; | ||||
| use harmony_types::net::Url; | ||||
| use harmony_types::{id::Id, net::MacAddress}; | ||||
| 
 | ||||
| /// Configure an HTTP server that is provided by the Topology
 | ||||
| ///
 | ||||
| @ -25,11 +25,8 @@ use harmony_types::{id::Id, net::MacAddress}; | ||||
| /// ```
 | ||||
| #[derive(Debug, new, Clone, Serialize)] | ||||
| pub struct StaticFilesHttpScore { | ||||
|     // TODO this should be split in two scores, one for folder and
 | ||||
|     // other for files
 | ||||
|     pub folder_to_serve: Option<Url>, | ||||
|     pub files: Vec<FileContent>, | ||||
|     pub remote_path: Option<String>, | ||||
| } | ||||
| 
 | ||||
| impl<T: Topology + HttpServer> Score<T> for StaticFilesHttpScore { | ||||
| @ -57,9 +54,7 @@ impl<T: Topology + HttpServer> Interpret<T> for StaticFilesHttpInterpret { | ||||
|         http_server.ensure_initialized().await?; | ||||
|         // http_server.set_ip(topology.router.get_gateway()).await?;
 | ||||
|         if let Some(folder) = self.score.folder_to_serve.as_ref() { | ||||
|             http_server | ||||
|                 .serve_files(folder, &self.score.remote_path) | ||||
|                 .await?; | ||||
|             http_server.serve_files(folder).await?; | ||||
|         } | ||||
| 
 | ||||
|         for f in self.score.files.iter() { | ||||
| @ -96,34 +91,3 @@ impl<T: Topology + HttpServer> Interpret<T> for StaticFilesHttpInterpret { | ||||
|         todo!() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[derive(Debug, new, Clone, Serialize)] | ||||
| pub struct IPxeMacBootFileScore { | ||||
|     pub content: String, | ||||
|     pub mac_address: Vec<MacAddress>, | ||||
| } | ||||
| 
 | ||||
| impl<T: Topology + HttpServer> Score<T> for IPxeMacBootFileScore { | ||||
|     fn name(&self) -> String { | ||||
|         "IPxeMacBootFileScore".to_string() | ||||
|     } | ||||
| 
 | ||||
|     fn create_interpret(&self) -> Box<dyn Interpret<T>> { | ||||
|         StaticFilesHttpScore { | ||||
|             remote_path: None, | ||||
|             folder_to_serve: None, | ||||
|             files: self | ||||
|                 .mac_address | ||||
|                 .iter() | ||||
|                 .map(|mac| FileContent { | ||||
|                     path: FilePath::Relative(format!( | ||||
|                         "byMAC/01-{}.ipxe", | ||||
|                         mac.to_string().replace(":", "-") | ||||
|                     )), | ||||
|                     content: self.content.clone(), | ||||
|                 }) | ||||
|                 .collect(), | ||||
|         } | ||||
|         .create_interpret() | ||||
|     } | ||||
| } | ||||
|  | ||||
| @ -1,122 +0,0 @@ | ||||
| use async_trait::async_trait; | ||||
| use harmony_types::id::Id; | ||||
| use log::{error, info}; | ||||
| use serde::{Deserialize, Serialize}; | ||||
| 
 | ||||
| use crate::{ | ||||
|     data::Version, | ||||
|     hardware::PhysicalHost, | ||||
|     infra::inventory::InventoryRepositoryFactory, | ||||
|     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, | ||||
|     inventory::{HostRole, Inventory}, | ||||
|     modules::inventory::LaunchDiscoverInventoryAgentScore, | ||||
|     score::Score, | ||||
|     topology::Topology, | ||||
| }; | ||||
| 
 | ||||
| #[derive(Debug, Clone, Serialize, Deserialize)] | ||||
| pub struct DiscoverHostForRoleScore { | ||||
|     pub role: HostRole, | ||||
| } | ||||
| 
 | ||||
| impl<T: Topology> Score<T> for DiscoverHostForRoleScore { | ||||
|     fn name(&self) -> String { | ||||
|         "DiscoverInventoryAgentScore".to_string() | ||||
|     } | ||||
| 
 | ||||
|     fn create_interpret(&self) -> Box<dyn Interpret<T>> { | ||||
|         Box::new(DiscoverHostForRoleInterpret { | ||||
|             score: self.clone(), | ||||
|         }) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[derive(Debug)] | ||||
| pub struct DiscoverHostForRoleInterpret { | ||||
|     score: DiscoverHostForRoleScore, | ||||
| } | ||||
| 
 | ||||
| #[async_trait] | ||||
| impl<T: Topology> Interpret<T> for DiscoverHostForRoleInterpret { | ||||
|     async fn execute( | ||||
|         &self, | ||||
|         inventory: &Inventory, | ||||
|         topology: &T, | ||||
|     ) -> Result<Outcome, InterpretError> { | ||||
|         info!( | ||||
|             "Launching discovery agent, make sure that your nodes are successfully PXE booted and running inventory agent. They should answer on `http://<node_ip>:8080/inventory`" | ||||
|         ); | ||||
|         LaunchDiscoverInventoryAgentScore { | ||||
|             discovery_timeout: None, | ||||
|         } | ||||
|         .interpret(inventory, topology) | ||||
|         .await?; | ||||
| 
 | ||||
|         let host: PhysicalHost; | ||||
|         let host_repo = InventoryRepositoryFactory::build().await?; | ||||
| 
 | ||||
|         loop { | ||||
|             let all_hosts = host_repo.get_all_hosts().await?; | ||||
| 
 | ||||
|             if all_hosts.is_empty() { | ||||
|                 info!("No discovered hosts found yet. Waiting for hosts to appear..."); | ||||
|                 // Sleep to avoid spamming the user and logs while waiting for nodes.
 | ||||
|                 tokio::time::sleep(std::time::Duration::from_secs(3)).await; | ||||
|                 continue; | ||||
|             } | ||||
| 
 | ||||
|             let ans = inquire::Select::new( | ||||
|                 &format!("Select the node to be used for role {:?}:", self.score.role), | ||||
|                 all_hosts, | ||||
|             ) | ||||
|             .with_help_message("Press Esc to refresh the list of discovered hosts") | ||||
|             .prompt(); | ||||
| 
 | ||||
|             match ans { | ||||
|                 Ok(choice) => { | ||||
|                     info!("Selected {} as the bootstrap node.", choice.summary()); | ||||
|                     host_repo | ||||
|                         .save_role_mapping(&self.score.role, &choice) | ||||
|                         .await?; | ||||
|                     host = choice; | ||||
|                     break; | ||||
|                 } | ||||
|                 Err(inquire::InquireError::OperationCanceled) => { | ||||
|                     info!("Refresh requested. Fetching list of discovered hosts again..."); | ||||
|                     continue; | ||||
|                 } | ||||
|                 Err(e) => { | ||||
|                     error!( | ||||
|                         "Failed to select node for role {:?} : {}", | ||||
|                         self.score.role, e | ||||
|                     ); | ||||
|                     return Err(InterpretError::new(format!( | ||||
|                         "Could not select host : {}", | ||||
|                         e.to_string() | ||||
|                     ))); | ||||
|                 } | ||||
|             } | ||||
|         } | ||||
| 
 | ||||
|         Ok(Outcome::success(format!( | ||||
|             "Successfully discovered host {} for role {:?}", | ||||
|             host.summary(), | ||||
|             self.score.role | ||||
|         ))) | ||||
|     } | ||||
|     fn get_name(&self) -> InterpretName { | ||||
|         InterpretName::Custom("DiscoverHostForRoleScore") | ||||
|     } | ||||
| 
 | ||||
|     fn get_version(&self) -> Version { | ||||
|         todo!() | ||||
|     } | ||||
| 
 | ||||
|     fn get_status(&self) -> InterpretStatus { | ||||
|         todo!() | ||||
|     } | ||||
| 
 | ||||
|     fn get_children(&self) -> Vec<Id> { | ||||
|         todo!() | ||||
|     } | ||||
| } | ||||
| @ -1,72 +0,0 @@ | ||||
| use async_trait::async_trait; | ||||
| use harmony_types::id::Id; | ||||
| use log::info; | ||||
| use serde::{Deserialize, Serialize}; | ||||
| use strum::IntoEnumIterator; | ||||
| 
 | ||||
| use crate::{ | ||||
|     data::Version, | ||||
|     infra::inventory::InventoryRepositoryFactory, | ||||
|     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, | ||||
|     inventory::{HostRole, Inventory}, | ||||
|     score::Score, | ||||
|     topology::Topology, | ||||
| }; | ||||
| 
 | ||||
| #[derive(Debug, Serialize, Deserialize, Clone)] | ||||
| pub struct InspectInventoryScore {} | ||||
| 
 | ||||
| impl<T: Topology> Score<T> for InspectInventoryScore { | ||||
|     fn name(&self) -> String { | ||||
|         "InspectInventoryScore".to_string() | ||||
|     } | ||||
| 
 | ||||
|     #[doc(hidden)] | ||||
|     fn create_interpret(&self) -> Box<dyn Interpret<T>> { | ||||
|         Box::new(InspectInventoryInterpret {}) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[derive(Debug)] | ||||
| pub struct InspectInventoryInterpret; | ||||
| 
 | ||||
| #[async_trait] | ||||
| impl<T: Topology> Interpret<T> for InspectInventoryInterpret { | ||||
|     async fn execute( | ||||
|         &self, | ||||
|         _inventory: &Inventory, | ||||
|         _topology: &T, | ||||
|     ) -> Result<Outcome, InterpretError> { | ||||
|         let repo = InventoryRepositoryFactory::build().await?; | ||||
|         for role in HostRole::iter() { | ||||
|             info!("Inspecting hosts for role {role:?}"); | ||||
|             let hosts = repo.get_host_for_role(&role).await?; | ||||
|             info!("Hosts with role {role:?} : {}", hosts.len()); | ||||
|             hosts.iter().enumerate().for_each(|(idx, h)| { | ||||
|                 info!( | ||||
|                     "Found host index {idx} with role {role:?} => \n{}\n{}", | ||||
|                     h.summary(), | ||||
|                     h.parts_list() | ||||
|                 ) | ||||
|             }); | ||||
|         } | ||||
|         Ok(Outcome::success( | ||||
|             "Inventory inspection complete".to_string(), | ||||
|         )) | ||||
|     } | ||||
|     fn get_name(&self) -> InterpretName { | ||||
|         InterpretName::Custom("InspectInventoryInterpret") | ||||
|     } | ||||
| 
 | ||||
|     fn get_version(&self) -> Version { | ||||
|         todo!() | ||||
|     } | ||||
| 
 | ||||
|     fn get_status(&self) -> InterpretStatus { | ||||
|         todo!() | ||||
|     } | ||||
| 
 | ||||
|     fn get_children(&self) -> Vec<Id> { | ||||
|         todo!() | ||||
|     } | ||||
| } | ||||
| @ -1,7 +1,3 @@ | ||||
| mod discovery; | ||||
| pub mod inspect; | ||||
| pub use discovery::*; | ||||
| 
 | ||||
| use async_trait::async_trait; | ||||
| use harmony_inventory_agent::local_presence::DiscoveryEvent; | ||||
| use log::{debug, info, trace}; | ||||
| @ -22,11 +18,11 @@ use harmony_types::id::Id; | ||||
| /// This will allow us to register/update hosts running harmony_inventory_agent
 | ||||
| /// from LAN in the Harmony inventory
 | ||||
| #[derive(Debug, Clone, Serialize, Deserialize)] | ||||
| pub struct LaunchDiscoverInventoryAgentScore { | ||||
| pub struct DiscoverInventoryAgentScore { | ||||
|     pub discovery_timeout: Option<u64>, | ||||
| } | ||||
| 
 | ||||
| impl<T: Topology> Score<T> for LaunchDiscoverInventoryAgentScore { | ||||
| impl<T: Topology> Score<T> for DiscoverInventoryAgentScore { | ||||
|     fn name(&self) -> String { | ||||
|         "DiscoverInventoryAgentScore".to_string() | ||||
|     } | ||||
| @ -40,7 +36,7 @@ impl<T: Topology> Score<T> for LaunchDiscoverInventoryAgentScore { | ||||
| 
 | ||||
| #[derive(Debug)] | ||||
| struct DiscoverInventoryAgentInterpret { | ||||
|     score: LaunchDiscoverInventoryAgentScore, | ||||
|     score: DiscoverInventoryAgentScore, | ||||
| } | ||||
| 
 | ||||
| #[async_trait] | ||||
| @ -50,13 +46,6 @@ impl<T: Topology> Interpret<T> for DiscoverInventoryAgentInterpret { | ||||
|         _inventory: &Inventory, | ||||
|         _topology: &T, | ||||
|     ) -> Result<Outcome, InterpretError> { | ||||
|         match self.score.discovery_timeout { | ||||
|             Some(timeout) => info!("Discovery agent will wait for {timeout} seconds"), | ||||
|             None => info!( | ||||
|                 "Discovery agent will wait forever in the background, go on and enjoy this delicious inventory." | ||||
|             ), | ||||
|         }; | ||||
| 
 | ||||
|         harmony_inventory_agent::local_presence::discover_agents( | ||||
|             self.score.discovery_timeout, | ||||
|             |event: DiscoveryEvent| -> Result<(), String> { | ||||
|  | ||||
							
								
								
									
										67
									
								
								harmony/src/modules/ipxe.rs
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										67
									
								
								harmony/src/modules/ipxe.rs
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,67 @@ | ||||
| use async_trait::async_trait; | ||||
| use derive_new::new; | ||||
| use serde::Serialize; | ||||
| 
 | ||||
| use crate::{ | ||||
|     data::Version, | ||||
|     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, | ||||
|     inventory::Inventory, | ||||
|     score::Score, | ||||
|     topology::Topology, | ||||
| }; | ||||
| use harmony_types::id::Id; | ||||
| 
 | ||||
| #[derive(Debug, new, Clone, Serialize)] | ||||
| pub struct IpxeScore { | ||||
|     //files_to_serve: Url,
 | ||||
| } | ||||
| 
 | ||||
| impl<T: Topology> Score<T> for IpxeScore { | ||||
|     fn create_interpret(&self) -> Box<dyn Interpret<T>> { | ||||
|         Box::new(IpxeInterpret::new(self.clone())) | ||||
|     } | ||||
| 
 | ||||
|     fn name(&self) -> String { | ||||
|         "IpxeScore".to_string() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
/// Interpret for [`IpxeScore`]; the serving logic is not implemented yet.
#[derive(Debug, new, Clone)]
pub struct IpxeInterpret {
    // Held for future use; prefixed with _ to silence the unused warning.
    _score: IpxeScore,
}

#[async_trait]
impl<T: Topology> Interpret<T> for IpxeInterpret {
    // NOTE(review): execute is a stub — it panics via todo!(). The commented
    // code sketches the intended behavior (initialize the topology's HTTP
    // server and report which files it serves) once that wiring exists.
    async fn execute(
        &self,
        _inventory: &Inventory,
        _topology: &T,
    ) -> Result<Outcome, InterpretError> {
        /*
        let http_server = &topology.http_server;
        http_server.ensure_initialized().await?;
        Ok(Outcome::success(format!(
            "Http Server running and serving files from {}",
            self.score.files_to_serve
        )))
        */
        todo!();
    }

    fn get_name(&self) -> InterpretName {
        InterpretName::Ipxe
    }

    // Version, status and children reporting are not implemented for this
    // stub interpret.
    fn get_version(&self) -> Version {
        todo!()
    }

    fn get_status(&self) -> InterpretStatus {
        todo!()
    }

    fn get_children(&self) -> Vec<Id> {
        todo!()
    }
}
| @ -6,6 +6,7 @@ pub mod dummy; | ||||
| pub mod helm; | ||||
| pub mod http; | ||||
| pub mod inventory; | ||||
| pub mod ipxe; | ||||
| pub mod k3d; | ||||
| pub mod k8s; | ||||
| pub mod lamp; | ||||
|  | ||||
| @ -1,120 +0,0 @@ | ||||
| use async_trait::async_trait; | ||||
| use derive_new::new; | ||||
| use harmony_types::id::Id; | ||||
| use log::{error, info, warn}; | ||||
| use serde::Serialize; | ||||
| 
 | ||||
| use crate::{ | ||||
|     data::Version, | ||||
|     hardware::PhysicalHost, | ||||
|     infra::inventory::InventoryRepositoryFactory, | ||||
|     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, | ||||
|     inventory::{HostRole, Inventory}, | ||||
|     modules::inventory::{DiscoverHostForRoleScore, LaunchDiscoverInventoryAgentScore}, | ||||
|     score::Score, | ||||
|     topology::HAClusterTopology, | ||||
| }; | ||||
| // -------------------------------------------------------------------------------------------------
 | ||||
| // Step 01: Inventory (default PXE + Kickstart in RAM + Rust agent)
 | ||||
| // - This score exposes/ensures the default inventory assets and waits for discoveries.
 | ||||
| // - No early bonding. Simple access DHCP.
 | ||||
| // -------------------------------------------------------------------------------------------------
 | ||||
| 
 | ||||
| #[derive(Debug, Clone, Serialize, new)] | ||||
| pub struct OKDSetup01InventoryScore {} | ||||
| 
 | ||||
| impl Score<HAClusterTopology> for OKDSetup01InventoryScore { | ||||
|     fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> { | ||||
|         Box::new(OKDSetup01InventoryInterpret::new(self.clone())) | ||||
|     } | ||||
| 
 | ||||
|     fn name(&self) -> String { | ||||
|         "OKDSetup01InventoryScore".to_string() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[derive(Debug, Clone)] | ||||
| pub struct OKDSetup01InventoryInterpret { | ||||
|     score: OKDSetup01InventoryScore, | ||||
|     version: Version, | ||||
|     status: InterpretStatus, | ||||
| } | ||||
| 
 | ||||
| impl OKDSetup01InventoryInterpret { | ||||
|     pub fn new(score: OKDSetup01InventoryScore) -> Self { | ||||
|         let version = Version::from("1.0.0").unwrap(); | ||||
|         Self { | ||||
|             version, | ||||
|             score, | ||||
|             status: InterpretStatus::QUEUED, | ||||
|         } | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[async_trait] | ||||
| impl Interpret<HAClusterTopology> for OKDSetup01InventoryInterpret { | ||||
|     fn get_name(&self) -> InterpretName { | ||||
|         InterpretName::Custom("OKDSetup01Inventory") | ||||
|     } | ||||
| 
 | ||||
|     fn get_version(&self) -> Version { | ||||
|         self.version.clone() | ||||
|     } | ||||
| 
 | ||||
|     fn get_status(&self) -> InterpretStatus { | ||||
|         self.status.clone() | ||||
|     } | ||||
| 
 | ||||
|     fn get_children(&self) -> Vec<Id> { | ||||
|         vec![] | ||||
|     } | ||||
| 
 | ||||
|     async fn execute( | ||||
|         &self, | ||||
|         inventory: &Inventory, | ||||
|         topology: &HAClusterTopology, | ||||
|     ) -> Result<Outcome, InterpretError> { | ||||
|         info!("Setting up base DNS config for OKD"); | ||||
|         let cluster_domain = &topology.domain_name; | ||||
|         let load_balancer_ip = &topology.load_balancer.get_ip(); | ||||
|         inquire::Confirm::new(&format!( | ||||
|             "Set hostnames manually in your opnsense dnsmasq config :
 | ||||
| *.apps.{cluster_domain} -> {load_balancer_ip} | ||||
| api.{cluster_domain} -> {load_balancer_ip} | ||||
| api-int.{cluster_domain} -> {load_balancer_ip} | ||||
| 
 | ||||
| When you can dig them, confirm to continue. | ||||
| " | ||||
|         )) | ||||
|         .prompt() | ||||
|         .expect("Prompt error"); | ||||
|         // TODO reactivate automatic dns config when migration from unbound to dnsmasq is done
 | ||||
|         // OKDDnsScore::new(topology)
 | ||||
|         //     .interpret(inventory, topology)
 | ||||
|         //     .await?;
 | ||||
| 
 | ||||
|         // TODO refactor this section into a function discover_hosts_for_role(...) that can be used
 | ||||
|         // from anywhere in the project, not a member of this struct
 | ||||
| 
 | ||||
|         let mut bootstrap_host: Option<PhysicalHost> = None; | ||||
|         let repo = InventoryRepositoryFactory::build().await?; | ||||
| 
 | ||||
|         while bootstrap_host.is_none() { | ||||
|             let hosts = repo.get_host_for_role(&HostRole::Bootstrap).await?; | ||||
|             bootstrap_host = hosts.into_iter().next().to_owned(); | ||||
|             DiscoverHostForRoleScore { | ||||
|                 role: HostRole::Bootstrap, | ||||
|             } | ||||
|             .interpret(inventory, topology) | ||||
|             .await?; | ||||
|         } | ||||
| 
 | ||||
|         Ok(Outcome::new( | ||||
|             InterpretStatus::SUCCESS, | ||||
|             format!( | ||||
|                 "Found and assigned bootstrap node: {}", | ||||
|                 bootstrap_host.unwrap().summary() | ||||
|             ), | ||||
|         )) | ||||
|     } | ||||
| } | ||||
| @ -1,387 +0,0 @@ | ||||
| use std::{fmt::Write, path::PathBuf}; | ||||
| 
 | ||||
| use async_trait::async_trait; | ||||
| use derive_new::new; | ||||
| use harmony_secret::SecretManager; | ||||
| use harmony_types::id::Id; | ||||
| use log::{debug, error, info, warn}; | ||||
| use serde::{Deserialize, Serialize}; | ||||
| use tokio::{fs::File, io::AsyncWriteExt, process::Command}; | ||||
| 
 | ||||
| use crate::{ | ||||
|     config::secret::{RedhatSecret, SshKeyPair}, | ||||
|     data::{FileContent, FilePath, Version}, | ||||
|     hardware::PhysicalHost, | ||||
|     infra::inventory::InventoryRepositoryFactory, | ||||
|     instrumentation::{HarmonyEvent, instrument}, | ||||
|     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, | ||||
|     inventory::{HostRole, Inventory}, | ||||
|     modules::{ | ||||
|         dhcp::DhcpHostBindingScore, | ||||
|         http::{IPxeMacBootFileScore, StaticFilesHttpScore}, | ||||
|         inventory::LaunchDiscoverInventoryAgentScore, | ||||
|         okd::{ | ||||
|             bootstrap_load_balancer::OKDBootstrapLoadBalancerScore, | ||||
|             templates::{BootstrapIpxeTpl, InstallConfigYaml}, | ||||
|         }, | ||||
|     }, | ||||
|     score::Score, | ||||
|     topology::{HAClusterTopology, HostBinding}, | ||||
| }; | ||||
| // -------------------------------------------------------------------------------------------------
 | ||||
| // Step 02: Bootstrap
 | ||||
| // - Select bootstrap node (from discovered set).
 | ||||
| // - Render per-MAC iPXE pointing to OKD 4.19 SCOS live assets + bootstrap ignition.
 | ||||
| // - Reboot the host via SSH and wait for bootstrap-complete.
 | ||||
| // - No bonding at this stage unless absolutely required; prefer persistence via MC later.
 | ||||
| // -------------------------------------------------------------------------------------------------
 | ||||
| 
 | ||||
| #[derive(Debug, Clone, Serialize, new)] | ||||
| pub struct OKDSetup02BootstrapScore {} | ||||
| 
 | ||||
| impl Score<HAClusterTopology> for OKDSetup02BootstrapScore { | ||||
|     fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> { | ||||
|         Box::new(OKDSetup02BootstrapInterpret::new()) | ||||
|     } | ||||
| 
 | ||||
|     fn name(&self) -> String { | ||||
|         "OKDSetup02BootstrapScore".to_string() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
/// Interpret backing `OKDSetup02BootstrapScore`.
#[derive(Debug, Clone)]
pub struct OKDSetup02BootstrapInterpret {
    // Reported through Interpret::get_version.
    version: Version,
    // Lifecycle status; initialized to QUEUED by new().
    status: InterpretStatus,
}
| 
 | ||||
| impl OKDSetup02BootstrapInterpret { | ||||
|     pub fn new() -> Self { | ||||
|         let version = Version::from("1.0.0").unwrap(); | ||||
|         Self { | ||||
|             version, | ||||
|             status: InterpretStatus::QUEUED, | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     async fn get_bootstrap_node(&self) -> Result<PhysicalHost, InterpretError> { | ||||
|         let repo = InventoryRepositoryFactory::build().await?; | ||||
|         match repo | ||||
|             .get_host_for_role(&HostRole::Bootstrap) | ||||
|             .await? | ||||
|             .into_iter() | ||||
|             .next() | ||||
|         { | ||||
|             Some(host) => Ok(host), | ||||
|             None => Err(InterpretError::new( | ||||
|                 "No bootstrap node available".to_string(), | ||||
|             )), | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
    /// Generates the OKD `install-config.yaml`, manifests and ignition files
    /// with `openshift-install`, then publishes the ignition files through
    /// the topology's HTTP server.
    ///
    /// NOTE(review): assumes `./data/okd/bin/openshift-install` exists
    /// locally and prompts the operator to scp the installer images by hand
    /// — confirm both prerequisites before running.
    ///
    /// # Errors
    /// Fails if directory creation, secret retrieval, either
    /// `openshift-install` invocation, reading a generated file, or serving
    /// the files over HTTP fails.
    async fn prepare_ignition_files(
        &self,
        inventory: &Inventory,
        topology: &HAClusterTopology,
    ) -> Result<(), InterpretError> {
        let okd_bin_path = PathBuf::from("./data/okd/bin");
        // Installation files are namespaced by inventory location so several
        // clusters can be prepared side by side.
        let okd_installation_path_str =
            format!("./data/okd/installation_files_{}", inventory.location.name);
        let okd_images_path = &PathBuf::from("./data/okd/installer_image/");
        let okd_installation_path = &PathBuf::from(okd_installation_path_str);

        // mkdir -p: idempotent creation of the installation directory.
        let exit_status = Command::new("mkdir")
            .arg("-p")
            .arg(okd_installation_path)
            .spawn()
            .expect("Command failed to start")
            .wait()
            .await
            .map_err(|e| {
                InterpretError::new(format!("Failed to create okd installation directory : {e}"))
            })?;
        if !exit_status.success() {
            return Err(InterpretError::new(format!(
                "Failed to create okd installation directory"
            )));
        } else {
            info!(
                "Created OKD installation directory {}",
                okd_installation_path.to_string_lossy()
            );
        }

        // Pull secret and SSH keypair come from the secret manager, prompting
        // the operator if they are not stored yet.
        let redhat_secret = SecretManager::get_or_prompt::<RedhatSecret>().await?;
        let ssh_key = SecretManager::get_or_prompt::<SshKeyPair>().await?;

        // Render install-config.yaml from the topology's cluster identity.
        let install_config_yaml = InstallConfigYaml {
            cluster_name: &topology.get_cluster_name(),
            cluster_domain: &topology.get_cluster_base_domain(),
            pull_secret: &redhat_secret.pull_secret,
            ssh_public_key: &ssh_key.public,
        }
        .to_string();

        let install_config_file_path = &okd_installation_path.join("install-config.yaml");

        self.create_file(install_config_file_path, install_config_yaml.as_bytes())
            .await?;

        // openshift-install consumes (deletes) install-config.yaml, so keep a
        // .bak copy alongside it for later reference.
        let install_config_backup_extension = install_config_file_path
            .extension()
            .map(|e| format!("{}.bak", e.to_string_lossy()))
            .unwrap_or("bak".to_string());

        let mut install_config_backup = install_config_file_path.clone();
        install_config_backup.set_extension(install_config_backup_extension);

        self.create_file(&install_config_backup, install_config_yaml.as_bytes())
            .await?;

        info!("Creating manifest files with openshift-install");
        let output = Command::new(okd_bin_path.join("openshift-install"))
            .args([
                "create",
                "manifests",
                "--dir",
                okd_installation_path.to_str().unwrap(),
            ])
            .output()
            .await
            .map_err(|e| InterpretError::new(format!("Failed to create okd manifest : {e}")))?;
        let stdout = String::from_utf8(output.stdout).unwrap();
        info!("openshift-install stdout :\n\n{}", stdout);
        let stderr = String::from_utf8(output.stderr).unwrap();
        info!("openshift-install stderr :\n\n{}", stderr);
        info!("openshift-install exit status : {}", output.status);
        if !output.status.success() {
            return Err(InterpretError::new(format!(
                "Failed to create okd manifest, exit code {} : {}",
                output.status, stderr
            )));
        }

        info!("Creating ignition files with openshift-install");
        let output = Command::new(okd_bin_path.join("openshift-install"))
            .args([
                "create",
                "ignition-configs",
                "--dir",
                okd_installation_path.to_str().unwrap(),
            ])
            .output()
            .await
            .map_err(|e| {
                InterpretError::new(format!("Failed to create okd ignition config : {e}"))
            })?;
        let stdout = String::from_utf8(output.stdout).unwrap();
        info!("openshift-install stdout :\n\n{}", stdout);
        let stderr = String::from_utf8(output.stderr).unwrap();
        info!("openshift-install stderr :\n\n{}", stderr);
        info!("openshift-install exit status : {}", output.status);
        if !output.status.success() {
            return Err(InterpretError::new(format!(
                "Failed to create okd manifest, exit code {} : {}",
                output.status, stderr
            )));
        }

        // Map a locally generated file to the relative path it will be
        // served under by the HTTP server.
        let ignition_files_http_path = PathBuf::from("okd_ignition_files");
        let prepare_file_content = async |filename: &str| -> Result<FileContent, InterpretError> {
            let local_path = okd_installation_path.join(filename);
            let remote_path = ignition_files_http_path.join(filename);

            info!(
                "Preparing file content for local file : {} to remote : {}",
                local_path.to_string_lossy(),
                remote_path.to_string_lossy()
            );

            let content = tokio::fs::read_to_string(&local_path).await.map_err(|e| {
                InterpretError::new(format!(
                    "Could not read file content {} : {e}",
                    local_path.to_string_lossy()
                ))
            })?;

            Ok(FileContent {
                path: FilePath::Relative(remote_path.to_string_lossy().to_string()),
                content,
            })
        };

        // Publish the generated ignition files over HTTP for the PXE-booted
        // nodes to fetch.
        StaticFilesHttpScore {
            remote_path: None,
            folder_to_serve: None,
            files: vec![
                prepare_file_content("bootstrap.ign").await?,
                prepare_file_content("master.ign").await?,
                prepare_file_content("worker.ign").await?,
                prepare_file_content("metadata.json").await?,
            ],
        }
        .interpret(inventory, topology)
        .await?;

        info!("Successfully prepared ignition files for OKD installation");
        // ignition_files_http_path // = PathBuf::from("okd_ignition_files");
        info!(
            r#"Uploading images, they can be refreshed with a command similar to this one: openshift-install coreos print-stream-json | grep -Eo '"https.*(kernel.|initramfs.|rootfs.)\w+(\.img)?"'  | grep x86_64 | xargs -n 1 curl -LO"#
        );

        // Manual step: automated upload of the SCOS installer images is
        // disabled below due to a performance issue.
        inquire::Confirm::new(
            &format!("push installer image files with `scp -r {}/* root@{}:/usr/local/http/scos/` until performance issue is resolved", okd_images_path.to_string_lossy(), topology.http_server.get_ip())).prompt().expect("Prompt error");

        // let scos_http_path = PathBuf::from("scos");
        // StaticFilesHttpScore {
        //     folder_to_serve: Some(Url::LocalFolder(
        //         okd_images_path.to_string_lossy().to_string(),
        //     )),
        //     remote_path: Some(scos_http_path.to_string_lossy().to_string()),
        //     files: vec![],
        // }
        // .interpret(inventory, topology)
        // .await?;

        Ok(())
    }
| 
 | ||||
|     async fn configure_host_binding( | ||||
|         &self, | ||||
|         inventory: &Inventory, | ||||
|         topology: &HAClusterTopology, | ||||
|     ) -> Result<(), InterpretError> { | ||||
|         let binding = HostBinding { | ||||
|             logical_host: topology.bootstrap_host.clone(), | ||||
|             physical_host: self.get_bootstrap_node().await?, | ||||
|         }; | ||||
|         info!("Configuring host binding for bootstrap node {binding:?}"); | ||||
| 
 | ||||
|         DhcpHostBindingScore { | ||||
|             host_binding: vec![binding], | ||||
|             domain: Some(topology.domain_name.clone()), | ||||
|         } | ||||
|         .interpret(inventory, topology) | ||||
|         .await?; | ||||
|         Ok(()) | ||||
|     } | ||||
| 
 | ||||
|     async fn render_per_mac_pxe( | ||||
|         &self, | ||||
|         inventory: &Inventory, | ||||
|         topology: &HAClusterTopology, | ||||
|     ) -> Result<(), InterpretError> { | ||||
|         let content = BootstrapIpxeTpl { | ||||
|             http_ip: &topology.http_server.get_ip().to_string(), | ||||
|             scos_path: "scos",                        // TODO use some constant
 | ||||
|             ignition_http_path: "okd_ignition_files", // TODO use proper variable
 | ||||
|             installation_device: "/dev/sda", | ||||
|             ignition_file_name: "bootstrap.ign", | ||||
|         } | ||||
|         .to_string(); | ||||
| 
 | ||||
|         let bootstrap_node = self.get_bootstrap_node().await?; | ||||
|         let mac_address = bootstrap_node.get_mac_address(); | ||||
| 
 | ||||
|         info!("[Bootstrap] Rendering per-MAC PXE for bootstrap node"); | ||||
|         debug!("bootstrap ipxe content : {content}"); | ||||
|         debug!("bootstrap mac addresses : {mac_address:?}"); | ||||
| 
 | ||||
|         IPxeMacBootFileScore { | ||||
|             mac_address, | ||||
|             content, | ||||
|         } | ||||
|         .interpret(inventory, topology) | ||||
|         .await?; | ||||
|         Ok(()) | ||||
|     } | ||||
| 
 | ||||
|     async fn setup_bootstrap_load_balancer( | ||||
|         &self, | ||||
|         inventory: &Inventory, | ||||
|         topology: &HAClusterTopology, | ||||
|     ) -> Result<(), InterpretError> { | ||||
|         let outcome = OKDBootstrapLoadBalancerScore::new(topology) | ||||
|             .interpret(inventory, topology) | ||||
|             .await?; | ||||
|         info!("Successfully executed OKDBootstrapLoadBalancerScore : {outcome:?}"); | ||||
|         Ok(()) | ||||
|     } | ||||
| 
 | ||||
|     async fn reboot_target(&self) -> Result<(), InterpretError> { | ||||
|         // Placeholder: ssh reboot using the inventory ephemeral key
 | ||||
|         info!("[Bootstrap] Rebooting bootstrap node via SSH"); | ||||
|         // TODO reboot programatically, there are some logical checks and refactoring to do such as
 | ||||
|         // accessing the bootstrap node config (ip address) from the inventory
 | ||||
|         let confirmation = inquire::Confirm::new( | ||||
|                 "Now reboot the bootstrap node so it picks up its pxe boot file. Press enter when ready.", | ||||
|         ) | ||||
|         .prompt() | ||||
|         .expect("Unexpected prompt error"); | ||||
|         Ok(()) | ||||
|     } | ||||
| 
 | ||||
    /// Waits for the OKD bootstrap process to report completion.
    ///
    /// Not implemented yet: this should eventually run the equivalent of
    /// `openshift-install wait-for bootstrap-complete`. Currently it logs its
    /// intent and then panics via `todo!`.
    async fn wait_for_bootstrap_complete(&self) -> Result<(), InterpretError> {
        // Placeholder: wait-for bootstrap-complete
        info!("[Bootstrap] Waiting for bootstrap-complete …");
        todo!("[Bootstrap] Waiting for bootstrap-complete …")
    }
| 
 | ||||
|     async fn create_file(&self, path: &PathBuf, content: &[u8]) -> Result<(), InterpretError> { | ||||
|         let mut install_config_file = File::create(path).await.map_err(|e| { | ||||
|             InterpretError::new(format!( | ||||
|                 "Could not create file {} : {e}", | ||||
|                 path.to_string_lossy() | ||||
|             )) | ||||
|         })?; | ||||
|         install_config_file.write(content).await.map_err(|e| { | ||||
|             InterpretError::new(format!( | ||||
|                 "Could not write file {} : {e}", | ||||
|                 path.to_string_lossy() | ||||
|             )) | ||||
|         })?; | ||||
|         Ok(()) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[async_trait] | ||||
| impl Interpret<HAClusterTopology> for OKDSetup02BootstrapInterpret { | ||||
|     fn get_name(&self) -> InterpretName { | ||||
|         InterpretName::Custom("OKDSetup02Bootstrap") | ||||
|     } | ||||
| 
 | ||||
|     fn get_version(&self) -> Version { | ||||
|         self.version.clone() | ||||
|     } | ||||
| 
 | ||||
|     fn get_status(&self) -> InterpretStatus { | ||||
|         self.status.clone() | ||||
|     } | ||||
| 
 | ||||
|     fn get_children(&self) -> Vec<Id> { | ||||
|         vec![] | ||||
|     } | ||||
| 
 | ||||
|     async fn execute( | ||||
|         &self, | ||||
|         inventory: &Inventory, | ||||
|         topology: &HAClusterTopology, | ||||
|     ) -> Result<Outcome, InterpretError> { | ||||
|         self.configure_host_binding(inventory, topology).await?; | ||||
|         self.prepare_ignition_files(inventory, topology).await?; | ||||
|         self.render_per_mac_pxe(inventory, topology).await?; | ||||
|         self.setup_bootstrap_load_balancer(inventory, topology) | ||||
|             .await?; | ||||
| 
 | ||||
|         // TODO https://docs.okd.io/latest/installing/installing_bare_metal/upi/installing-bare-metal.html#installation-user-provisioned-validating-dns_installing-bare-metal
 | ||||
|         // self.validate_dns_config(inventory, topology).await?;
 | ||||
| 
 | ||||
|         self.reboot_target().await?; | ||||
|         self.wait_for_bootstrap_complete().await?; | ||||
| 
 | ||||
|         Ok(Outcome::new( | ||||
|             InterpretStatus::SUCCESS, | ||||
|             "Bootstrap phase complete".into(), | ||||
|         )) | ||||
|     } | ||||
| } | ||||
| @ -1,277 +0,0 @@ | ||||
| use std::{fmt::Write, path::PathBuf}; | ||||
| 
 | ||||
| use async_trait::async_trait; | ||||
| use derive_new::new; | ||||
| use harmony_types::id::Id; | ||||
| use log::{debug, info}; | ||||
| use serde::Serialize; | ||||
| 
 | ||||
| use crate::{ | ||||
|     data::Version, | ||||
|     hardware::PhysicalHost, | ||||
|     infra::inventory::InventoryRepositoryFactory, | ||||
|     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, | ||||
|     inventory::{HostRole, Inventory}, | ||||
|     modules::{ | ||||
|         dhcp::DhcpHostBindingScore, http::IPxeMacBootFileScore, | ||||
|         inventory::DiscoverHostForRoleScore, okd::templates::BootstrapIpxeTpl, | ||||
|     }, | ||||
|     score::Score, | ||||
|     topology::{HAClusterTopology, HostBinding}, | ||||
| }; | ||||
| // -------------------------------------------------------------------------------------------------
 | ||||
| // Step 03: Control Plane
 | ||||
| // - Render per-MAC PXE & ignition for cp0/cp1/cp2.
 | ||||
| // - Persist bonding via MachineConfigs (or NNCP) once SCOS is active.
 | ||||
| // -------------------------------------------------------------------------------------------------
 | ||||
| 
 | ||||
/// Score for Step 03 of the OKD setup: provisioning the three control plane
/// (master) nodes via DHCP reservations and per-MAC iPXE boot files.
#[derive(Debug, Clone, Serialize, new)]
pub struct OKDSetup03ControlPlaneScore {}
| 
 | ||||
| impl Score<HAClusterTopology> for OKDSetup03ControlPlaneScore { | ||||
|     fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> { | ||||
|         Box::new(OKDSetup03ControlPlaneInterpret::new(self.clone())) | ||||
|     } | ||||
| 
 | ||||
|     fn name(&self) -> String { | ||||
|         "OKDSetup03ControlPlaneScore".to_string() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
/// Interpret executing [`OKDSetup03ControlPlaneScore`].
#[derive(Debug, Clone)]
pub struct OKDSetup03ControlPlaneInterpret {
    // The score this interpret was created from.
    score: OKDSetup03ControlPlaneScore,
    // Version reported through `get_version`.
    version: Version,
    // Lifecycle status reported through `get_status`.
    status: InterpretStatus,
}
| 
 | ||||
| impl OKDSetup03ControlPlaneInterpret { | ||||
|     pub fn new(score: OKDSetup03ControlPlaneScore) -> Self { | ||||
|         let version = Version::from("1.0.0").unwrap(); | ||||
|         Self { | ||||
|             version, | ||||
|             score, | ||||
|             status: InterpretStatus::QUEUED, | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     /// Ensures that three physical hosts are discovered and available for the ControlPlane role.
 | ||||
|     /// It will trigger discovery if not enough hosts are found.
 | ||||
|     async fn get_nodes( | ||||
|         &self, | ||||
|         inventory: &Inventory, | ||||
|         topology: &HAClusterTopology, | ||||
|     ) -> Result<Vec<PhysicalHost>, InterpretError> { | ||||
|         const REQUIRED_HOSTS: usize = 3; | ||||
|         let repo = InventoryRepositoryFactory::build().await?; | ||||
|         let mut control_plane_hosts = repo.get_host_for_role(&HostRole::ControlPlane).await?; | ||||
| 
 | ||||
|         while control_plane_hosts.len() < REQUIRED_HOSTS { | ||||
|             info!( | ||||
|                 "Discovery of {} control plane hosts in progress, current number {}", | ||||
|                 REQUIRED_HOSTS, | ||||
|                 control_plane_hosts.len() | ||||
|             ); | ||||
|             // This score triggers the discovery agent for a specific role.
 | ||||
|             DiscoverHostForRoleScore { | ||||
|                 role: HostRole::ControlPlane, | ||||
|             } | ||||
|             .interpret(inventory, topology) | ||||
|             .await?; | ||||
|             control_plane_hosts = repo.get_host_for_role(&HostRole::ControlPlane).await?; | ||||
|         } | ||||
| 
 | ||||
|         if control_plane_hosts.len() < REQUIRED_HOSTS { | ||||
|             Err(InterpretError::new(format!( | ||||
|                 "OKD Requires at least {} control plane hosts, but only found {}. Cannot proceed.", | ||||
|                 REQUIRED_HOSTS, | ||||
|                 control_plane_hosts.len() | ||||
|             ))) | ||||
|         } else { | ||||
|             // Take exactly the number of required hosts to ensure consistency.
 | ||||
|             Ok(control_plane_hosts | ||||
|                 .into_iter() | ||||
|                 .take(REQUIRED_HOSTS) | ||||
|                 .collect()) | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     /// Configures DHCP host bindings for all control plane nodes.
 | ||||
|     async fn configure_host_binding( | ||||
|         &self, | ||||
|         inventory: &Inventory, | ||||
|         topology: &HAClusterTopology, | ||||
|         nodes: &Vec<PhysicalHost>, | ||||
|     ) -> Result<(), InterpretError> { | ||||
|         info!("[ControlPlane] Configuring host bindings for control plane nodes."); | ||||
| 
 | ||||
|         // Ensure the topology definition matches the number of physical nodes found.
 | ||||
|         if topology.control_plane.len() != nodes.len() { | ||||
|             return Err(InterpretError::new(format!( | ||||
|                 "Mismatch between logical control plane hosts defined in topology ({}) and physical nodes found ({}).", | ||||
|                 topology.control_plane.len(), | ||||
|                 nodes.len() | ||||
|             ))); | ||||
|         } | ||||
| 
 | ||||
|         // Create a binding for each physical host to its corresponding logical host.
 | ||||
|         let bindings: Vec<HostBinding> = topology | ||||
|             .control_plane | ||||
|             .iter() | ||||
|             .zip(nodes.iter()) | ||||
|             .map(|(logical_host, physical_host)| { | ||||
|                 info!( | ||||
|                     "Creating binding: Logical Host '{}' -> Physical Host ID '{}'", | ||||
|                     logical_host.name, physical_host.id | ||||
|                 ); | ||||
|                 HostBinding { | ||||
|                     logical_host: logical_host.clone(), | ||||
|                     physical_host: physical_host.clone(), | ||||
|                 } | ||||
|             }) | ||||
|             .collect(); | ||||
| 
 | ||||
|         DhcpHostBindingScore { | ||||
|             host_binding: bindings, | ||||
|             domain: Some(topology.domain_name.clone()), | ||||
|         } | ||||
|         .interpret(inventory, topology) | ||||
|         .await?; | ||||
| 
 | ||||
|         Ok(()) | ||||
|     } | ||||
| 
 | ||||
|     /// Renders and deploys a per-MAC iPXE boot file for each control plane node.
 | ||||
|     async fn configure_ipxe( | ||||
|         &self, | ||||
|         inventory: &Inventory, | ||||
|         topology: &HAClusterTopology, | ||||
|         nodes: &Vec<PhysicalHost>, | ||||
|     ) -> Result<(), InterpretError> { | ||||
|         info!("[ControlPlane] Rendering per-MAC iPXE configurations."); | ||||
| 
 | ||||
|         // The iPXE script content is the same for all control plane nodes,
 | ||||
|         // pointing to the 'master.ign' ignition file.
 | ||||
|         let content = BootstrapIpxeTpl { | ||||
|             http_ip: &topology.http_server.get_ip().to_string(), | ||||
|             scos_path: "scos", | ||||
|             ignition_http_path: "okd_ignition_files", | ||||
|             installation_device: "/dev/sda", // This might need to be configurable per-host in the future
 | ||||
|             ignition_file_name: "master.ign", // Control plane nodes use the master ignition file
 | ||||
|         } | ||||
|         .to_string(); | ||||
| 
 | ||||
|         debug!("[ControlPlane] iPXE content template:\n{}", content); | ||||
| 
 | ||||
|         // Create and apply an iPXE boot file for each node.
 | ||||
|         for node in nodes { | ||||
|             let mac_address = node.get_mac_address(); | ||||
|             if mac_address.is_empty() { | ||||
|                 return Err(InterpretError::new(format!( | ||||
|                     "Physical host with ID '{}' has no MAC addresses defined.", | ||||
|                     node.id | ||||
|                 ))); | ||||
|             } | ||||
|             info!( | ||||
|                 "[ControlPlane] Applying iPXE config for node ID '{}' with MACs: {:?}", | ||||
|                 node.id, mac_address | ||||
|             ); | ||||
| 
 | ||||
|             IPxeMacBootFileScore { | ||||
|                 mac_address, | ||||
|                 content: content.clone(), | ||||
|             } | ||||
|             .interpret(inventory, topology) | ||||
|             .await?; | ||||
|         } | ||||
| 
 | ||||
|         Ok(()) | ||||
|     } | ||||
| 
 | ||||
|     /// Prompts the user to reboot the target control plane nodes.
 | ||||
|     async fn reboot_targets(&self, nodes: &Vec<PhysicalHost>) -> Result<(), InterpretError> { | ||||
|         let node_ids: Vec<String> = nodes.iter().map(|n| n.id.to_string()).collect(); | ||||
|         info!( | ||||
|             "[ControlPlane] Requesting reboot for control plane nodes: {:?}", | ||||
|             node_ids | ||||
|         ); | ||||
| 
 | ||||
|         let confirmation = inquire::Confirm::new( | ||||
|                 &format!("Please reboot the {} control plane nodes ({}) to apply their PXE configuration. Press enter when ready.", nodes.len(), node_ids.join(", ")), | ||||
|         ) | ||||
|         .prompt() | ||||
|         .map_err(|e| InterpretError::new(format!("User prompt failed: {}", e)))?; | ||||
| 
 | ||||
|         if !confirmation { | ||||
|             return Err(InterpretError::new( | ||||
|                 "User aborted the operation.".to_string(), | ||||
|             )); | ||||
|         } | ||||
| 
 | ||||
|         Ok(()) | ||||
|     } | ||||
| 
 | ||||
|     /// Placeholder for automating network bonding configuration.
 | ||||
|     async fn persist_network_bond(&self) -> Result<(), InterpretError> { | ||||
|         // Generate MC or NNCP from inventory NIC data; apply via ignition or post-join.
 | ||||
|         info!("[ControlPlane] Ensuring persistent bonding via MachineConfig/NNCP"); | ||||
|         inquire::Confirm::new( | ||||
|             "Network configuration for control plane nodes is not automated yet. Configure it manually if needed.", | ||||
|         ) | ||||
|         .prompt() | ||||
|         .map_err(|e| InterpretError::new(format!("User prompt failed: {}", e)))?; | ||||
| 
 | ||||
|         Ok(()) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[async_trait] | ||||
| impl Interpret<HAClusterTopology> for OKDSetup03ControlPlaneInterpret { | ||||
|     fn get_name(&self) -> InterpretName { | ||||
|         InterpretName::Custom("OKDSetup03ControlPlane") | ||||
|     } | ||||
| 
 | ||||
|     fn get_version(&self) -> Version { | ||||
|         self.version.clone() | ||||
|     } | ||||
| 
 | ||||
|     fn get_status(&self) -> InterpretStatus { | ||||
|         self.status.clone() | ||||
|     } | ||||
| 
 | ||||
|     fn get_children(&self) -> Vec<Id> { | ||||
|         vec![] | ||||
|     } | ||||
| 
 | ||||
|     async fn execute( | ||||
|         &self, | ||||
|         inventory: &Inventory, | ||||
|         topology: &HAClusterTopology, | ||||
|     ) -> Result<Outcome, InterpretError> { | ||||
|         // 1. Ensure we have 3 physical hosts for the control plane.
 | ||||
|         let nodes = self.get_nodes(inventory, topology).await?; | ||||
| 
 | ||||
|         // 2. Create DHCP reservations for the control plane nodes.
 | ||||
|         self.configure_host_binding(inventory, topology, &nodes) | ||||
|             .await?; | ||||
| 
 | ||||
|         // 3. Create iPXE files for each control plane node to boot from the master ignition.
 | ||||
|         self.configure_ipxe(inventory, topology, &nodes).await?; | ||||
| 
 | ||||
|         // 4. Reboot the nodes to start the OS installation.
 | ||||
|         self.reboot_targets(&nodes).await?; | ||||
| 
 | ||||
|         // 5. Placeholder for post-boot network configuration (e.g., bonding).
 | ||||
|         self.persist_network_bond().await?; | ||||
| 
 | ||||
|         // TODO: Implement a step to wait for the control plane nodes to join the cluster
 | ||||
|         // and for the cluster operators to become available. This would be similar to
 | ||||
|         // the `wait-for bootstrap-complete` command.
 | ||||
|         info!("[ControlPlane] Provisioning initiated. Monitor the cluster convergence manually."); | ||||
| 
 | ||||
|         Ok(Outcome::new( | ||||
|             InterpretStatus::SUCCESS, | ||||
|             "Control plane provisioning has been successfully initiated.".into(), | ||||
|         )) | ||||
|     } | ||||
| } | ||||
| @ -1,102 +0,0 @@ | ||||
| use std::{fmt::Write, path::PathBuf}; | ||||
| 
 | ||||
| use async_trait::async_trait; | ||||
| use derive_new::new; | ||||
| use harmony_secret::SecretManager; | ||||
| use harmony_types::id::Id; | ||||
| use log::{debug, error, info, warn}; | ||||
| use serde::{Deserialize, Serialize}; | ||||
| use tokio::{fs::File, io::AsyncWriteExt, process::Command}; | ||||
| 
 | ||||
| use crate::{ | ||||
|     config::secret::{RedhatSecret, SshKeyPair}, | ||||
|     data::{FileContent, FilePath, Version}, | ||||
|     hardware::PhysicalHost, | ||||
|     infra::inventory::InventoryRepositoryFactory, | ||||
|     instrumentation::{HarmonyEvent, instrument}, | ||||
|     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, | ||||
|     inventory::{HostRole, Inventory}, | ||||
|     modules::{ | ||||
|         dhcp::DhcpHostBindingScore, | ||||
|         http::{IPxeMacBootFileScore, StaticFilesHttpScore}, | ||||
|         inventory::LaunchDiscoverInventoryAgentScore, | ||||
|         okd::{ | ||||
|             bootstrap_load_balancer::OKDBootstrapLoadBalancerScore, | ||||
|             templates::{BootstrapIpxeTpl, InstallConfigYaml}, | ||||
|         }, | ||||
|     }, | ||||
|     score::Score, | ||||
|     topology::{HAClusterTopology, HostBinding}, | ||||
| }; | ||||
| // -------------------------------------------------------------------------------------------------
 | ||||
| // Step 04: Workers
 | ||||
| // - Render per-MAC PXE & ignition for workers; join nodes.
 | ||||
| // - Persist bonding via MC/NNCP as required (same approach as masters).
 | ||||
| // -------------------------------------------------------------------------------------------------
 | ||||
| 
 | ||||
/// Score for Step 04 of the OKD setup: provisioning the worker nodes
/// (currently a stub — see the interpret below).
#[derive(Debug, Clone, Serialize, new)]
pub struct OKDSetup04WorkersScore {}
| 
 | ||||
| impl Score<HAClusterTopology> for OKDSetup04WorkersScore { | ||||
|     fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> { | ||||
|         Box::new(OKDSetup04WorkersInterpret::new(self.clone())) | ||||
|     } | ||||
| 
 | ||||
|     fn name(&self) -> String { | ||||
|         "OKDSetup04WorkersScore".to_string() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
/// Interpret executing [`OKDSetup04WorkersScore`].
#[derive(Debug, Clone)]
pub struct OKDSetup04WorkersInterpret {
    // The score this interpret was created from.
    score: OKDSetup04WorkersScore,
    // Version reported through `get_version`.
    version: Version,
    // Lifecycle status reported through `get_status`.
    status: InterpretStatus,
}
| 
 | ||||
| impl OKDSetup04WorkersInterpret { | ||||
|     pub fn new(score: OKDSetup04WorkersScore) -> Self { | ||||
|         let version = Version::from("1.0.0").unwrap(); | ||||
|         Self { | ||||
|             version, | ||||
|             score, | ||||
|             status: InterpretStatus::QUEUED, | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     async fn render_and_reboot(&self) -> Result<(), InterpretError> { | ||||
|         info!("[Workers] Rendering per-MAC PXE for workers and rebooting"); | ||||
|         Ok(()) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[async_trait] | ||||
| impl Interpret<HAClusterTopology> for OKDSetup04WorkersInterpret { | ||||
|     fn get_name(&self) -> InterpretName { | ||||
|         InterpretName::Custom("OKDSetup04Workers") | ||||
|     } | ||||
| 
 | ||||
|     fn get_version(&self) -> Version { | ||||
|         self.version.clone() | ||||
|     } | ||||
| 
 | ||||
|     fn get_status(&self) -> InterpretStatus { | ||||
|         self.status.clone() | ||||
|     } | ||||
| 
 | ||||
|     fn get_children(&self) -> Vec<Id> { | ||||
|         vec![] | ||||
|     } | ||||
| 
 | ||||
|     async fn execute( | ||||
|         &self, | ||||
|         _inventory: &Inventory, | ||||
|         _topology: &HAClusterTopology, | ||||
|     ) -> Result<Outcome, InterpretError> { | ||||
|         self.render_and_reboot().await?; | ||||
|         Ok(Outcome::new( | ||||
|             InterpretStatus::SUCCESS, | ||||
|             "Workers provisioned".into(), | ||||
|         )) | ||||
|     } | ||||
| } | ||||
| @ -1,101 +0,0 @@ | ||||
| use std::{fmt::Write, path::PathBuf}; | ||||
| 
 | ||||
| use async_trait::async_trait; | ||||
| use derive_new::new; | ||||
| use harmony_secret::SecretManager; | ||||
| use harmony_types::id::Id; | ||||
| use log::{debug, error, info, warn}; | ||||
| use serde::{Deserialize, Serialize}; | ||||
| use tokio::{fs::File, io::AsyncWriteExt, process::Command}; | ||||
| 
 | ||||
| use crate::{ | ||||
|     config::secret::{RedhatSecret, SshKeyPair}, | ||||
|     data::{FileContent, FilePath, Version}, | ||||
|     hardware::PhysicalHost, | ||||
|     infra::inventory::InventoryRepositoryFactory, | ||||
|     instrumentation::{HarmonyEvent, instrument}, | ||||
|     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, | ||||
|     inventory::{HostRole, Inventory}, | ||||
|     modules::{ | ||||
|         dhcp::DhcpHostBindingScore, | ||||
|         http::{IPxeMacBootFileScore, StaticFilesHttpScore}, | ||||
|         inventory::LaunchDiscoverInventoryAgentScore, | ||||
|         okd::{ | ||||
|             bootstrap_load_balancer::OKDBootstrapLoadBalancerScore, | ||||
|             templates::{BootstrapIpxeTpl, InstallConfigYaml}, | ||||
|         }, | ||||
|     }, | ||||
|     score::Score, | ||||
|     topology::{HAClusterTopology, HostBinding}, | ||||
| }; | ||||
| // -------------------------------------------------------------------------------------------------
 | ||||
| // Step 05: Sanity Check
 | ||||
| // - Validate API reachability, ClusterOperators, ingress, and SDN status.
 | ||||
| // -------------------------------------------------------------------------------------------------
 | ||||
| 
 | ||||
/// Score for Step 05 of the OKD setup: post-install sanity checks
/// (API reachability, ClusterOperators, ingress, SDN).
#[derive(Debug, Clone, Serialize, new)]
pub struct OKDSetup05SanityCheckScore {}
| 
 | ||||
| impl Score<HAClusterTopology> for OKDSetup05SanityCheckScore { | ||||
|     fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> { | ||||
|         Box::new(OKDSetup05SanityCheckInterpret::new(self.clone())) | ||||
|     } | ||||
| 
 | ||||
|     fn name(&self) -> String { | ||||
|         "OKDSetup05SanityCheckScore".to_string() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
/// Interpret executing [`OKDSetup05SanityCheckScore`].
#[derive(Debug, Clone)]
pub struct OKDSetup05SanityCheckInterpret {
    // The score this interpret was created from.
    score: OKDSetup05SanityCheckScore,
    // Version reported through `get_version`.
    version: Version,
    // Lifecycle status reported through `get_status`.
    status: InterpretStatus,
}
| 
 | ||||
| impl OKDSetup05SanityCheckInterpret { | ||||
|     pub fn new(score: OKDSetup05SanityCheckScore) -> Self { | ||||
|         let version = Version::from("1.0.0").unwrap(); | ||||
|         Self { | ||||
|             version, | ||||
|             score, | ||||
|             status: InterpretStatus::QUEUED, | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     async fn run_checks(&self) -> Result<(), InterpretError> { | ||||
|         info!("[Sanity] Checking API, COs, Ingress, and SDN health …"); | ||||
|         Ok(()) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[async_trait] | ||||
| impl Interpret<HAClusterTopology> for OKDSetup05SanityCheckInterpret { | ||||
|     fn get_name(&self) -> InterpretName { | ||||
|         InterpretName::Custom("OKDSetup05SanityCheck") | ||||
|     } | ||||
| 
 | ||||
|     fn get_version(&self) -> Version { | ||||
|         self.version.clone() | ||||
|     } | ||||
| 
 | ||||
|     fn get_status(&self) -> InterpretStatus { | ||||
|         self.status.clone() | ||||
|     } | ||||
| 
 | ||||
|     fn get_children(&self) -> Vec<Id> { | ||||
|         vec![] | ||||
|     } | ||||
| 
 | ||||
|     async fn execute( | ||||
|         &self, | ||||
|         _inventory: &Inventory, | ||||
|         _topology: &HAClusterTopology, | ||||
|     ) -> Result<Outcome, InterpretError> { | ||||
|         self.run_checks().await?; | ||||
|         Ok(Outcome::new( | ||||
|             InterpretStatus::SUCCESS, | ||||
|             "Sanity checks passed".into(), | ||||
|         )) | ||||
|     } | ||||
| } | ||||
| @ -1,101 +0,0 @@ | ||||
| // -------------------------------------------------------------------------------------------------
 | ||||
| use async_trait::async_trait; | ||||
| use derive_new::new; | ||||
| use harmony_secret::SecretManager; | ||||
| use harmony_types::id::Id; | ||||
| use log::{debug, error, info, warn}; | ||||
| use serde::{Deserialize, Serialize}; | ||||
| use std::{fmt::Write, path::PathBuf}; | ||||
| use tokio::{fs::File, io::AsyncWriteExt, process::Command}; | ||||
| 
 | ||||
| use crate::{ | ||||
|     config::secret::{RedhatSecret, SshKeyPair}, | ||||
|     data::{FileContent, FilePath, Version}, | ||||
|     hardware::PhysicalHost, | ||||
|     infra::inventory::InventoryRepositoryFactory, | ||||
|     instrumentation::{HarmonyEvent, instrument}, | ||||
|     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, | ||||
|     inventory::{HostRole, Inventory}, | ||||
|     modules::{ | ||||
|         dhcp::DhcpHostBindingScore, | ||||
|         http::{IPxeMacBootFileScore, StaticFilesHttpScore}, | ||||
|         inventory::LaunchDiscoverInventoryAgentScore, | ||||
|         okd::{ | ||||
|             bootstrap_load_balancer::OKDBootstrapLoadBalancerScore, | ||||
|             templates::{BootstrapIpxeTpl, InstallConfigYaml}, | ||||
|         }, | ||||
|     }, | ||||
|     score::Score, | ||||
|     topology::{HAClusterTopology, HostBinding}, | ||||
| }; | ||||
| 
 | ||||
| // Step 06: Installation Report
 | ||||
| // - Emit JSON and concise human summary of nodes, roles, versions, and health.
 | ||||
| // -------------------------------------------------------------------------------------------------
 | ||||
| 
 | ||||
/// Score for Step 06 of the OKD setup: emitting the installation report
/// (nodes, roles, versions, health).
#[derive(Debug, Clone, Serialize, new)]
pub struct OKDSetup06InstallationReportScore {}
| 
 | ||||
| impl Score<HAClusterTopology> for OKDSetup06InstallationReportScore { | ||||
|     fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> { | ||||
|         Box::new(OKDSetup06InstallationReportInterpret::new(self.clone())) | ||||
|     } | ||||
| 
 | ||||
|     fn name(&self) -> String { | ||||
|         "OKDSetup06InstallationReportScore".to_string() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
/// Interpret executing [`OKDSetup06InstallationReportScore`].
#[derive(Debug, Clone)]
pub struct OKDSetup06InstallationReportInterpret {
    // The score this interpret was created from.
    score: OKDSetup06InstallationReportScore,
    // Version reported through `get_version`.
    version: Version,
    // Lifecycle status reported through `get_status`.
    status: InterpretStatus,
}
| 
 | ||||
| impl OKDSetup06InstallationReportInterpret { | ||||
|     pub fn new(score: OKDSetup06InstallationReportScore) -> Self { | ||||
|         let version = Version::from("1.0.0").unwrap(); | ||||
|         Self { | ||||
|             version, | ||||
|             score, | ||||
|             status: InterpretStatus::QUEUED, | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     async fn generate(&self) -> Result<(), InterpretError> { | ||||
|         info!("[Report] Generating OKD installation report",); | ||||
|         Ok(()) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[async_trait] | ||||
| impl Interpret<HAClusterTopology> for OKDSetup06InstallationReportInterpret { | ||||
|     fn get_name(&self) -> InterpretName { | ||||
|         InterpretName::Custom("OKDSetup06InstallationReport") | ||||
|     } | ||||
| 
 | ||||
|     fn get_version(&self) -> Version { | ||||
|         self.version.clone() | ||||
|     } | ||||
| 
 | ||||
|     fn get_status(&self) -> InterpretStatus { | ||||
|         self.status.clone() | ||||
|     } | ||||
| 
 | ||||
|     fn get_children(&self) -> Vec<Id> { | ||||
|         vec![] | ||||
|     } | ||||
| 
 | ||||
|     async fn execute( | ||||
|         &self, | ||||
|         _inventory: &Inventory, | ||||
|         _topology: &HAClusterTopology, | ||||
|     ) -> Result<Outcome, InterpretError> { | ||||
|         self.generate().await?; | ||||
|         Ok(Outcome::new( | ||||
|             InterpretStatus::SUCCESS, | ||||
|             "Installation report generated".into(), | ||||
|         )) | ||||
|     } | ||||
| } | ||||
| @ -37,23 +37,21 @@ impl OKDBootstrapDhcpScore { | ||||
|                 .clone(), | ||||
|         }); | ||||
|         // TODO refactor this so it is not copy pasted from dhcp.rs
 | ||||
|         todo!("Add dhcp range") | ||||
|         // Self {
 | ||||
|         //     dhcp_score: DhcpScore::new(
 | ||||
|         //         host_binding,
 | ||||
|         //         // TODO : we should add a tftp server to the topology instead of relying on the
 | ||||
|         //         // router address, this is leaking implementation details
 | ||||
|         //         Some(topology.router.get_gateway()),
 | ||||
|         //         None, // To allow UEFI boot we cannot provide a legacy file
 | ||||
|         //         Some("undionly.kpxe".to_string()),
 | ||||
|         //         Some("ipxe.efi".to_string()),
 | ||||
|         //         Some(format!(
 | ||||
|         //             "http://{}:8080/boot.ipxe",
 | ||||
|         //             topology.router.get_gateway()
 | ||||
|         //         )),
 | ||||
|         //         (self.),
 | ||||
|         //     ),
 | ||||
|         // }
 | ||||
|         Self { | ||||
|             dhcp_score: DhcpScore::new( | ||||
|                 host_binding, | ||||
|                 // TODO : we should add a tftp server to the topology instead of relying on the
 | ||||
|                 // router address, this is leaking implementation details
 | ||||
|                 Some(topology.router.get_gateway()), | ||||
|                 None, // To allow UEFI boot we cannot provide a legacy file
 | ||||
|                 Some("undionly.kpxe".to_string()), | ||||
|                 Some("ipxe.efi".to_string()), | ||||
|                 Some(format!( | ||||
|                     "http://{}:8080/boot.ipxe", | ||||
|                     topology.router.get_gateway() | ||||
|                 )), | ||||
|             ), | ||||
|         } | ||||
|     } | ||||
| } | ||||
| 
 | ||||
|  | ||||
| @ -8,7 +8,7 @@ use crate::{ | ||||
|     score::Score, | ||||
|     topology::{ | ||||
|         BackendServer, HAClusterTopology, HealthCheck, HttpMethod, HttpStatusCode, LoadBalancer, | ||||
|         LoadBalancerService, SSL, Topology, | ||||
|         LoadBalancerService, Topology, | ||||
|     }, | ||||
| }; | ||||
| 
 | ||||
| @ -44,7 +44,6 @@ impl OKDBootstrapLoadBalancerScore { | ||||
|                     "/readyz".to_string(), | ||||
|                     HttpMethod::GET, | ||||
|                     HttpStatusCode::Success2xx, | ||||
|                     SSL::SSL, | ||||
|                 )), | ||||
|             }, | ||||
|         ]; | ||||
| @ -55,7 +54,6 @@ impl OKDBootstrapLoadBalancerScore { | ||||
|             }, | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     fn topology_to_backend_server(topology: &HAClusterTopology, port: u16) -> Vec<BackendServer> { | ||||
|         let mut backend: Vec<_> = topology | ||||
|             .control_plane | ||||
| @ -65,14 +63,6 @@ impl OKDBootstrapLoadBalancerScore { | ||||
|                 port, | ||||
|             }) | ||||
|             .collect(); | ||||
| 
 | ||||
|         topology.workers.iter().for_each(|worker| { | ||||
|             backend.push(BackendServer { | ||||
|                 address: worker.ip.to_string(), | ||||
|                 port, | ||||
|             }) | ||||
|         }); | ||||
| 
 | ||||
|         backend.push(BackendServer { | ||||
|             address: topology.bootstrap_host.ip.to_string(), | ||||
|             port, | ||||
|  | ||||
| @ -1,6 +1,3 @@ | ||||
| use std::net::Ipv4Addr; | ||||
| 
 | ||||
| use harmony_types::net::IpAddress; | ||||
| use serde::Serialize; | ||||
| 
 | ||||
| use crate::{ | ||||
| @ -47,16 +44,6 @@ impl OKDDhcpScore { | ||||
|                 }) | ||||
|             }); | ||||
| 
 | ||||
|         let dhcp_server_ip = match topology.dhcp_server.get_ip() { | ||||
|             std::net::IpAddr::V4(ipv4_addr) => ipv4_addr, | ||||
|             std::net::IpAddr::V6(_ipv6_addr) => todo!("Support ipv6 someday"), | ||||
|         }; | ||||
| 
 | ||||
|         // TODO this could overflow, we should use proper subnet maths here instead of an ip
 | ||||
|         // address and guessing the subnet size from there
 | ||||
|         let start = Ipv4Addr::from(u32::from(dhcp_server_ip) + 100); | ||||
|         let end = Ipv4Addr::from(u32::from(dhcp_server_ip) + 150); | ||||
| 
 | ||||
|         Self { | ||||
|             // TODO : we should add a tftp server to the topology instead of relying on the
 | ||||
|             // router address, this is leaking implementation details
 | ||||
| @ -70,8 +57,6 @@ impl OKDDhcpScore { | ||||
|                     "http://{}:8080/boot.ipxe", | ||||
|                     topology.router.get_gateway() | ||||
|                 )), | ||||
|                 dhcp_range: (IpAddress::from(start), IpAddress::from(end)), | ||||
|                 domain: Some(topology.domain_name.clone()), | ||||
|             }, | ||||
|         } | ||||
|     } | ||||
|  | ||||
| @ -1,73 +0,0 @@ | ||||
| //! OKDInstallationScore
 | ||||
| //!
 | ||||
| //! Overview
 | ||||
| //! --------
 | ||||
| //! OKDInstallationScore orchestrates an end-to-end, bare-metal OKD (OpenShift/OKD 4.19).
 | ||||
| //! It follows principles of “discovery-first, then provision” strategy with strict ordering,
 | ||||
| //! observable progress, and minimal assumptions about the underlying network.
 | ||||
| //!
 | ||||
| //! High-level flow
 | ||||
| //! 1) OKDSetup01Inventory
 | ||||
| //!    - Serve default iPXE + Kickstart (in-RAM CentOS Stream 9) for discovery only.
 | ||||
| //!    - Enable SSH with the cluster’s pubkey, start a Rust inventory agent.
 | ||||
| //!    - Harmony discovers nodes by scraping the agent endpoint and collects MACs/NICs.
 | ||||
| //!
 | ||||
| //! 2) OKDSetup02Bootstrap
 | ||||
| //!    - User selects which discovered node becomes bootstrap.
 | ||||
| //!    - Prepare the OKD cluster installation files
 | ||||
| //!    - Render per-MAC iPXE for bootstrap with OKD 4.19 SCOS live assets + ignition.
 | ||||
| //!    - Reboot node via SSH; install bootstrap; wait for bootstrap-complete.
 | ||||
| //!
 | ||||
| //! 3) OKDSetup03ControlPlane
 | ||||
| //!    - Render per-MAC iPXE for cp0/cp1/cp2 with ignition. Reboot via SSH, join masters.
 | ||||
| //!    - Configure network bond (where relevant) using OKD NMState MachineConfig
 | ||||
| //!
 | ||||
| //! 4) OKDSetup04Workers
 | ||||
| //!    - Render per-MAC iPXE for worker set; join workers.
 | ||||
| //!    - Configure network bond (where relevant) using OKD NMState MachineConfig
 | ||||
| //!
 | ||||
| //! 5) OKDSetup05SanityCheck
 | ||||
| //!    - Validate API/ingress/clusteroperators; ensure healthy control plane and SDN.
 | ||||
| //!
 | ||||
| //! 6) OKDSetup06InstallationReport
 | ||||
| //!    - Produce a concise, machine-readable report (JSON) and a human summary.
 | ||||
| //!
 | ||||
| //! Network notes
 | ||||
| //! - During Inventory: ports must be simple access (no LACP). DHCP succeeds; iPXE
 | ||||
| //!   loads CentOS Stream live with Kickstart and starts the inventory endpoint.
 | ||||
| //! - During Provisioning: only after SCOS is on disk and Ignition/MC can be applied
 | ||||
| //!   do we set the bond persistently. If early bonding is truly required on a host,
 | ||||
| //!   use kernel args selectively in the per-MAC PXE for that host, but never for the
 | ||||
| //!   generic discovery path.
 | ||||
| //! - This is caused by the inherent race condition between PXE, which cannot perform
 | ||||
| //!   its DHCP recovery process on a bonded network, and the bond configuration itself,
 | ||||
| //!   which must be configured on host AND switch to connect properly.
 | ||||
| //!
 | ||||
| //! Configuration knobs
 | ||||
| //! - public_domain: External wildcard/apps domain (e.g., apps.example.com).
 | ||||
| //! - internal_domain: Internal cluster domain (e.g., cluster.local or harmony.mcd).
 | ||||
| 
 | ||||
| use crate::{ | ||||
|     modules::okd::{ | ||||
|         OKDSetup01InventoryScore, OKDSetup02BootstrapScore, OKDSetup03ControlPlaneScore, | ||||
|         OKDSetup04WorkersScore, OKDSetup05SanityCheckScore, | ||||
|         bootstrap_06_installation_report::OKDSetup06InstallationReportScore, | ||||
|     }, | ||||
|     score::Score, | ||||
|     topology::HAClusterTopology, | ||||
| }; | ||||
| 
 | ||||
| pub struct OKDInstallationPipeline; | ||||
| 
 | ||||
| impl OKDInstallationPipeline { | ||||
|     pub async fn get_all_scores() -> Vec<Box<dyn Score<HAClusterTopology>>> { | ||||
|         vec![ | ||||
|             Box::new(OKDSetup01InventoryScore::new()), | ||||
|             Box::new(OKDSetup02BootstrapScore::new()), | ||||
|             Box::new(OKDSetup03ControlPlaneScore::new()), | ||||
|             Box::new(OKDSetup04WorkersScore::new()), | ||||
|             Box::new(OKDSetup05SanityCheckScore::new()), | ||||
|             Box::new(OKDSetup06InstallationReportScore::new()), | ||||
|         ] | ||||
|     } | ||||
| } | ||||
| @ -1,9 +1,9 @@ | ||||
| use askama::Template; | ||||
| use async_trait::async_trait; | ||||
| use derive_new::new; | ||||
| use harmony_types::net::{IpAddress, Url}; | ||||
| use harmony_types::net::Url; | ||||
| use serde::Serialize; | ||||
| use std::net::{IpAddr, Ipv4Addr}; | ||||
| use std::net::IpAddr; | ||||
| 
 | ||||
| use crate::{ | ||||
|     data::{FileContent, FilePath, Version}, | ||||
| @ -16,31 +16,29 @@ use crate::{ | ||||
| use harmony_types::id::Id; | ||||
| 
 | ||||
| #[derive(Debug, new, Clone, Serialize)] | ||||
| pub struct OKDIpxeScore { | ||||
| pub struct OkdIpxeScore { | ||||
|     pub kickstart_filename: String, | ||||
|     pub harmony_inventory_agent: String, | ||||
|     pub cluster_pubkey: FileContent, | ||||
|     pub cluster_pubkey_filename: String, | ||||
| } | ||||
| 
 | ||||
| impl<T: Topology + DhcpServer + TftpServer + HttpServer + Router> Score<T> for OKDIpxeScore { | ||||
| impl<T: Topology + DhcpServer + TftpServer + HttpServer + Router> Score<T> for OkdIpxeScore { | ||||
|     fn create_interpret(&self) -> Box<dyn Interpret<T>> { | ||||
|         Box::new(OKDIpxeInterpret::new(self.clone())) | ||||
|         Box::new(IpxeInterpret::new(self.clone())) | ||||
|     } | ||||
| 
 | ||||
|     fn name(&self) -> String { | ||||
|         "OKDipxeScore".to_string() | ||||
|         "OkdIpxeScore".to_string() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[derive(Debug, new, Clone)] | ||||
| pub struct OKDIpxeInterpret { | ||||
|     score: OKDIpxeScore, | ||||
| pub struct IpxeInterpret { | ||||
|     score: OkdIpxeScore, | ||||
| } | ||||
| 
 | ||||
| #[async_trait] | ||||
| impl<T: Topology + DhcpServer + TftpServer + HttpServer + Router> Interpret<T> | ||||
|     for OKDIpxeInterpret | ||||
| { | ||||
| impl<T: Topology + DhcpServer + TftpServer + HttpServer + Router> Interpret<T> for IpxeInterpret { | ||||
|     async fn execute( | ||||
|         &self, | ||||
|         inventory: &Inventory, | ||||
| @ -48,32 +46,19 @@ impl<T: Topology + DhcpServer + TftpServer + HttpServer + Router> Interpret<T> | ||||
|     ) -> Result<Outcome, InterpretError> { | ||||
|         let gateway_ip = topology.get_gateway(); | ||||
| 
 | ||||
|         let dhcp_server_ip = match DhcpServer::get_ip(topology) { | ||||
|             std::net::IpAddr::V4(ipv4_addr) => ipv4_addr, | ||||
|             std::net::IpAddr::V6(_ipv6_addr) => todo!("Support ipv6 someday"), | ||||
|         }; | ||||
| 
 | ||||
|         // TODO this could overflow, we should use proper subnet maths here instead of an ip
 | ||||
|         // address and guessing the subnet size from there
 | ||||
|         let start = Ipv4Addr::from(u32::from(dhcp_server_ip) + 100); | ||||
|         let end = Ipv4Addr::from(u32::from(dhcp_server_ip) + 150); | ||||
| 
 | ||||
|         let scores: Vec<Box<dyn Score<T>>> = vec![ | ||||
|             Box::new(DhcpScore { | ||||
|                 host_binding: vec![], | ||||
|                 domain: None, | ||||
|                 next_server: Some(topology.get_gateway()), | ||||
|                 boot_filename: None, | ||||
|                 filename: Some("undionly.kpxe".to_string()), | ||||
|                 filename64: Some("ipxe.efi".to_string()), | ||||
|                 filenameipxe: Some(format!("http://{gateway_ip}:8080/boot.ipxe").to_string()), | ||||
|                 dhcp_range: (IpAddress::from(start), IpAddress::from(end)), | ||||
|             }), | ||||
|             Box::new(TftpScore { | ||||
|                 files_to_serve: Url::LocalFolder("./data/pxe/okd/tftpboot/".to_string()), | ||||
|             }), | ||||
|             Box::new(StaticFilesHttpScore { | ||||
|                 remote_path: None, | ||||
|                 // TODO The current russh based copy is way too slow, check for a lib update or use scp
 | ||||
|                 // when available
 | ||||
|                 //
 | ||||
| @ -95,7 +80,7 @@ impl<T: Topology + DhcpServer + TftpServer + HttpServer + Router> Interpret<T> | ||||
|                         content: InventoryKickstartTpl { | ||||
|                             gateway_ip: &gateway_ip, | ||||
|                             harmony_inventory_agent: &self.score.harmony_inventory_agent, | ||||
|                             cluster_pubkey_filename: &self.score.cluster_pubkey.path.to_string(), | ||||
|                             cluster_pubkey_filename: &self.score.cluster_pubkey_filename, | ||||
|                         } | ||||
|                         .to_string(), | ||||
|                     }, | ||||
| @ -107,7 +92,6 @@ impl<T: Topology + DhcpServer + TftpServer + HttpServer + Router> Interpret<T> | ||||
|                         } | ||||
|                         .to_string(), | ||||
|                     }, | ||||
|                     self.score.cluster_pubkey.clone(), | ||||
|                 ], | ||||
|             }), | ||||
|         ]; | ||||
| @ -123,7 +107,6 @@ impl<T: Topology + DhcpServer + TftpServer + HttpServer + Router> Interpret<T> | ||||
|                 Err(e) => return Err(e), | ||||
|             }; | ||||
|         } | ||||
|         inquire::Confirm::new(&format!("Execute the copy : `scp -r data/pxe/okd/http_files/* root@{}:/usr/local/http/` and confirm when done to continue", HttpServer::get_ip(topology))).prompt().expect("Prompt error"); | ||||
| 
 | ||||
|         Ok(Outcome::success("Ipxe installed".to_string())) | ||||
|     } | ||||
|  | ||||
| @ -8,7 +8,7 @@ use crate::{ | ||||
|     score::Score, | ||||
|     topology::{ | ||||
|         BackendServer, HAClusterTopology, HealthCheck, HttpMethod, HttpStatusCode, LoadBalancer, | ||||
|         LoadBalancerService, SSL, Topology, | ||||
|         LoadBalancerService, Topology, | ||||
|     }, | ||||
| }; | ||||
| 
 | ||||
| @ -62,7 +62,6 @@ impl OKDLoadBalancerScore { | ||||
|                     "/readyz".to_string(), | ||||
|                     HttpMethod::GET, | ||||
|                     HttpStatusCode::Success2xx, | ||||
|                     SSL::SSL, | ||||
|                 )), | ||||
|             }, | ||||
|         ]; | ||||
|  | ||||
| @ -1,21 +1,7 @@ | ||||
| mod bootstrap_01_prepare; | ||||
| mod bootstrap_02_bootstrap; | ||||
| mod bootstrap_03_control_plane; | ||||
| mod bootstrap_04_workers; | ||||
| mod bootstrap_05_sanity_check; | ||||
| mod bootstrap_06_installation_report; | ||||
| pub mod bootstrap_dhcp; | ||||
| pub mod bootstrap_load_balancer; | ||||
| pub mod dhcp; | ||||
| pub mod dns; | ||||
| pub mod installation; | ||||
| pub mod ipxe; | ||||
| pub mod load_balancer; | ||||
| pub mod templates; | ||||
| pub mod upgrade; | ||||
| pub use bootstrap_01_prepare::*; | ||||
| pub use bootstrap_02_bootstrap::*; | ||||
| pub use bootstrap_03_control_plane::*; | ||||
| pub use bootstrap_04_workers::*; | ||||
| pub use bootstrap_05_sanity_check::*; | ||||
| pub use bootstrap_06_installation_report::*; | ||||
|  | ||||
| @ -1,20 +0,0 @@ | ||||
| use askama::Template; | ||||
| 
 | ||||
| #[derive(Template)] | ||||
| #[template(path = "okd/install-config.yaml.j2")] | ||||
| pub struct InstallConfigYaml<'a> { | ||||
|     pub cluster_domain: &'a str, | ||||
|     pub pull_secret: &'a str, | ||||
|     pub ssh_public_key: &'a str, | ||||
|     pub cluster_name: &'a str, | ||||
| } | ||||
| 
 | ||||
| #[derive(Template)] | ||||
| #[template(path = "okd/bootstrap.ipxe.j2")] | ||||
| pub struct BootstrapIpxeTpl<'a> { | ||||
|     pub http_ip: &'a str, | ||||
|     pub scos_path: &'a str, | ||||
|     pub installation_device: &'a str, | ||||
|     pub ignition_http_path: &'a str, | ||||
|     pub ignition_file_name: &'static str, | ||||
| } | ||||
| @ -1,4 +1,4 @@ | ||||
| use std::sync::Arc; | ||||
| use std::{sync::Arc, time::Duration}; | ||||
| 
 | ||||
| use async_trait::async_trait; | ||||
| use log::debug; | ||||
|  | ||||
| @ -1,63 +1,6 @@ | ||||
| #!ipxe | ||||
| 
 | ||||
| # iPXE Chainloading Script | ||||
| # | ||||
| # Attempts to load a host-specific configuration file. If that fails, | ||||
| # it logs the failure, waits for a few seconds, and then attempts to | ||||
| # load a generic fallback configuration. | ||||
| 
 | ||||
| # --- Configuration --- | ||||
| set base-url http://{{ gateway_ip }}:8080 | ||||
| set hostfile ${base-url}/byMAC/01-${mac:hexhyp}.ipxe | ||||
| set fallbackfile ${base-url}/fallback.ipxe | ||||
| 
 | ||||
| # --- Script Logic --- | ||||
| 
 | ||||
| echo | ||||
| echo "========================================" | ||||
| echo "      iPXE Network Boot Initiated" | ||||
| echo "========================================" | ||||
| echo "Client MAC Address: ${mac}" | ||||
| echo "Boot Server URL:    ${base-url}" | ||||
| echo | ||||
| 
 | ||||
| # --- Primary Boot Attempt --- | ||||
| echo "--> Attempting to load host-specific script..." | ||||
| echo "    Location: ${hostfile}" | ||||
| 
 | ||||
| sleep 2 | ||||
| 
 | ||||
| # The "&& exit ||" pattern works as follows: | ||||
| # 1. iPXE attempts to 'chain' the hostfile. | ||||
| # 2. If successful (returns 0), the "&& exit" part is executed, and this script terminates. | ||||
| # 3. If it fails (returns non-zero), the "||" part is triggered, and execution continues below. | ||||
| chain --autofree --replace ${hostfile} && exit || | ||||
| 
 | ||||
| # --- Fallback Boot Attempt --- | ||||
| # This part of the script is only reached if the 'chain ${hostfile}' command above failed. | ||||
| echo | ||||
| echo "--> Host-specific script not found or failed to load." | ||||
| echo | ||||
| 
 | ||||
| echo | ||||
| echo "--> Attempting to load fallback script..." | ||||
| echo "    Location: ${fallbackfile}" | ||||
| 
 | ||||
| sleep 8 | ||||
| 
 | ||||
| chain --autofree --replace ${fallbackfile} && exit || | ||||
| 
 | ||||
| # --- Final Failure --- | ||||
| # This part is only reached if BOTH chain commands have failed. | ||||
| echo | ||||
| echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" | ||||
| echo "    FATAL: All boot scripts failed!" | ||||
| echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" | ||||
| echo "Could not load either the host-specific script or the fallback script." | ||||
| echo "Dropping to iPXE shell for manual troubleshooting in 10 seconds." | ||||
| sleep 8 | ||||
| 
 | ||||
| shell | ||||
| 
 | ||||
| # A final exit is good practice, though 'shell' is a blocking command. | ||||
| exit | ||||
| chain ${hostfile} || chain ${base-url}/fallback.ipxe | ||||
|  | ||||
| @ -1,52 +0,0 @@ | ||||
| #!ipxe | ||||
| 
 | ||||
| # ================================================================== | ||||
| #    MAC-Specific Boot Script for CoreOS/FCOS Installation | ||||
| # ================================================================== | ||||
| 
 | ||||
| # --- Configuration --- | ||||
| set http_ip {{ http_ip }} | ||||
| set scos_path {{ scos_path }} | ||||
| set inst_dev {{ installation_device }} | ||||
| set ign_path {{ ignition_http_path }} | ||||
| set ign_file {{ ignition_file_name }} | ||||
| 
 | ||||
| # --- Derived Variables --- | ||||
| set base-url http://${http_ip}:8080 | ||||
| set scos-base-url ${base-url}/${scos_path} | ||||
| set ignition-url ${base-url}/${ign_path}/${ign_file} | ||||
| 
 | ||||
| # --- Pre-boot Logging & Verification --- | ||||
| echo | ||||
| echo "Starting MAC-specific installation..." | ||||
| echo "--------------------------------------------------" | ||||
| echo "  Installation Device: ${inst_dev}" | ||||
| echo "  CoreOS Kernel URL:   ${scos-base-url}/scos-live-kernel.x86_64" | ||||
| echo "  Ignition URL:        ${ignition-url}" | ||||
| echo "--------------------------------------------------" | ||||
| echo "Waiting for 3 seconds before loading boot assets..." | ||||
| sleep 3 | ||||
| 
 | ||||
| # --- Load Boot Assets with Failure Checks --- | ||||
| # The '|| goto failure' pattern provides a clean exit if any asset fails to load. | ||||
| echo "Loading kernel..." | ||||
| kernel ${scos-base-url}/scos-live-kernel.x86_64 initrd=main coreos.live.rootfs_url=${scos-base-url}/scos-live-rootfs.x86_64.img coreos.inst.install_dev=${inst_dev} coreos.inst.ignition_url=${ignition-url} || goto failure | ||||
| 
 | ||||
| echo "Loading initramfs..." | ||||
| initrd --name main ${scos-base-url}/scos-live-initramfs.x86_64.img || goto failure | ||||
| 
 | ||||
| # --- Boot --- | ||||
| echo "All assets loaded successfully. Starting boot process..." | ||||
| boot || goto failure | ||||
| 
 | ||||
| # This part is never reached on successful boot. | ||||
| 
 | ||||
| # --- Failure Handling --- | ||||
| :failure | ||||
| echo | ||||
| echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" | ||||
| echo "  ERROR: A boot component failed to load." | ||||
| echo "  Dropping to iPXE shell for manual debugging." | ||||
| echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" | ||||
| sleep 10 | ||||
| shell | ||||
| @ -1,24 +0,0 @@ | ||||
| # Built from https://docs.okd.io/latest/installing/installing_bare_metal/upi/installing-bare-metal.html#installation-bare-metal-config-yaml_installing-bare-metal | ||||
| apiVersion: v1 | ||||
| baseDomain: {{ cluster_domain }} | ||||
| compute:  | ||||
| - hyperthreading: Enabled  | ||||
|   name: worker | ||||
|   replicas: 0  | ||||
| controlPlane:  | ||||
|   hyperthreading: Enabled  | ||||
|   name: master | ||||
|   replicas: 3  | ||||
| metadata: | ||||
|   name: {{ cluster_name }}  | ||||
| networking: | ||||
|   clusterNetwork: | ||||
|   - cidr: 10.128.0.0/14  | ||||
|     hostPrefix: 23  | ||||
|   networkType: OVNKubernetes  | ||||
|   serviceNetwork:  | ||||
|   - 172.30.0.0/16 | ||||
| platform: | ||||
|   none: {}  | ||||
| pullSecret: '{{ pull_secret|safe }}'  | ||||
| sshKey: '{{ ssh_public_key }}'  | ||||
| @ -18,7 +18,6 @@ infisical = { git = "https://github.com/jggc/rust-sdk.git", branch = "patch-1" } | ||||
| tokio.workspace = true | ||||
| async-trait.workspace = true | ||||
| http.workspace = true | ||||
| inquire.workspace = true | ||||
| 
 | ||||
| [dev-dependencies] | ||||
| pretty_assertions.workspace = true | ||||
|  | ||||
| @ -9,7 +9,6 @@ use config::INFISICAL_ENVIRONMENT; | ||||
| use config::INFISICAL_PROJECT_ID; | ||||
| use config::INFISICAL_URL; | ||||
| use config::SECRET_STORE; | ||||
| use log::debug; | ||||
| use serde::{Serialize, de::DeserializeOwned}; | ||||
| use std::fmt; | ||||
| use store::InfisicalSecretStore; | ||||
| @ -102,7 +101,6 @@ impl SecretManager { | ||||
|     /// Retrieves and deserializes a secret.
 | ||||
|     pub async fn get<T: Secret>() -> Result<T, SecretStoreError> { | ||||
|         let manager = get_secret_manager().await; | ||||
|         debug!("Getting secret ns {} key {}", &manager.namespace, T::KEY); | ||||
|         let raw_value = manager.store.get_raw(&manager.namespace, T::KEY).await?; | ||||
|         serde_json::from_slice(&raw_value).map_err(|e| SecretStoreError::Deserialization { | ||||
|             key: T::KEY.to_string(), | ||||
| @ -110,42 +108,6 @@ impl SecretManager { | ||||
|         }) | ||||
|     } | ||||
| 
 | ||||
|     pub async fn get_or_prompt<T: Secret>() -> Result<T, SecretStoreError> { | ||||
|         let secret = Self::get::<T>().await; | ||||
|         let manager = get_secret_manager().await; | ||||
|         let prompted = secret.is_err(); | ||||
| 
 | ||||
|         let secret = secret.or_else(|e| -> Result<T, SecretStoreError> { | ||||
|             debug!("Could not get secret : {e}"); | ||||
| 
 | ||||
|             let ns = &manager.namespace; | ||||
|             let key = T::KEY; | ||||
|             let secret_json = inquire::Text::new(&format!( | ||||
|                 "Secret not found for {} {}, paste the JSON here :", | ||||
|                 ns, key | ||||
|             )) | ||||
|             .prompt() | ||||
|             .map_err(|e| { | ||||
|                 SecretStoreError::Store(format!("Failed to prompt secret {ns} {key} : {e}").into()) | ||||
|             })?; | ||||
| 
 | ||||
|             let secret: T = serde_json::from_str(&secret_json).map_err(|e| { | ||||
|                 SecretStoreError::Deserialization { | ||||
|                     key: T::KEY.to_string(), | ||||
|                     source: e, | ||||
|                 } | ||||
|             })?; | ||||
| 
 | ||||
|             Ok(secret) | ||||
|         })?; | ||||
| 
 | ||||
|         if prompted { | ||||
|             Self::set(&secret).await?; | ||||
|         } | ||||
| 
 | ||||
|         Ok(secret) | ||||
|     } | ||||
| 
 | ||||
|     /// Serializes and stores a secret.
 | ||||
|     pub async fn set<T: Secret>(secret: &T) -> Result<(), SecretStoreError> { | ||||
|         let manager = get_secret_manager().await; | ||||
|  | ||||
| @ -1,5 +1,5 @@ | ||||
| use async_trait::async_trait; | ||||
| use log::{debug, info}; | ||||
| use log::info; | ||||
| use std::path::{Path, PathBuf}; | ||||
| 
 | ||||
| use crate::{SecretStore, SecretStoreError}; | ||||
| @ -24,7 +24,7 @@ impl SecretStore for LocalFileSecretStore { | ||||
|             .join("secrets"); | ||||
| 
 | ||||
|         let file_path = Self::get_file_path(&data_dir, ns, key); | ||||
|         debug!( | ||||
|         info!( | ||||
|             "LOCAL_STORE: Getting key '{key}' from namespace '{ns}' at {}", | ||||
|             file_path.display() | ||||
|         ); | ||||
|  | ||||
| @ -48,12 +48,6 @@ impl From<String> for Id { | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl From<Id> for String { | ||||
|     fn from(value: Id) -> Self { | ||||
|         value.to_string() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl std::fmt::Display for Id { | ||||
|     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { | ||||
|         f.write_str(&self.value) | ||||
|  | ||||
| @ -21,7 +21,7 @@ impl From<&MacAddress> for String { | ||||
| 
 | ||||
| impl std::fmt::Display for MacAddress { | ||||
|     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { | ||||
|         f.write_str(&String::from(self)) | ||||
|         f.write_fmt(format_args!("MacAddress {}", String::from(self))) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
|  | ||||
| @ -1,5 +0,0 @@ | ||||
| CREATE TABLE IF NOT EXISTS host_role_mapping ( | ||||
|     id INTEGER PRIMARY KEY AUTOINCREMENT, | ||||
|     host_id TEXT NOT NULL, | ||||
|     role TEXT NOT NULL | ||||
| ); | ||||
| @ -36,27 +36,6 @@ pub struct DnsMasq { | ||||
|     pub dhcp_options: Vec<DhcpOptions>, | ||||
|     pub dhcp_boot: Vec<DhcpBoot>, | ||||
|     pub dhcp_tags: Vec<RawXml>, | ||||
|     pub hosts: Vec<DnsmasqHost>, | ||||
| } | ||||
| 
 | ||||
| #[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize, Clone)] | ||||
| #[yaserde(rename = "hosts")] | ||||
| pub struct DnsmasqHost { | ||||
|     #[yaserde(attribute = true)] | ||||
|     pub uuid: String, | ||||
|     pub host: String, | ||||
|     pub domain: MaybeString, | ||||
|     pub local: MaybeString, | ||||
|     pub ip: MaybeString, | ||||
|     pub cnames: MaybeString, | ||||
|     pub client_id: MaybeString, | ||||
|     pub hwaddr: MaybeString, | ||||
|     pub lease_time: MaybeString, | ||||
|     pub ignore: Option<u8>, | ||||
|     pub set_tag: MaybeString, | ||||
|     pub descr: MaybeString, | ||||
|     pub comments: MaybeString, | ||||
|     pub aliases: MaybeString, | ||||
| } | ||||
| 
 | ||||
| // Represents the <dhcp> element and its nested fields.
 | ||||
|  | ||||
| @ -189,7 +189,7 @@ pub struct System { | ||||
|     pub timeservers: String, | ||||
|     pub webgui: WebGui, | ||||
|     pub usevirtualterminal: u8, | ||||
|     pub disablenatreflection: Option<String>, | ||||
|     pub disablenatreflection: String, | ||||
|     pub disableconsolemenu: u8, | ||||
|     pub disablevlanhwfilter: u8, | ||||
|     pub disablechecksumoffloading: u8, | ||||
| @ -256,7 +256,7 @@ pub struct Firmware { | ||||
|     #[yaserde(rename = "type")] | ||||
|     pub firmware_type: MaybeString, | ||||
|     pub subscription: MaybeString, | ||||
|     pub reboot: Option<MaybeString>, | ||||
|     pub reboot: MaybeString, | ||||
| } | ||||
| 
 | ||||
| #[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)] | ||||
| @ -267,12 +267,12 @@ pub struct Bogons { | ||||
| #[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)] | ||||
| pub struct Group { | ||||
|     pub name: String, | ||||
|     pub description: Option<String>, | ||||
|     pub description: String, | ||||
|     pub scope: String, | ||||
|     pub gid: u32, | ||||
|     pub member: String, | ||||
|     pub member: Vec<u32>, | ||||
|     #[yaserde(rename = "priv")] | ||||
|     pub priv_field: Option<String>, | ||||
|     pub priv_field: String, | ||||
|     pub source_networks: Option<MaybeString>, | ||||
| } | ||||
| 
 | ||||
| @ -1449,9 +1449,6 @@ pub struct Vip { | ||||
|     pub advbase: Option<MaybeString>, | ||||
|     pub advskew: Option<MaybeString>, | ||||
|     pub descr: Option<MaybeString>, | ||||
|     pub peer: Option<MaybeString>, | ||||
|     pub peer6: Option<MaybeString>, | ||||
|     pub nosync: Option<MaybeString>, | ||||
| } | ||||
| 
 | ||||
| #[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)] | ||||
|  | ||||
| @ -21,7 +21,6 @@ serde_json = "1.0.133" | ||||
| tokio-util = { version = "0.7.13", features = ["codec"] } | ||||
| tokio-stream = "0.1.17" | ||||
| uuid.workspace = true | ||||
| sha2 = "0.10.9" | ||||
| 
 | ||||
| [dev-dependencies] | ||||
| pretty_assertions.workspace = true | ||||
|  | ||||
| @ -1,10 +1,10 @@ | ||||
| use std::sync::Arc; | ||||
| 
 | ||||
| use crate::{ | ||||
|     config::{check_hash, get_hash, SshConfigManager, SshCredentials, SshOPNSenseShell}, | ||||
|     config::{SshConfigManager, SshCredentials, SshOPNSenseShell}, | ||||
|     error::Error, | ||||
|     modules::{ | ||||
|         caddy::CaddyConfig, dhcp_legacy::DhcpConfigLegacyISC, dns::UnboundDnsConfig, | ||||
|         caddy::CaddyConfig, dhcp_legacy::DhcpConfigLegacyISC, dns::DnsConfig, | ||||
|         dnsmasq::DhcpConfigDnsMasq, load_balancer::LoadBalancerConfig, tftp::TftpConfig, | ||||
|     }, | ||||
| }; | ||||
| @ -12,7 +12,6 @@ use log::{debug, info, trace, warn}; | ||||
| use opnsense_config_xml::OPNsense; | ||||
| use russh::client; | ||||
| use serde::Serialize; | ||||
| use sha2::Digest; | ||||
| 
 | ||||
| use super::{ConfigManager, OPNsenseShell}; | ||||
| 
 | ||||
| @ -21,7 +20,6 @@ pub struct Config { | ||||
|     opnsense: OPNsense, | ||||
|     repository: Arc<dyn ConfigManager>, | ||||
|     shell: Arc<dyn OPNsenseShell>, | ||||
|     hash: String, | ||||
| } | ||||
| 
 | ||||
| impl Serialize for Config { | ||||
| @ -38,10 +36,8 @@ impl Config { | ||||
|         repository: Arc<dyn ConfigManager>, | ||||
|         shell: Arc<dyn OPNsenseShell>, | ||||
|     ) -> Result<Self, Error> { | ||||
|         let (opnsense, hash) = Self::get_opnsense_instance(repository.clone()).await?; | ||||
|         Ok(Self { | ||||
|             opnsense, | ||||
|             hash, | ||||
|             opnsense: Self::get_opnsense_instance(repository.clone()).await?, | ||||
|             repository, | ||||
|             shell, | ||||
|         }) | ||||
| @ -55,8 +51,8 @@ impl Config { | ||||
|         DhcpConfigDnsMasq::new(&mut self.opnsense, self.shell.clone()) | ||||
|     } | ||||
| 
 | ||||
|     pub fn dns(&mut self) -> DhcpConfigDnsMasq<'_> { | ||||
|         DhcpConfigDnsMasq::new(&mut self.opnsense, self.shell.clone()) | ||||
|     pub fn dns(&mut self) -> DnsConfig<'_> { | ||||
|         DnsConfig::new(&mut self.opnsense) | ||||
|     } | ||||
| 
 | ||||
|     pub fn tftp(&mut self) -> TftpConfig<'_> { | ||||
| @ -150,7 +146,7 @@ impl Config { | ||||
| 
 | ||||
|     async fn reload_config(&mut self) -> Result<(), Error> { | ||||
|         info!("Reloading opnsense live config"); | ||||
|         let (opnsense, sha2) = Self::get_opnsense_instance(self.repository.clone()).await?; | ||||
|         self.opnsense = Self::get_opnsense_instance(self.repository.clone()).await?; | ||||
|         Ok(()) | ||||
|     } | ||||
| 
 | ||||
| @ -162,15 +158,14 @@ impl Config { | ||||
|     /// Save the config to the repository. This method is meant NOT to reload services, only save
 | ||||
|     /// the config to the live file/database and perhaps take a backup when relevant.
 | ||||
|     pub async fn save(&self) -> Result<(), Error> { | ||||
|         let xml = &self.opnsense.to_xml(); | ||||
|         self.repository.save_config(xml, &self.hash).await | ||||
|         self.repository.save_config(&self.opnsense.to_xml()).await | ||||
|     } | ||||
| 
 | ||||
|     /// Save the configuration and reload all services. Be careful with this one as it will cause
 | ||||
|     /// downtime in many cases, such as a PPPoE renegociation
 | ||||
|     pub async fn apply(&self) -> Result<(), Error> { | ||||
|         self.repository | ||||
|             .apply_new_config(&self.opnsense.to_xml(), &self.hash) | ||||
|             .apply_new_config(&self.opnsense.to_xml()) | ||||
|             .await | ||||
|     } | ||||
| 
 | ||||
| @ -198,14 +193,11 @@ impl Config { | ||||
|         Config::new(manager, shell).await.unwrap() | ||||
|     } | ||||
| 
 | ||||
|     async fn get_opnsense_instance( | ||||
|         repository: Arc<dyn ConfigManager>, | ||||
|     ) -> Result<(OPNsense, String), Error> { | ||||
|     async fn get_opnsense_instance(repository: Arc<dyn ConfigManager>) -> Result<OPNsense, Error> { | ||||
|         let xml = repository.load_as_str().await?; | ||||
|         trace!("xml {}", xml); | ||||
| 
 | ||||
|         let hash = get_hash(&xml); | ||||
|         Ok((OPNsense::from(xml), hash)) | ||||
|         Ok(OPNsense::from(xml)) | ||||
|     } | ||||
| 
 | ||||
|     pub async fn run_command(&self, command: &str) -> Result<String, Error> { | ||||
| @ -227,14 +219,13 @@ mod tests { | ||||
|     #[tokio::test] | ||||
|     async fn test_load_config_from_local_file() { | ||||
|         for path in [ | ||||
|             // "src/tests/data/config-opnsense-25.1.xml",
 | ||||
|             // "src/tests/data/config-vm-test.xml",
 | ||||
|             "src/tests/data/config-opnsense-25.1.xml", | ||||
|             "src/tests/data/config-vm-test.xml", | ||||
|             "src/tests/data/config-structure.xml", | ||||
|             "src/tests/data/config-full-1.xml", | ||||
|             // "src/tests/data/config-full-ncd0.xml",
 | ||||
|             // "src/tests/data/config-full-25.7.xml",
 | ||||
|             // "src/tests/data/config-full-25.7-dummy-dnsmasq-options.xml",
 | ||||
|             "src/tests/data/config-25.7-dnsmasq-static-host.xml", | ||||
|             "src/tests/data/config-full-ncd0.xml", | ||||
|             "src/tests/data/config-full-25.7.xml", | ||||
|             "src/tests/data/config-full-25.7-dummy-dnsmasq-options.xml", | ||||
|         ] { | ||||
|             let mut test_file_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); | ||||
|             test_file_path.push(path); | ||||
| @ -252,13 +243,13 @@ mod tests { | ||||
| 
 | ||||
|             let serialized = config.opnsense.to_xml(); | ||||
| 
 | ||||
|             fs::write("/tmp/serialized.xml", &serialized).unwrap(); | ||||
| 
 | ||||
|             // Since the order of all fields is not always the same in opnsense config files
 | ||||
|             // I think it is good enough to have exactly the same amount of the same lines
 | ||||
|             let mut before = config_file_str.lines().collect::<Vec<_>>(); | ||||
|             let mut after = serialized.lines().collect::<Vec<_>>(); | ||||
|             before.sort(); | ||||
|             after.sort(); | ||||
|             assert_eq!(before, after); | ||||
|             [config_file_str.lines().collect::<Vec<_>>()].sort(); | ||||
|             [config_file_str.lines().collect::<Vec<_>>()].sort(); | ||||
|             assert_eq!((), ()); | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
| @ -288,6 +279,8 @@ mod tests { | ||||
| 
 | ||||
|         let serialized = config.opnsense.to_xml(); | ||||
| 
 | ||||
|         fs::write("/tmp/serialized.xml", &serialized).unwrap(); | ||||
| 
 | ||||
|         let mut test_file_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); | ||||
|         test_file_path.push("src/tests/data/config-structure-with-dhcp-staticmap-entry.xml"); | ||||
| 
 | ||||
|  | ||||
| @ -1,4 +1,3 @@ | ||||
| use crate::config::check_hash; | ||||
| use crate::config::manager::ConfigManager; | ||||
| use crate::error::Error; | ||||
| use async_trait::async_trait; | ||||
| @ -21,17 +20,11 @@ impl ConfigManager for LocalFileConfigManager { | ||||
|         Ok(fs::read_to_string(&self.file_path)?) | ||||
|     } | ||||
| 
 | ||||
|     async fn save_config(&self, content: &str, hash: &str) -> Result<(), Error> { | ||||
|         let current_content = self.load_as_str().await?; | ||||
|         if !check_hash(¤t_content, hash) { | ||||
|             return Err(Error::Config(format!( | ||||
|                 "OPNSense config file changed since loading it! Hash when loading : {hash}" | ||||
|             ))); | ||||
|         } | ||||
|     async fn save_config(&self, content: &str) -> Result<(), Error> { | ||||
|         Ok(fs::write(&self.file_path, content)?) | ||||
|     } | ||||
| 
 | ||||
|     async fn apply_new_config(&self, content: &str, hash: &str) -> Result<(), Error> { | ||||
|         self.save_config(content, hash).await | ||||
|     async fn apply_new_config(&self, content: &str) -> Result<(), Error> { | ||||
|         self.save_config(content).await | ||||
|     } | ||||
| } | ||||
|  | ||||
| @ -9,8 +9,6 @@ use crate::Error; | ||||
| #[async_trait] | ||||
| pub trait ConfigManager: std::fmt::Debug + Send + Sync { | ||||
|     async fn load_as_str(&self) -> Result<String, Error>; | ||||
|     /// Save a new version of the config file, making sure that the hash still represents the file
 | ||||
|     /// currently stored in /conf/config.xml
 | ||||
|     async fn save_config(&self, content: &str, hash: &str) -> Result<(), Error>; | ||||
|     async fn apply_new_config(&self, content: &str, hash: &str) -> Result<(), Error>; | ||||
|     async fn save_config(&self, content: &str) -> Result<(), Error>; | ||||
|     async fn apply_new_config(&self, content: &str) -> Result<(), Error>; | ||||
| } | ||||
|  | ||||
| @ -1,9 +1,8 @@ | ||||
| use crate::config::{manager::ConfigManager, OPNsenseShell}; | ||||
| use crate::error::Error; | ||||
| use async_trait::async_trait; | ||||
| use log::{info, warn}; | ||||
| use log::info; | ||||
| use russh_keys::key::KeyPair; | ||||
| use sha2::Digest; | ||||
| use std::sync::Arc; | ||||
| 
 | ||||
| #[derive(Debug)] | ||||
| @ -36,10 +35,10 @@ impl SshConfigManager { | ||||
|             .await | ||||
|     } | ||||
| 
 | ||||
|     async fn copy_to_live_config(&self, new_config_path: &str) -> Result<String, Error> { | ||||
|     async fn move_to_live_config(&self, new_config_path: &str) -> Result<String, Error> { | ||||
|         info!("Overwriting OPNSense /conf/config.xml with {new_config_path}"); | ||||
|         self.opnsense_shell | ||||
|             .exec(&format!("cp {new_config_path} /conf/config.xml")) | ||||
|             .exec(&format!("mv {new_config_path} /conf/config.xml")) | ||||
|             .await | ||||
|     } | ||||
| 
 | ||||
| @ -57,41 +56,19 @@ impl ConfigManager for SshConfigManager { | ||||
|         self.opnsense_shell.exec("cat /conf/config.xml").await | ||||
|     } | ||||
| 
 | ||||
|     async fn save_config(&self, content: &str, hash: &str) -> Result<(), Error> { | ||||
|         let current_content = self.load_as_str().await?; | ||||
| 
 | ||||
|         if !check_hash(¤t_content, hash) { | ||||
|             warn!("OPNSense config file changed since loading it! Hash when loading : {hash}"); | ||||
|             // return Err(Error::Config(format!(
 | ||||
|             //     "OPNSense config file changed since loading it! Hash when loading : {hash}"
 | ||||
|             // )));
 | ||||
|         } | ||||
| 
 | ||||
|     async fn save_config(&self, content: &str) -> Result<(), Error> { | ||||
|         let temp_filename = self | ||||
|             .opnsense_shell | ||||
|             .write_content_to_temp_file(content) | ||||
|             .await?; | ||||
|         self.backup_config_remote().await?; | ||||
|         self.copy_to_live_config(&temp_filename).await?; | ||||
|         self.move_to_live_config(&temp_filename).await?; | ||||
|         Ok(()) | ||||
|     } | ||||
| 
 | ||||
|     async fn apply_new_config(&self, content: &str, hash: &str) -> Result<(), Error> { | ||||
|         self.save_config(content, &hash).await?; | ||||
|     async fn apply_new_config(&self, content: &str) -> Result<(), Error> { | ||||
|         self.save_config(content).await?; | ||||
|         self.reload_all_services().await?; | ||||
|         Ok(()) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| pub fn get_hash(content: &str) -> String { | ||||
|     let mut hasher = sha2::Sha256::new(); | ||||
|     hasher.update(content.as_bytes()); | ||||
|     let hash_bytes = hasher.finalize(); | ||||
|     let hash_string = format!("{:x}", hash_bytes); | ||||
|     info!("Loaded OPNSense config.xml with hash {hash_string:?}"); | ||||
|     hash_string | ||||
| } | ||||
| 
 | ||||
| pub fn check_hash(content: &str, source_hash: &str) -> bool { | ||||
|     get_hash(content) == source_hash | ||||
| } | ||||
|  | ||||
| @ -39,7 +39,7 @@ impl OPNsenseShell for SshOPNSenseShell { | ||||
| 
 | ||||
|     async fn write_content_to_temp_file(&self, content: &str) -> Result<String, Error> { | ||||
|         let temp_filename = format!( | ||||
|             "/conf/harmony/opnsense-config-{}", | ||||
|             "/tmp/opnsense-config-tmp-config_{}", | ||||
|             SystemTime::now() | ||||
|                 .duration_since(UNIX_EPOCH) | ||||
|                 .unwrap() | ||||
|  | ||||
| @ -1,4 +1,4 @@ | ||||
| #[derive(Debug, PartialEq)] | ||||
| #[derive(Debug)] | ||||
| pub enum DhcpError { | ||||
|     InvalidMacAddress(String), | ||||
|     InvalidIpAddress(String), | ||||
|  | ||||
| @ -1,10 +1,10 @@ | ||||
| use opnsense_config_xml::{Host, OPNsense}; | ||||
| 
 | ||||
| pub struct UnboundDnsConfig<'a> { | ||||
| pub struct DnsConfig<'a> { | ||||
|     opnsense: &'a mut OPNsense, | ||||
| } | ||||
| 
 | ||||
| impl<'a> UnboundDnsConfig<'a> { | ||||
| impl<'a> DnsConfig<'a> { | ||||
|     pub fn new(opnsense: &'a mut OPNsense) -> Self { | ||||
|         Self { opnsense } | ||||
|     } | ||||
|  | ||||
| @ -1,12 +1,9 @@ | ||||
| // dnsmasq.rs
 | ||||
| use crate::modules::dhcp::DhcpError; | ||||
| use log::{debug, info, warn}; | ||||
| use opnsense_config_xml::dnsmasq::{DhcpRange, DnsMasq, DnsmasqHost}; // Assuming DhcpRange is defined in opnsense_config_xml::dnsmasq
 | ||||
| use log::{debug, info}; | ||||
| use opnsense_config_xml::{MaybeString, StaticMap}; | ||||
| use std::collections::HashSet; | ||||
| use std::net::Ipv4Addr; | ||||
| use std::sync::Arc; | ||||
| use uuid::Uuid; | ||||
| 
 | ||||
| use opnsense_config_xml::OPNsense; | ||||
| 
 | ||||
| @ -28,167 +25,74 @@ impl<'a> DhcpConfigDnsMasq<'a> { | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     /// Removes a MAC address from a static mapping.
 | ||||
|     /// If the mapping has no other MAC addresses associated with it, the entire host entry is removed.
 | ||||
|     pub fn remove_static_mapping(&mut self, mac_to_remove: &str) { | ||||
|         let dnsmasq = self.get_dnsmasq(); | ||||
| 
 | ||||
|         // Update hwaddr fields for hosts that contain the MAC, removing it from the comma-separated list.
 | ||||
|         for host in dnsmasq.hosts.iter_mut() { | ||||
|             let mac = host.hwaddr.content_string(); | ||||
|             let original_macs: Vec<&str> = mac.split(',').collect(); | ||||
|             if original_macs | ||||
|                 .iter() | ||||
|                 .any(|m| m.eq_ignore_ascii_case(mac_to_remove)) | ||||
|             { | ||||
|                 let updated_macs: Vec<&str> = original_macs | ||||
|                     .into_iter() | ||||
|                     .filter(|m| !m.eq_ignore_ascii_case(mac_to_remove)) | ||||
|                     .collect(); | ||||
|                 host.hwaddr = updated_macs.join(",").into(); | ||||
|             } | ||||
|     /// Removes a static mapping by its MAC address.
 | ||||
|     /// Static mappings are stored in the <dhcpd> section of the config, shared with the ISC module.
 | ||||
|     pub fn remove_static_mapping(&mut self, mac: &str) { | ||||
|         let lan_dhcpd = self.get_lan_dhcpd(); | ||||
|         lan_dhcpd | ||||
|             .staticmaps | ||||
|             .retain(|static_entry| static_entry.mac != mac); | ||||
|     } | ||||
| 
 | ||||
|         // Remove any host entries that no longer have any MAC addresses.
 | ||||
|         dnsmasq | ||||
|             .hosts | ||||
|             .retain(|host_entry| !host_entry.hwaddr.content_string().is_empty()); | ||||
|     /// Retrieves a mutable reference to the LAN interface's DHCP configuration.
 | ||||
|     /// This is located in the shared <dhcpd> section of the config.
 | ||||
|     fn get_lan_dhcpd(&mut self) -> &mut opnsense_config_xml::DhcpInterface { | ||||
|         &mut self | ||||
|             .opnsense | ||||
|             .dhcpd | ||||
|             .elements | ||||
|             .iter_mut() | ||||
|             .find(|(name, _config)| name == "lan") | ||||
|             .expect("Interface lan should have dhcpd activated") | ||||
|             .1 | ||||
|     } | ||||
| 
 | ||||
|     /// Retrieves a mutable reference to the DnsMasq configuration.
 | ||||
|     /// This is located in the <dnsmasq> section of the OPNsense config.
 | ||||
|     fn get_dnsmasq(&mut self) -> &mut DnsMasq { | ||||
|         self.opnsense | ||||
|             .dnsmasq | ||||
|             .as_mut() | ||||
|             .expect("Dnsmasq config must be initialized") | ||||
|     } | ||||
| 
 | ||||
|     /// Adds or updates a static DHCP mapping.
 | ||||
|     ///
 | ||||
|     /// This function implements specific logic to handle existing entries:
 | ||||
|     /// - If no host exists for the given IP or hostname, a new entry is created.
 | ||||
|     /// - If exactly one host exists for the IP and/or hostname, the new MAC is appended to it.
 | ||||
|     /// - It will error if the IP and hostname exist but point to two different host entries,
 | ||||
|     ///   as this represents an unresolvable conflict.
 | ||||
|     /// - It will also error if multiple entries are found for the IP or hostname, indicating an
 | ||||
|     ///   ambiguous state.
 | ||||
|     /// Adds a new static DHCP mapping.
 | ||||
|     /// Validates the MAC address and checks for existing mappings to prevent conflicts.
 | ||||
|     pub fn add_static_mapping( | ||||
|         &mut self, | ||||
|         mac: &Vec<String>, | ||||
|         ipaddr: &Ipv4Addr, | ||||
|         mac: &str, | ||||
|         ipaddr: Ipv4Addr, | ||||
|         hostname: &str, | ||||
|     ) -> Result<(), DhcpError> { | ||||
|         let mut hostname_split = hostname.split("."); | ||||
|         let hostname = hostname_split.next().expect("hostname cannot be empty"); | ||||
|         let domain_name = hostname_split.collect::<Vec<&str>>().join("."); | ||||
|         let mac = mac.to_string(); | ||||
|         let hostname = hostname.to_string(); | ||||
|         let lan_dhcpd = self.get_lan_dhcpd(); | ||||
|         let existing_mappings: &mut Vec<StaticMap> = &mut lan_dhcpd.staticmaps; | ||||
| 
 | ||||
|         if let Some(m) = mac.iter().find(|m| !Self::is_valid_mac(m)) { | ||||
|             return Err(DhcpError::InvalidMacAddress(m.to_string())); | ||||
|         if !Self::is_valid_mac(&mac) { | ||||
|             return Err(DhcpError::InvalidMacAddress(mac)); | ||||
|         } | ||||
| 
 | ||||
|         let ip_str = ipaddr.to_string(); | ||||
|         let hosts = &mut self.get_dnsmasq().hosts; | ||||
|         // TODO: Validate that the IP address is within a configured DHCP range.
 | ||||
| 
 | ||||
|         let ip_indices: Vec<usize> = hosts | ||||
|         if existing_mappings | ||||
|             .iter() | ||||
|             .enumerate() | ||||
|             .filter(|(_, h)| h.ip.content_string() == ip_str) | ||||
|             .map(|(i, _)| i) | ||||
|             .collect(); | ||||
| 
 | ||||
|         let hostname_indices: Vec<usize> = hosts | ||||
|             .iter() | ||||
|             .enumerate() | ||||
|             .filter(|(_, h)| h.host == hostname) | ||||
|             .map(|(i, _)| i) | ||||
|             .collect(); | ||||
| 
 | ||||
|         let ip_set: HashSet<usize> = ip_indices.iter().cloned().collect(); | ||||
|         let hostname_set: HashSet<usize> = hostname_indices.iter().cloned().collect(); | ||||
| 
 | ||||
|         if !ip_indices.is_empty() | ||||
|             && !hostname_indices.is_empty() | ||||
|             && ip_set.intersection(&hostname_set).count() == 0 | ||||
|             .any(|m| m.ipaddr == ipaddr.to_string() && m.mac == mac) | ||||
|         { | ||||
|             return Err(DhcpError::Configuration(format!( | ||||
|                 "Configuration conflict: IP {} and hostname '{}' exist, but in different static host entries.", | ||||
|                 ipaddr, hostname | ||||
|             ))); | ||||
|             info!("Mapping already exists for {} [{}], skipping", ipaddr, mac); | ||||
|             return Ok(()); | ||||
|         } | ||||
| 
 | ||||
|         let mut all_indices: Vec<&usize> = ip_set.union(&hostname_set).collect(); | ||||
|         all_indices.sort(); | ||||
|         if existing_mappings | ||||
|             .iter() | ||||
|             .any(|m| m.ipaddr == ipaddr.to_string()) | ||||
|         { | ||||
|             return Err(DhcpError::IpAddressAlreadyMapped(ipaddr.to_string())); | ||||
|         } | ||||
| 
 | ||||
|         let mac_list = mac.join(","); | ||||
|         if existing_mappings.iter().any(|m| m.mac == mac) { | ||||
|             return Err(DhcpError::MacAddressAlreadyMapped(mac)); | ||||
|         } | ||||
| 
 | ||||
|         match all_indices.len() { | ||||
|             0 => { | ||||
|                 info!( | ||||
|                     "Creating new static host for {} ({}) with MAC {}", | ||||
|                     hostname, ipaddr, mac_list | ||||
|                 ); | ||||
|                 let new_host = DnsmasqHost { | ||||
|                     uuid: Uuid::new_v4().to_string(), | ||||
|                     host: hostname.to_string(), | ||||
|                     ip: ip_str.into(), | ||||
|                     hwaddr: mac_list.into(), | ||||
|                     local: MaybeString::from("1"), | ||||
|                     ignore: Some(0), | ||||
|                     domain: domain_name.into(), | ||||
|         let static_map = StaticMap { | ||||
|             mac, | ||||
|             ipaddr: ipaddr.to_string(), | ||||
|             hostname: hostname, | ||||
|             ..Default::default() | ||||
|         }; | ||||
|                 hosts.push(new_host); | ||||
|             } | ||||
|             1 => { | ||||
|                 let host_index = *all_indices[0]; | ||||
|                 let host_to_modify = &mut hosts[host_index]; | ||||
|                 let host_to_modify_ip = host_to_modify.ip.content_string(); | ||||
|                 if host_to_modify_ip != ip_str { | ||||
|                     warn!( | ||||
|                         "Hostname '{}' already exists with a different IP ({}). Setting new IP {ip_str}. Appending MAC {}.", | ||||
|                         hostname, host_to_modify_ip, mac_list | ||||
|                     ); | ||||
|                     host_to_modify.ip.content = Some(ip_str); | ||||
|                 } else if host_to_modify.host != hostname { | ||||
|                     warn!( | ||||
|                         "IP {} already exists with a different hostname ('{}'). Setting hostname to {hostname}. Appending MAC {}.", | ||||
|                         ipaddr, host_to_modify.host, mac_list | ||||
|                     ); | ||||
|                     host_to_modify.host = hostname.to_string(); | ||||
|                 } | ||||
| 
 | ||||
|                 for single_mac in mac.iter() { | ||||
|                     if !host_to_modify | ||||
|                         .hwaddr | ||||
|                         .content_string() | ||||
|                         .split(',') | ||||
|                         .any(|m| m.eq_ignore_ascii_case(single_mac)) | ||||
|                     { | ||||
|                         info!( | ||||
|                             "Appending MAC {} to existing static host for {} ({})", | ||||
|                             single_mac, host_to_modify.host, host_to_modify_ip | ||||
|                         ); | ||||
|                         let mut updated_macs = host_to_modify.hwaddr.content_string().to_string(); | ||||
|                         updated_macs.push(','); | ||||
|                         updated_macs.push_str(single_mac); | ||||
|                         host_to_modify.hwaddr.content = updated_macs.into(); | ||||
|                     } else { | ||||
|                         debug!( | ||||
|                         "MAC {} already present in static host entry for {} ({}). No changes made.", | ||||
|                         single_mac, host_to_modify.host, host_to_modify_ip | ||||
|                     ); | ||||
|                     } | ||||
|                 } | ||||
|             } | ||||
|             _ => { | ||||
|                 return Err(DhcpError::Configuration(format!( | ||||
|                     "Configuration conflict: Found multiple host entries matching IP {} and/or hostname '{}'. Cannot resolve automatically.", | ||||
|                     ipaddr, hostname | ||||
|                 ))); | ||||
|             } | ||||
|         } | ||||
| 
 | ||||
|         existing_mappings.push(static_map); | ||||
|         Ok(()) | ||||
|     } | ||||
| 
 | ||||
| @ -206,20 +110,13 @@ impl<'a> DhcpConfigDnsMasq<'a> { | ||||
|     /// Retrieves the list of current static mappings by shelling out to `configctl`.
 | ||||
|     /// This provides the real-time state from the running system.
 | ||||
|     pub async fn get_static_mappings(&self) -> Result<Vec<StaticMap>, Error> { | ||||
|         // Note: This command is for the 'dhcpd' service. If dnsmasq uses a different command
 | ||||
|         // or key, this will need to be adjusted.
 | ||||
|         let list_static_output = self | ||||
|             .opnsense_shell | ||||
|             .exec("configctl dhcpd list static") | ||||
|             .await?; | ||||
| 
 | ||||
|         let value: serde_json::Value = serde_json::from_str(&list_static_output).map_err(|e| { | ||||
|             Error::Command(format!( | ||||
|                 "Got invalid json from configctl {list_static_output} : {e}" | ||||
|             )) | ||||
|         })?; | ||||
| 
 | ||||
|         // The JSON output key might be 'dhcpd' even when dnsmasq is the backend.
 | ||||
|         let value: serde_json::Value = serde_json::from_str(&list_static_output) | ||||
|             .unwrap_or_else(|_| panic!("Got invalid json from configctl {list_static_output}")); | ||||
|         let static_maps = value["dhcpd"] | ||||
|             .as_array() | ||||
|             .ok_or(Error::Command(format!( | ||||
| @ -238,36 +135,6 @@ impl<'a> DhcpConfigDnsMasq<'a> { | ||||
|         Ok(static_maps) | ||||
|     } | ||||
| 
 | ||||
|     pub async fn set_dhcp_range(&mut self, start: &str, end: &str) -> Result<(), DhcpError> { | ||||
|         let dnsmasq = self.get_dnsmasq(); | ||||
|         let ranges = &mut dnsmasq.dhcp_ranges; | ||||
| 
 | ||||
|         // Assuming DnsMasq has dhcp_ranges: Vec<DhcpRange>
 | ||||
|         // Find existing range for "lan" interface
 | ||||
|         if let Some(range) = ranges | ||||
|             .iter_mut() | ||||
|             .find(|r| r.interface == Some("lan".to_string())) | ||||
|         { | ||||
|             // Update existing range
 | ||||
|             range.start_addr = Some(start.to_string()); | ||||
|             range.end_addr = Some(end.to_string()); | ||||
|         } else { | ||||
|             // Create new range
 | ||||
|             let new_range = DhcpRange { | ||||
|                 uuid: Some(Uuid::new_v4().to_string()), | ||||
|                 interface: Some("lan".to_string()), | ||||
|                 start_addr: Some(start.to_string()), | ||||
|                 end_addr: Some(end.to_string()), | ||||
|                 domain_type: Some("range".to_string()), | ||||
|                 nosync: Some(0), | ||||
|                 ..Default::default() | ||||
|             }; | ||||
|             ranges.push(new_range); | ||||
|         } | ||||
| 
 | ||||
|         Ok(()) | ||||
|     } | ||||
| 
 | ||||
|     pub async fn set_pxe_options( | ||||
|         &self, | ||||
|         tftp_ip: Option<String>, | ||||
| @ -275,9 +142,9 @@ impl<'a> DhcpConfigDnsMasq<'a> { | ||||
|         efi_filename: String, | ||||
|         ipxe_filename: String, | ||||
|     ) -> Result<(), DhcpError> { | ||||
|         // OPNsense does not support negative tags via its API for dnsmasq, and the required
 | ||||
|         // logic is complex. Therefore, we write a configuration file directly to the
 | ||||
|         // dnsmasq.conf.d directory to achieve the desired PXE boot behavior.
 | ||||
|         // As of writing this opnsense does not support negative tags, and the dnsmasq config is a
 | ||||
|         // bit complicated anyways. So we are writing directly a dnsmasq config file to
 | ||||
|         // /usr/local/etc/dnsmasq.conf.d
 | ||||
|         let tftp_str = tftp_ip.map_or(String::new(), |i| format!(",{i},{i}")); | ||||
| 
 | ||||
|         let config = format!( | ||||
| @ -296,7 +163,7 @@ dhcp-boot=tag:efi,tag:!ipxe,{efi_filename}{tftp_str} | ||||
| dhcp-boot=tag:ipxe,{ipxe_filename}{tftp_str} | ||||
| 
 | ||||
| # Provide undionly to legacy bios clients | ||||
| dhcp-boot=tag:bios,tag:!ipxe,{bios_filename}{tftp_str} | ||||
| dhcp-boot=tag:bios,{bios_filename}{tftp_str} | ||||
| " | ||||
|         ); | ||||
|         info!("Writing configuration file to {DNS_MASQ_PXE_CONFIG_FILE}"); | ||||
| @ -318,302 +185,3 @@ dhcp-boot=tag:bios,tag:!ipxe,{bios_filename}{tftp_str} | ||||
|         Ok(()) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[cfg(test)] | ||||
| mod test { | ||||
|     use crate::config::DummyOPNSenseShell; | ||||
| 
 | ||||
|     use super::*; | ||||
|     use opnsense_config_xml::OPNsense; | ||||
|     use std::net::Ipv4Addr; | ||||
|     use std::sync::Arc; | ||||
| 
 | ||||
|     /// Helper function to create a DnsmasqHost with minimal boilerplate.
 | ||||
|     fn create_host(uuid: &str, host: &str, ip: &str, hwaddr: &str) -> DnsmasqHost { | ||||
|         DnsmasqHost { | ||||
|             uuid: uuid.to_string(), | ||||
|             host: host.to_string(), | ||||
|             ip: ip.into(), | ||||
|             hwaddr: hwaddr.into(), | ||||
|             local: MaybeString::from("1"), | ||||
|             ignore: Some(0), | ||||
|             ..Default::default() | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     /// Helper to set up the test environment with an initial OPNsense configuration.
 | ||||
|     fn setup_test_env(initial_hosts: Vec<DnsmasqHost>) -> DhcpConfigDnsMasq<'static> { | ||||
|         let opnsense_config = Box::leak(Box::new(OPNsense { | ||||
|             dnsmasq: Some(DnsMasq { | ||||
|                 hosts: initial_hosts, | ||||
|                 ..Default::default() | ||||
|             }), | ||||
|             ..Default::default() | ||||
|         })); | ||||
| 
 | ||||
|         DhcpConfigDnsMasq::new(opnsense_config, Arc::new(DummyOPNSenseShell {})) | ||||
|     } | ||||
| 
 | ||||
|     #[test] | ||||
|     fn test_add_first_static_mapping() { | ||||
|         let mut dhcp_config = setup_test_env(vec![]); | ||||
|         let ip = Ipv4Addr::new(192, 168, 1, 10); | ||||
|         let mac = "00:11:22:33:44:55"; | ||||
|         let hostname = "new-host"; | ||||
| 
 | ||||
|         dhcp_config | ||||
|             .add_static_mapping(&vec![mac.to_string()], &ip, hostname) | ||||
|             .unwrap(); | ||||
| 
 | ||||
|         let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts; | ||||
|         assert_eq!(hosts.len(), 1); | ||||
|         let host = &hosts[0]; | ||||
|         assert_eq!(host.host, hostname); | ||||
|         assert_eq!(host.ip, ip.to_string().into()); | ||||
|         assert_eq!(host.hwaddr.content_string(), mac); | ||||
|         assert!(Uuid::parse_str(&host.uuid).is_ok()); | ||||
|     } | ||||
| 
 | ||||
|     #[test] | ||||
|     fn test_hostname_split_into_host_domain() { | ||||
|         let mut dhcp_config = setup_test_env(vec![]); | ||||
|         let ip = Ipv4Addr::new(192, 168, 1, 10); | ||||
|         let mac = "00:11:22:33:44:55"; | ||||
|         let hostname = "new-host"; | ||||
|         let domain = "some.domain"; | ||||
| 
 | ||||
|         dhcp_config | ||||
|             .add_static_mapping(&vec![mac.to_string()], &ip, &format!("{hostname}.{domain}")) | ||||
|             .unwrap(); | ||||
| 
 | ||||
|         let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts; | ||||
|         assert_eq!(hosts.len(), 1); | ||||
|         let host = &hosts[0]; | ||||
|         assert_eq!(host.host, hostname); | ||||
|         assert_eq!(host.domain.content_string(), domain); | ||||
|         assert_eq!(host.ip, ip.to_string().into()); | ||||
|         assert_eq!(host.hwaddr.content_string(), mac); | ||||
|         assert!(Uuid::parse_str(&host.uuid).is_ok()); | ||||
|     } | ||||
| 
 | ||||
|     #[test] | ||||
|     fn test_add_mac_to_existing_host_by_ip_and_hostname() { | ||||
|         let initial_host = create_host( | ||||
|             "uuid-1", | ||||
|             "existing-host", | ||||
|             "192.168.1.20", | ||||
|             "AA:BB:CC:DD:EE:FF", | ||||
|         ); | ||||
|         let mut dhcp_config = setup_test_env(vec![initial_host]); | ||||
|         let ip = Ipv4Addr::new(192, 168, 1, 20); | ||||
|         let new_mac = "00:11:22:33:44:55"; | ||||
|         let hostname = "existing-host"; | ||||
| 
 | ||||
|         dhcp_config | ||||
|             .add_static_mapping(&vec![new_mac.to_string()], &ip, hostname) | ||||
|             .unwrap(); | ||||
| 
 | ||||
|         let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts; | ||||
|         assert_eq!(hosts.len(), 1); | ||||
|         let host = &hosts[0]; | ||||
|         assert_eq!( | ||||
|             host.hwaddr.content_string(), | ||||
|             "AA:BB:CC:DD:EE:FF,00:11:22:33:44:55" | ||||
|         ); | ||||
|     } | ||||
| 
 | ||||
|     #[test] | ||||
|     fn test_add_mac_to_existing_host_by_ip_only() { | ||||
|         let initial_host = create_host( | ||||
|             "uuid-1", | ||||
|             "existing-host", | ||||
|             "192.168.1.20", | ||||
|             "AA:BB:CC:DD:EE:FF", | ||||
|         ); | ||||
|         let mut dhcp_config = setup_test_env(vec![initial_host]); | ||||
|         let ip = Ipv4Addr::new(192, 168, 1, 20); | ||||
|         let new_mac = "00:11:22:33:44:55"; | ||||
| 
 | ||||
|         // Using a different hostname should still find the host by IP and log a warning.
 | ||||
|         let new_hostname = "different-host-name"; | ||||
|         dhcp_config | ||||
|             .add_static_mapping(&vec![new_mac.to_string()], &ip, new_hostname) | ||||
|             .unwrap(); | ||||
| 
 | ||||
|         let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts; | ||||
|         assert_eq!(hosts.len(), 1); | ||||
|         let host = &hosts[0]; | ||||
|         assert_eq!( | ||||
|             host.hwaddr.content_string(), | ||||
|             "AA:BB:CC:DD:EE:FF,00:11:22:33:44:55" | ||||
|         ); | ||||
|         assert_eq!(host.host, new_hostname); // hostname should be updated
 | ||||
|     } | ||||
| 
 | ||||
|     #[test] | ||||
|     fn test_add_mac_to_existing_host_by_hostname_only() { | ||||
|         let initial_host = create_host( | ||||
|             "uuid-1", | ||||
|             "existing-host", | ||||
|             "192.168.1.20", | ||||
|             "AA:BB:CC:DD:EE:FF", | ||||
|         ); | ||||
|         let mut dhcp_config = setup_test_env(vec![initial_host]); | ||||
|         let new_mac = "00:11:22:33:44:55"; | ||||
|         let hostname = "existing-host"; | ||||
| 
 | ||||
|         // Using a different IP should still find the host by hostname and log a warning.
 | ||||
|         dhcp_config | ||||
|             .add_static_mapping( | ||||
|                 &vec![new_mac.to_string()], | ||||
|                 &Ipv4Addr::new(192, 168, 1, 99), | ||||
|                 hostname, | ||||
|             ) | ||||
|             .unwrap(); | ||||
| 
 | ||||
|         let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts; | ||||
|         assert_eq!(hosts.len(), 1); | ||||
|         let host = &hosts[0]; | ||||
|         assert_eq!( | ||||
|             host.hwaddr.content_string(), | ||||
|             "AA:BB:CC:DD:EE:FF,00:11:22:33:44:55" | ||||
|         ); | ||||
|         assert_eq!(host.ip.content_string(), "192.168.1.99"); // Original IP should be preserved.
 | ||||
|     } | ||||
| 
 | ||||
|     #[test] | ||||
|     fn test_add_duplicate_mac_to_host() { | ||||
|         let initial_mac = "AA:BB:CC:DD:EE:FF"; | ||||
|         let initial_host = create_host("uuid-1", "host-1", "192.168.1.20", initial_mac); | ||||
|         let mut dhcp_config = setup_test_env(vec![initial_host]); | ||||
| 
 | ||||
|         dhcp_config | ||||
|             .add_static_mapping( | ||||
|                 &vec![initial_mac.to_string()], | ||||
|                 &Ipv4Addr::new(192, 168, 1, 20), | ||||
|                 "host-1", | ||||
|             ) | ||||
|             .unwrap(); | ||||
| 
 | ||||
|         let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts; | ||||
|         assert_eq!(hosts.len(), 1); | ||||
|         assert_eq!(hosts[0].hwaddr.content_string(), initial_mac); // No change, no duplication.
 | ||||
|     } | ||||
| 
 | ||||
|     #[test] | ||||
|     fn test_add_invalid_mac_address() { | ||||
|         let mut dhcp_config = setup_test_env(vec![]); | ||||
|         let result = dhcp_config.add_static_mapping( | ||||
|             &vec!["invalid-mac".to_string()], | ||||
|             &Ipv4Addr::new(10, 0, 0, 1), | ||||
|             "host", | ||||
|         ); | ||||
|         assert!(matches!(result, Err(DhcpError::InvalidMacAddress(_)))); | ||||
|     } | ||||
| 
 | ||||
|     #[test] | ||||
|     fn test_error_on_conflicting_ip_and_hostname() { | ||||
|         let host_a = create_host("uuid-a", "host-a", "192.168.1.10", "AA:AA:AA:AA:AA:AA"); | ||||
|         let host_b = create_host("uuid-b", "host-b", "192.168.1.20", "BB:BB:BB:BB:BB:BB"); | ||||
|         let mut dhcp_config = setup_test_env(vec![host_a, host_b]); | ||||
| 
 | ||||
|         let result = dhcp_config.add_static_mapping( | ||||
|             &vec!["CC:CC:CC:CC:CC:CC".to_string()], | ||||
|             &Ipv4Addr::new(192, 168, 1, 10), | ||||
|             "host-b", | ||||
|         ); | ||||
|         // This IP belongs to host-a, but the hostname belongs to host-b.
 | ||||
|         assert_eq!(result, Err(DhcpError::Configuration("Configuration conflict: IP 192.168.1.10 and hostname 'host-b' exist, but in different static host entries.".to_string()))); | ||||
|     } | ||||
| 
 | ||||
|     #[test] | ||||
|     fn test_error_on_multiple_ip_matches() { | ||||
|         let host_a = create_host("uuid-a", "host-a", "192.168.1.30", "AA:AA:AA:AA:AA:AA"); | ||||
|         let host_b = create_host("uuid-b", "host-b", "192.168.1.30", "BB:BB:BB:BB:BB:BB"); | ||||
|         let mut dhcp_config = setup_test_env(vec![host_a, host_b]); | ||||
| 
 | ||||
|         // This IP is ambiguous.
 | ||||
|         let result = dhcp_config.add_static_mapping( | ||||
|             &vec!["CC:CC:CC:CC:CC:CC".to_string()], | ||||
|             &Ipv4Addr::new(192, 168, 1, 30), | ||||
|             "new-host", | ||||
|         ); | ||||
|         assert_eq!(result, Err(DhcpError::Configuration("Configuration conflict: Found multiple host entries matching IP 192.168.1.30 and/or hostname 'new-host'. Cannot resolve automatically.".to_string()))); | ||||
|     } | ||||
| 
 | ||||
|     #[test] | ||||
|     fn test_remove_mac_from_multi_mac_host() { | ||||
|         let host = create_host("uuid-1", "host-1", "192.168.1.50", "mac-1,mac-2,mac-3"); | ||||
|         let mut dhcp_config = setup_test_env(vec![host]); | ||||
| 
 | ||||
|         dhcp_config.remove_static_mapping("mac-2"); | ||||
| 
 | ||||
|         let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts; | ||||
|         assert_eq!(hosts.len(), 1); | ||||
|         assert_eq!(hosts[0].hwaddr.content_string(), "mac-1,mac-3"); | ||||
|     } | ||||
| 
 | ||||
|     #[test] | ||||
|     fn test_remove_last_mac_from_host() { | ||||
|         let host = create_host("uuid-1", "host-1", "192.168.1.50", "mac-1"); | ||||
|         let mut dhcp_config = setup_test_env(vec![host]); | ||||
| 
 | ||||
|         dhcp_config.remove_static_mapping("mac-1"); | ||||
| 
 | ||||
|         let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts; | ||||
|         assert!(hosts.is_empty()); | ||||
|     } | ||||
| 
 | ||||
|     #[test] | ||||
|     fn test_remove_non_existent_mac() { | ||||
|         let host = create_host("uuid-1", "host-1", "192.168.1.50", "mac-1,mac-2"); | ||||
|         let mut dhcp_config = setup_test_env(vec![host.clone()]); | ||||
| 
 | ||||
|         dhcp_config.remove_static_mapping("mac-nonexistent"); | ||||
| 
 | ||||
|         let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts; | ||||
|         assert_eq!(hosts.len(), 1); | ||||
|         assert_eq!(hosts[0], host); // The host should be unchanged.
 | ||||
|     } | ||||
| 
 | ||||
|     #[test] | ||||
|     fn test_remove_mac_case_insensitively() { | ||||
|         let host = create_host("uuid-1", "host-1", "192.168.1.50", "AA:BB:CC:DD:EE:FF"); | ||||
|         let mut dhcp_config = setup_test_env(vec![host]); | ||||
| 
 | ||||
|         dhcp_config.remove_static_mapping("aa:bb:cc:dd:ee:ff"); | ||||
| 
 | ||||
|         let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts; | ||||
|         assert!(hosts.is_empty()); | ||||
|     } | ||||
| 
 | ||||
|     #[test] | ||||
|     fn test_remove_mac_from_correct_host_only() { | ||||
|         let host1 = create_host( | ||||
|             "uuid-1", | ||||
|             "host-1", | ||||
|             "192.168.1.50", | ||||
|             "AA:AA:AA:AA:AA:AA,BB:BB:BB:BB:BB:BB", | ||||
|         ); | ||||
|         let host2 = create_host( | ||||
|             "uuid-2", | ||||
|             "host-2", | ||||
|             "192.168.1.51", | ||||
|             "CC:CC:CC:CC:CC:CC,DD:DD:DD:DD:DD:DD", | ||||
|         ); | ||||
|         let mut dhcp_config = setup_test_env(vec![host1.clone(), host2.clone()]); | ||||
| 
 | ||||
|         dhcp_config.remove_static_mapping("AA:AA:AA:AA:AA:AA"); | ||||
| 
 | ||||
|         let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts; | ||||
|         assert_eq!(hosts.len(), 2); | ||||
|         let updated_host1 = hosts.iter().find(|h| h.uuid == "uuid-1").unwrap(); | ||||
|         let unchanged_host2 = hosts.iter().find(|h| h.uuid == "uuid-2").unwrap(); | ||||
| 
 | ||||
|         assert_eq!(updated_host1.hwaddr.content_string(), "BB:BB:BB:BB:BB:BB"); | ||||
|         assert_eq!( | ||||
|             unchanged_host2.hwaddr.content_string(), | ||||
|             "CC:CC:CC:CC:CC:CC,DD:DD:DD:DD:DD:DD" | ||||
|         ); | ||||
|     } | ||||
| } | ||||
|  | ||||
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							| @ -215,6 +215,7 @@ | ||||
|       <description>System Administrators</description> | ||||
|       <scope>system</scope> | ||||
|       <gid>1999</gid> | ||||
|       <member>0</member> | ||||
|       <member>2000</member> | ||||
|       <priv>page-all</priv> | ||||
|     </group> | ||||
|  | ||||
| @ -27,6 +27,7 @@ | ||||
|       <description>System Administrators</description> | ||||
|       <scope>system</scope> | ||||
|       <gid>1999</gid> | ||||
|       <member>0</member> | ||||
|       <member>2000</member> | ||||
|       <priv>page-all</priv> | ||||
|     </group> | ||||
|  | ||||
| @ -27,6 +27,7 @@ | ||||
|       <description>System Administrators</description> | ||||
|       <scope>system</scope> | ||||
|       <gid>1999</gid> | ||||
|       <member>0</member> | ||||
|       <member>2000</member> | ||||
|       <priv>page-all</priv> | ||||
|     </group> | ||||
|  | ||||
		Loading…
	
		Reference in New Issue
	
	Block a user