forked from NationTech/harmony
		
Merge branch 'master' into fix/ingress

commit 29d22a611f
					
				
							
								
								
									
.gitattributes (vendored): 2 changes

@@ -2,3 +2,5 @@ bootx64.efi filter=lfs diff=lfs merge=lfs -text
 grubx64.efi filter=lfs diff=lfs merge=lfs -text
 initrd filter=lfs diff=lfs merge=lfs -text
 linux filter=lfs diff=lfs merge=lfs -text
+data/okd/bin/* filter=lfs diff=lfs merge=lfs -text
+data/okd/installer_image/* filter=lfs diff=lfs merge=lfs -text
							
								
								
									
.gitignore (vendored): 1 change

@@ -3,6 +3,7 @@ private_repos/

 ### Harmony ###
 harmony.log
+data/okd/installation_files*

 ### Helm ###
 # Chart dependencies
@@ -0,0 +1,20 @@
+{
+  "db_name": "SQLite",
+  "query": "SELECT host_id FROM host_role_mapping WHERE role = ?",
+  "describe": {
+    "columns": [
+      {
+        "name": "host_id",
+        "ordinal": 0,
+        "type_info": "Text"
+      }
+    ],
+    "parameters": {
+      "Right": 1
+    },
+    "nullable": [
+      false
+    ]
+  },
+  "hash": "2ea29df2326f7c84bd4100ad510a3fd4878dc2e217dc83f9bf45a402dfd62a91"
+}
@@ -0,0 +1,32 @@
+{
+  "db_name": "SQLite",
+  "query": "\n        SELECT\n            p1.id,\n            p1.version_id,\n            p1.data as \"data: Json<PhysicalHost>\"\n        FROM\n            physical_hosts p1\n        INNER JOIN (\n            SELECT\n                id,\n                MAX(version_id) AS max_version\n            FROM\n                physical_hosts\n            GROUP BY\n                id\n        ) p2 ON p1.id = p2.id AND p1.version_id = p2.max_version\n        ",
+  "describe": {
+    "columns": [
+      {
+        "name": "id",
+        "ordinal": 0,
+        "type_info": "Text"
+      },
+      {
+        "name": "version_id",
+        "ordinal": 1,
+        "type_info": "Text"
+      },
+      {
+        "name": "data: Json<PhysicalHost>",
+        "ordinal": 2,
+        "type_info": "Blob"
+      }
+    ],
+    "parameters": {
+      "Right": 0
+    },
+    "nullable": [
+      false,
+      false,
+      false
+    ]
+  },
+  "hash": "8d247918eca10a88b784ee353db090c94a222115c543231f2140cba27bd0f067"
+}
@@ -0,0 +1,12 @@
+{
+  "db_name": "SQLite",
+  "query": "\n        INSERT INTO host_role_mapping (host_id, role)\n        VALUES (?, ?)\n        ",
+  "describe": {
+    "columns": [],
+    "parameters": {
+      "Right": 2
+    },
+    "nullable": []
+  },
+  "hash": "df7a7c9cfdd0972e2e0ce7ea444ba8bc9d708a4fb89d5593a0be2bbebde62aff"
+}
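The three JSON entries above are sqlx offline query metadata, the kind of file `cargo sqlx prepare` writes so that `query!`/`query_as!` macros can type-check against SQLite without a live database at build time. A minimal sketch of the kind of call that corresponds to the first entry; the function name, `pool` parameter, and error handling here are illustrative, not taken from this repository:

```rust
use sqlx::SqlitePool;

// Hypothetical helper: fetch the host ids mapped to a given role.
// The SQL matches the cached query metadata above; the wrapper itself is a sketch.
async fn host_ids_for_role(pool: &SqlitePool, role: &str) -> Result<Vec<String>, sqlx::Error> {
    let rows = sqlx::query!(
        "SELECT host_id FROM host_role_mapping WHERE role = ?",
        role
    )
    .fetch_all(pool)
    .await?;

    Ok(rows.into_iter().map(|r| r.host_id).collect())
}
```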
							
								
								
									
Cargo.lock (generated): 688 changes

File diff suppressed because it is too large.
											
										
									
								
@@ -1,4 +1,3 @@
-use log::debug;
 use mdns_sd::{ServiceDaemon, ServiceEvent};

 use crate::SERVICE_TYPE;
@@ -74,7 +73,7 @@ pub async fn discover() {
     // }
 }

-async fn discover_example() {
+async fn _discover_example() {
     use mdns_sd::{ServiceDaemon, ServiceEvent};

     // Create a daemon
							
								
								
									
										
BIN data/okd/bin/kubectl (Stored with Git LFS, Executable file): Binary file not shown.
BIN data/okd/bin/oc (Stored with Git LFS, Executable file): Binary file not shown.
BIN data/okd/bin/oc_README.md (Stored with Git LFS, Normal file): Binary file not shown.
BIN data/okd/bin/openshift-install (Stored with Git LFS, Executable file): Binary file not shown.
BIN data/okd/bin/openshift-install_README.md (Stored with Git LFS, Normal file): Binary file not shown.
BIN data/okd/installer_image/scos-9.0.20250510-0-live-initramfs.x86_64.img (Stored with Git LFS, Normal file): Binary file not shown.
BIN data/okd/installer_image/scos-9.0.20250510-0-live-kernel.x86_64 (Stored with Git LFS, Normal file): Binary file not shown.
BIN data/okd/installer_image/scos-9.0.20250510-0-live-rootfs.x86_64.img (Stored with Git LFS, Normal file): Binary file not shown.
										
									
								
							
							
								
								
									
data/okd/installer_image/scos-live-initramfs.x86_64.img (Symbolic link): 1 line

@@ -0,0 +1 @@
+scos-9.0.20250510-0-live-initramfs.x86_64.img
							
								
								
									
data/okd/installer_image/scos-live-kernel.x86_64 (Symbolic link): 1 line

@@ -0,0 +1 @@
+scos-9.0.20250510-0-live-kernel.x86_64
							
								
								
									
data/okd/installer_image/scos-live-rootfs.x86_64.img (Symbolic link): 1 line

@@ -0,0 +1 @@
+scos-9.0.20250510-0-live-rootfs.x86_64.img
							
								
								
									
docs/OKD_Host_preparation.md (Normal file): 8 lines

@@ -0,0 +1,8 @@
+## BIOS settings
+
+1. CSM: Disabled (compatibility support for booting GPT-formatted drives)
+2. Secure Boot: Disabled
+3. Boot order:
+    1. Local hard drive
+    2. PXE IPv4
+4. System clock: make sure it is set correctly, otherwise you will get invalid certificate errors
@@ -2,7 +2,7 @@ use harmony::{
     inventory::Inventory,
     modules::{
         dummy::{ErrorScore, PanicScore, SuccessScore},
-        inventory::DiscoverInventoryAgentScore,
+        inventory::LaunchDiscoverInventoryAgentScore,
     },
     topology::LocalhostTopology,
 };
@@ -16,7 +16,7 @@ async fn main() {
             Box::new(SuccessScore {}),
             Box::new(ErrorScore {}),
             Box::new(PanicScore {}),
-            Box::new(DiscoverInventoryAgentScore {
+            Box::new(LaunchDiscoverInventoryAgentScore {
                 discovery_timeout: Some(10),
             }),
         ],
@@ -13,6 +13,7 @@ harmony_types = { path = "../../harmony_types" }
 cidr = { workspace = true }
 tokio = { workspace = true }
 harmony_macros = { path = "../../harmony_macros" }
+harmony_secret = { path = "../../harmony_secret" }
 log = { workspace = true }
 env_logger = { workspace = true }
 url = { workspace = true }
@@ -5,22 +5,24 @@ use std::{

 use cidr::Ipv4Cidr;
 use harmony::{
-    hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
+    config::secret::SshKeyPair,
+    data::{FileContent, FilePath},
+    hardware::{HostCategory, Location, PhysicalHost, SwitchGroup},
     infra::opnsense::OPNSenseManagementInterface,
     inventory::Inventory,
     modules::{
         http::StaticFilesHttpScore,
-        ipxe::IpxeScore,
         okd::{
             bootstrap_dhcp::OKDBootstrapDhcpScore,
             bootstrap_load_balancer::OKDBootstrapLoadBalancerScore, dhcp::OKDDhcpScore,
-            dns::OKDDnsScore,
+            dns::OKDDnsScore, ipxe::OKDIpxeScore,
         },
         tftp::TftpScore,
     },
     topology::{LogicalHost, UnmanagedRouter},
 };
 use harmony_macros::{ip, mac_address};
+use harmony_secret::SecretManager;
 use harmony_types::net::Url;

 #[tokio::main]
@@ -124,14 +126,28 @@ async fn main() {
     let load_balancer_score =
         harmony::modules::okd::load_balancer::OKDLoadBalancerScore::new(&topology);

+    let ssh_key = SecretManager::get_or_prompt::<SshKeyPair>().await.unwrap();
+
     let tftp_score = TftpScore::new(Url::LocalFolder("./data/watchguard/tftpboot".to_string()));
     let http_score = StaticFilesHttpScore {
         folder_to_serve: Some(Url::LocalFolder(
             "./data/watchguard/pxe-http-files".to_string(),
         )),
         files: vec![],
+        remote_path: None,
+    };
+
+    let kickstart_filename = "inventory.kickstart".to_string();
+    let harmony_inventory_agent = "harmony_inventory_agent".to_string();
+
+    let ipxe_score = OKDIpxeScore {
+        kickstart_filename,
+        harmony_inventory_agent,
+        cluster_pubkey: FileContent {
+            path: FilePath::Relative("cluster_ssh_key.pub".to_string()),
+            content: ssh_key.public,
+        },
     };
-    let ipxe_score = IpxeScore::new();

     harmony_tui::run(
         inventory,
							
								
								
									
examples/okd_installation/Cargo.toml (Normal file): 21 lines

@@ -0,0 +1,21 @@
+[package]
+name = "example-okd-install"
+edition = "2024"
+version.workspace = true
+readme.workspace = true
+license.workspace = true
+publish = false
+
+[dependencies]
+harmony = { path = "../../harmony" }
+harmony_cli = { path = "../../harmony_cli" }
+harmony_types = { path = "../../harmony_types" }
+harmony_secret = { path = "../../harmony_secret" }
+harmony_secret_derive = { path = "../../harmony_secret_derive" }
+cidr = { workspace = true }
+tokio = { workspace = true }
+harmony_macros = { path = "../../harmony_macros" }
+log = { workspace = true }
+env_logger = { workspace = true }
+url = { workspace = true }
+serde.workspace = true
							
								
								
									
examples/okd_installation/env.sh (Normal file): 4 lines

@@ -0,0 +1,4 @@
+export HARMONY_SECRET_NAMESPACE=example-vms
+export HARMONY_SECRET_STORE=file
+export HARMONY_DATABASE_URL=sqlite://harmony_vms.sqlite
+export RUST_LOG=info
							
								
								
									
examples/okd_installation/src/main.rs (Normal file): 34 lines

@@ -0,0 +1,34 @@
+mod topology;
+
+use crate::topology::{get_inventory, get_topology};
+use harmony::{
+    config::secret::SshKeyPair,
+    data::{FileContent, FilePath},
+    modules::okd::{installation::OKDInstallationPipeline, ipxe::OKDIpxeScore},
+    score::Score,
+    topology::HAClusterTopology,
+};
+use harmony_secret::SecretManager;
+
+#[tokio::main]
+async fn main() {
+    let inventory = get_inventory();
+    let topology = get_topology().await;
+
+    let ssh_key = SecretManager::get_or_prompt::<SshKeyPair>().await.unwrap();
+
+    let mut scores: Vec<Box<dyn Score<HAClusterTopology>>> = vec![Box::new(OKDIpxeScore {
+        kickstart_filename: "inventory.kickstart".to_string(),
+        harmony_inventory_agent: "harmony_inventory_agent".to_string(),
+        cluster_pubkey: FileContent {
+            path: FilePath::Relative("cluster_ssh_key.pub".to_string()),
+            content: ssh_key.public,
+        },
+    })];
+
+    scores.append(&mut OKDInstallationPipeline::get_all_scores().await);
+
+    harmony_cli::run(inventory, topology, scores, None)
+        .await
+        .unwrap();
+}
							
								
								
									
examples/okd_installation/src/topology.rs (Normal file): 77 lines

@@ -0,0 +1,77 @@
+use cidr::Ipv4Cidr;
+use harmony::{
+    hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
+    infra::opnsense::OPNSenseManagementInterface,
+    inventory::Inventory,
+    topology::{HAClusterTopology, LogicalHost, UnmanagedRouter},
+};
+use harmony_macros::{ip, ipv4};
+use harmony_secret::{Secret, SecretManager};
+use serde::{Deserialize, Serialize};
+use std::{net::IpAddr, sync::Arc};
+
+#[derive(Secret, Serialize, Deserialize, Debug, PartialEq)]
+struct OPNSenseFirewallConfig {
+    username: String,
+    password: String,
+}
+
+pub async fn get_topology() -> HAClusterTopology {
+    let firewall = harmony::topology::LogicalHost {
+        ip: ip!("192.168.1.1"),
+        name: String::from("opnsense-1"),
+    };
+
+    let config = SecretManager::get_or_prompt::<OPNSenseFirewallConfig>().await;
+    let config = config.unwrap();
+
+    let opnsense = Arc::new(
+        harmony::infra::opnsense::OPNSenseFirewall::new(
+            firewall,
+            None,
+            &config.username,
+            &config.password,
+        )
+        .await,
+    );
+    let lan_subnet = ipv4!("192.168.1.0");
+    let gateway_ipv4 = ipv4!("192.168.1.1");
+    let gateway_ip = IpAddr::V4(gateway_ipv4);
+    harmony::topology::HAClusterTopology {
+        domain_name: "demo.harmony.mcd".to_string(),
+        router: Arc::new(UnmanagedRouter::new(
+            gateway_ip,
+            Ipv4Cidr::new(lan_subnet, 24).unwrap(),
+        )),
+        load_balancer: opnsense.clone(),
+        firewall: opnsense.clone(),
+        tftp_server: opnsense.clone(),
+        http_server: opnsense.clone(),
+        dhcp_server: opnsense.clone(),
+        dns_server: opnsense.clone(),
+        control_plane: vec![LogicalHost {
+            ip: ip!("192.168.1.20"),
+            name: "master".to_string(),
+        }],
+        bootstrap_host: LogicalHost {
+            ip: ip!("192.168.1.10"),
+            name: "bootstrap".to_string(),
+        },
+        workers: vec![],
+        switch: vec![],
+    }
+}
+
+pub fn get_inventory() -> Inventory {
+    Inventory {
+        location: Location::new(
+            "Some virtual machine or maybe a physical machine if you're cool".to_string(),
+            "testopnsense".to_string(),
+        ),
+        switch: SwitchGroup::from([]),
+        firewall_mgmt: Box::new(OPNSenseManagementInterface::new()),
+        storage_host: vec![],
+        worker_host: vec![],
+        control_plane_host: vec![],
+    }
+}
							
								
								
									
examples/okd_installation/ssh_example_key (Normal file): 7 lines

@@ -0,0 +1,7 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+QyNTUxOQAAACAcemw8pbwuvHFaYynxBbS0Cf3ThYuj1Utr7CDqjwySHAAAAJikacCNpGnA
+jQAAAAtzc2gtZWQyNTUxOQAAACAcemw8pbwuvHFaYynxBbS0Cf3ThYuj1Utr7CDqjwySHA
+AAAECiiKk4V6Q5cVs6axDM4sjAzZn/QCZLQekmYQXS9XbEYxx6bDylvC68cVpjKfEFtLQJ
+/dOFi6PVS2vsIOqPDJIcAAAAEGplYW5nYWJAbGlsaWFuZTIBAgMEBQ==
+-----END OPENSSH PRIVATE KEY-----
							
								
								
									
examples/okd_installation/ssh_example_key.pub (Normal file): 1 line

@@ -0,0 +1 @@
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBx6bDylvC68cVpjKfEFtLQJ/dOFi6PVS2vsIOqPDJIc jeangab@liliane2
@@ -1,7 +1,12 @@
 mod topology;

 use crate::topology::{get_inventory, get_topology};
-use harmony::modules::okd::ipxe::OkdIpxeScore;
+use harmony::{
+    config::secret::SshKeyPair,
+    data::{FileContent, FilePath},
+    modules::okd::ipxe::OKDIpxeScore,
+};
+use harmony_secret::SecretManager;

 #[tokio::main]
 async fn main() {
@@ -9,13 +14,16 @@ async fn main() {
     let topology = get_topology().await;

     let kickstart_filename = "inventory.kickstart".to_string();
-    let cluster_pubkey_filename = "cluster_ssh_key.pub".to_string();
     let harmony_inventory_agent = "harmony_inventory_agent".to_string();
+    let ssh_key = SecretManager::get_or_prompt::<SshKeyPair>().await.unwrap();

-    let ipxe_score = OkdIpxeScore {
+    let ipxe_score = OKDIpxeScore {
         kickstart_filename,
         harmony_inventory_agent,
-        cluster_pubkey_filename,
+        cluster_pubkey: FileContent {
+            path: FilePath::Relative("cluster_ssh_key.pub".to_string()),
+            content: ssh_key.public,
+        },
     };

     harmony_cli::run(inventory, topology, vec![Box::new(ipxe_score)], None)
@@ -1,28 +1,22 @@
 use cidr::Ipv4Cidr;
 use harmony::{
-    hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
+    config::secret::OPNSenseFirewallCredentials,
+    hardware::{Location, SwitchGroup},
     infra::opnsense::OPNSenseManagementInterface,
     inventory::Inventory,
     topology::{HAClusterTopology, LogicalHost, UnmanagedRouter},
 };
 use harmony_macros::{ip, ipv4};
-use harmony_secret::{Secret, SecretManager};
-use serde::{Deserialize, Serialize};
+use harmony_secret::SecretManager;
 use std::{net::IpAddr, sync::Arc};

-#[derive(Secret, Serialize, Deserialize, Debug, PartialEq)]
-struct OPNSenseFirewallConfig {
-    username: String,
-    password: String,
-}
-
 pub async fn get_topology() -> HAClusterTopology {
     let firewall = harmony::topology::LogicalHost {
         ip: ip!("192.168.1.1"),
         name: String::from("opnsense-1"),
     };

-    let config = SecretManager::get::<OPNSenseFirewallConfig>().await;
+    let config = SecretManager::get_or_prompt::<OPNSenseFirewallCredentials>().await;
     let config = config.unwrap();

     let opnsense = Arc::new(
@@ -5,7 +5,7 @@ use std::{

 use cidr::Ipv4Cidr;
 use harmony::{
-    hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
+    hardware::{HostCategory, Location, PhysicalHost, SwitchGroup},
     infra::opnsense::OPNSenseManagementInterface,
     inventory::Inventory,
     modules::{
@@ -85,6 +85,7 @@ async fn main() {
             "./data/watchguard/pxe-http-files".to_string(),
         )),
         files: vec![],
+        remote_path: None,
     };

     harmony_tui::run(
@@ -9,6 +9,7 @@ use harmony::{
     },
     topology::{
         BackendServer, DummyInfra, HealthCheck, HttpMethod, HttpStatusCode, LoadBalancerService,
+        SSL,
     },
 };
 use harmony_macros::ipv4;
@@ -47,6 +48,7 @@ fn build_large_score() -> LoadBalancerScore {
                 .to_string(),
             HttpMethod::GET,
             HttpStatusCode::Success2xx,
+            SSL::Disabled,
         )),
     };
     LoadBalancerScore {
@@ -68,9 +68,11 @@ thiserror.workspace = true
 once_cell = "1.21.3"
 walkdir = "2.5.0"
 harmony_inventory_agent = { path = "../harmony_inventory_agent" }
-harmony_secret_derive = { version = "0.1.0", path = "../harmony_secret_derive" }
+harmony_secret_derive = { path = "../harmony_secret_derive" }
+harmony_secret = { path = "../harmony_secret" }
 askama.workspace = true
 sqlx.workspace = true
+inquire.workspace = true

 [dev-dependencies]
 pretty_assertions.workspace = true
@@ -1,3 +1,5 @@
+pub mod secret;
+
 use lazy_static::lazy_static;
 use std::path::PathBuf;

							
								
								
									
harmony/src/domain/config/secret.rs (Normal file): 20 lines

@@ -0,0 +1,20 @@
+use harmony_secret_derive::Secret;
+use serde::{Deserialize, Serialize};
+
+#[derive(Secret, Serialize, Deserialize, Debug, PartialEq)]
+pub struct OPNSenseFirewallCredentials {
+    pub username: String,
+    pub password: String,
+}
+
+// TODO we need a better way to handle multiple "instances" of the same secret structure.
+#[derive(Secret, Serialize, Deserialize, Debug, PartialEq)]
+pub struct SshKeyPair {
+    pub private: String,
+    pub public: String,
+}
+
+#[derive(Secret, Serialize, Deserialize, Debug, PartialEq)]
+pub struct RedhatSecret {
+    pub pull_secret: String,
+}
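Each `Secret`-derived struct above is a named secret that `SecretManager` can load from the configured store or interactively prompt for, as the example binaries in this diff do. A minimal sketch of that pattern, assuming the `harmony_secret` API exactly as it appears in those examples (the function name and the `expect` message are illustrative):

```rust
use harmony::config::secret::OPNSenseFirewallCredentials;
use harmony_secret::SecretManager;

// Sketch: load (or prompt for) the firewall credentials, mirroring the call sites
// added in examples/okd_installation/src/topology.rs.
async fn load_firewall_credentials() -> OPNSenseFirewallCredentials {
    SecretManager::get_or_prompt::<OPNSenseFirewallCredentials>()
        .await
        .expect("failed to load OPNSense credentials from the configured secret store")
}
```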
@@ -1,5 +1,3 @@
-use std::sync::Arc;
-
 use derive_new::new;
 use harmony_inventory_agent::hwinfo::{CPU, MemoryModule, NetworkInterface, StorageDrive};
 use harmony_types::net::MacAddress;
@@ -10,7 +8,7 @@ pub type HostGroup = Vec<PhysicalHost>;
 pub type SwitchGroup = Vec<Switch>;
 pub type FirewallGroup = Vec<PhysicalHost>;

-#[derive(Debug, Clone, Serialize)]
+#[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct PhysicalHost {
     pub id: Id,
     pub category: HostCategory,
@@ -151,6 +149,98 @@ impl PhysicalHost {
         parts.join(" | ")
     }

+    pub fn parts_list(&self) -> String {
+        let PhysicalHost {
+            id,
+            category,
+            network,
+            storage,
+            labels,
+            memory_modules,
+            cpus,
+        } = self;
+
+        let mut parts_list = String::new();
+        parts_list.push_str("\n\n=====================");
+        parts_list.push_str(&format!("\nHost ID {id}"));
+        parts_list.push_str("\n=====================");
+        parts_list.push_str("\n\n=====================");
+        parts_list.push_str(&format!("\nCPU count {}", cpus.len()));
+        parts_list.push_str("\n=====================");
+        cpus.iter().for_each(|c| {
+            let CPU {
+                model,
+                vendor,
+                cores,
+                threads,
+                frequency_mhz,
+            } = c;
+            parts_list.push_str(&format!(
+                "\n{vendor} {model}, {cores}/{threads} {}Ghz",
+                *frequency_mhz as f64 / 1000.0
+            ));
+        });
+
+        parts_list.push_str("\n\n=====================");
+        parts_list.push_str(&format!("\nNetwork Interfaces count {}", network.len()));
+        parts_list.push_str("\n=====================");
+        network.iter().for_each(|nic| {
+            parts_list.push_str(&format!(
+                "\nNic({} {}Gbps mac({}) ipv4({}), ipv6({})",
+                nic.name,
+                nic.speed_mbps.unwrap_or(0) / 1000,
+                nic.mac_address,
+                nic.ipv4_addresses.join(","),
+                nic.ipv6_addresses.join(",")
+            ));
+        });
+
+        parts_list.push_str("\n\n=====================");
+        parts_list.push_str(&format!("\nStorage drives count {}", storage.len()));
+        parts_list.push_str("\n=====================");
+        storage.iter().for_each(|drive| {
+            let StorageDrive {
+                name,
+                model,
+                serial,
+                size_bytes,
+                logical_block_size: _,
+                physical_block_size: _,
+                rotational: _,
+                wwn: _,
+                interface_type,
+                smart_status,
+            } = drive;
+            parts_list.push_str(&format!(
+                "\n{name} {}Gb {model} {interface_type} smart({smart_status:?}) {serial}",
+                size_bytes / 1000 / 1000 / 1000
+            ));
+        });
+
+        parts_list.push_str("\n\n=====================");
+        parts_list.push_str(&format!("\nMemory modules count {}", memory_modules.len()));
+        parts_list.push_str("\n=====================");
+        memory_modules.iter().for_each(|mem| {
+            let MemoryModule {
+                size_bytes,
+                speed_mhz,
+                manufacturer,
+                part_number,
+                serial_number,
+                rank,
+            } = mem;
+            parts_list.push_str(&format!(
+                "\n{}Gb, {}Mhz, Manufacturer ({}), Part Number ({})",
+                size_bytes / 1000 / 1000 / 1000,
+                speed_mhz.unwrap_or(0),
+                manufacturer.as_ref().unwrap_or(&String::new()),
+                part_number.as_ref().unwrap_or(&String::new()),
+            ));
+        });
+
+        parts_list
+    }
+
     pub fn cluster_mac(&self) -> MacAddress {
         self.network
             .first()
@@ -173,6 +263,10 @@ impl PhysicalHost {
         self
     }

+    pub fn get_mac_address(&self) -> Vec<MacAddress> {
+        self.network.iter().map(|nic| nic.mac_address).collect()
+    }
+
     pub fn label(mut self, name: String, value: String) -> Self {
         self.labels.push(Label { name, value });
         self
@@ -221,15 +315,6 @@ impl PhysicalHost {
 //     }
 // }

-impl<'de> Deserialize<'de> for PhysicalHost {
-    fn deserialize<D>(_deserializer: D) -> Result<Self, D::Error>
-    where
-        D: serde::Deserializer<'de>,
-    {
-        todo!()
-    }
-}
-
 #[derive(new, Serialize)]
 pub struct ManualManagementInterface;

@@ -273,16 +358,13 @@ where
     }
 }

-#[derive(Debug, Clone, Serialize)]
+#[derive(Debug, Clone, Serialize, Deserialize)]
 pub enum HostCategory {
     Server,
     Firewall,
     Switch,
 }

-#[cfg(test)]
-use harmony_macros::mac_address;
-
 use harmony_types::id::Id;

 #[derive(Debug, Clone, Serialize)]
@@ -291,7 +373,7 @@ pub struct Switch {
     _management_interface: NetworkInterface,
 }

-#[derive(Debug, new, Clone, Serialize)]
+#[derive(Debug, new, Clone, Serialize, Deserialize)]
 pub struct Label {
     pub name: String,
     pub value: String,
@@ -32,6 +32,7 @@ pub enum InterpretName {
     K8sPrometheusCrdAlerting,
     DiscoverInventoryAgent,
     CephClusterHealth,
+    Custom(&'static str),
     RHOBAlerting,
 }

@@ -61,6 +62,7 @@ impl std::fmt::Display for InterpretName {
             InterpretName::K8sPrometheusCrdAlerting => f.write_str("K8sPrometheusCrdAlerting"),
             InterpretName::DiscoverInventoryAgent => f.write_str("DiscoverInventoryAgent"),
             InterpretName::CephClusterHealth => f.write_str("CephClusterHealth"),
+            InterpretName::Custom(name) => f.write_str(name),
             InterpretName::RHOBAlerting => f.write_str("RHOBAlerting"),
         }
     }
@@ -142,6 +144,12 @@ impl From<PreparationError> for InterpretError {
     }
 }

+impl From<harmony_secret::SecretStoreError> for InterpretError {
+    fn from(value: harmony_secret::SecretStoreError) -> Self {
+        InterpretError::new(format!("Interpret error : {value}"))
+    }
+}
+
 impl From<ExecutorError> for InterpretError {
     fn from(value: ExecutorError) -> Self {
         Self {
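Taken together, the two additions let module code name ad-hoc interprets without adding enum variants, and bubble secret-store failures up with `?`. A sketch of how an interpret might use them; the import paths and the assumption that `get_or_prompt` returns `Result<_, SecretStoreError>` are mine, not confirmed by this diff:

```rust
// Paths are assumed for illustration; the real items live in the harmony crate.
use harmony::config::secret::RedhatSecret;
use harmony::interpret::{InterpretError, InterpretName};
use harmony_secret::SecretManager;

fn custom_name() -> InterpretName {
    // A module-defined interpret no longer needs its own enum variant.
    InterpretName::Custom("OKDInstallation")
}

async fn load_pull_secret() -> Result<String, InterpretError> {
    // SecretStoreError converts into InterpretError via the new From impl, so `?` works here.
    let secret = SecretManager::get_or_prompt::<RedhatSecret>().await?;
    Ok(secret.pull_secret)
}
```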
@@ -17,12 +17,14 @@ impl InventoryFilter {

 use derive_new::new;
 use log::info;
+use serde::{Deserialize, Serialize};
+use strum::EnumIter;

 use crate::hardware::{ManagementInterface, ManualManagementInterface};

 use super::{
     filter::Filter,
-    hardware::{FirewallGroup, HostGroup, Location, SwitchGroup},
+    hardware::{HostGroup, Location, SwitchGroup},
 };

 #[derive(Debug)]
@@ -61,3 +63,11 @@ impl Inventory {
         }
     }
 }
+
+#[derive(Debug, Serialize, Deserialize, sqlx::Type, Clone, EnumIter)]
+pub enum HostRole {
+    Bootstrap,
+    ControlPlane,
+    Worker,
+    Storage,
+}
@@ -1,6 +1,6 @@
 use async_trait::async_trait;

-use crate::hardware::PhysicalHost;
+use crate::{hardware::PhysicalHost, interpret::InterpretError, inventory::HostRole};

 /// Errors that can occur within the repository layer.
 #[derive(thiserror::Error, Debug)]
@@ -15,6 +15,12 @@ pub enum RepoError {
     ConnectionFailed(String),
 }

+impl From<RepoError> for InterpretError {
+    fn from(value: RepoError) -> Self {
+        InterpretError::new(format!("Interpret error : {value}"))
+    }
+}
+
 // --- Trait and Implementation ---

 /// Defines the contract for inventory persistence.
@@ -22,4 +28,11 @@ pub enum RepoError {
 pub trait InventoryRepository: Send + Sync + 'static {
     async fn save(&self, host: &PhysicalHost) -> Result<(), RepoError>;
     async fn get_latest_by_id(&self, host_id: &str) -> Result<Option<PhysicalHost>, RepoError>;
+    async fn get_all_hosts(&self) -> Result<Vec<PhysicalHost>, RepoError>;
+    async fn get_host_for_role(&self, role: &HostRole) -> Result<Vec<PhysicalHost>, RepoError>;
+    async fn save_role_mapping(
+        &self,
+        role: &HostRole,
+        host: &PhysicalHost,
+    ) -> Result<(), RepoError>;
 }
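A sketch of how the three new repository methods fit together when assigning roles, usable with any `InventoryRepository` implementation (the concrete `SqliteInventoryRepository` changes appear further down). The import paths, function name, and the "first host becomes bootstrap" policy are illustrative assumptions, not taken from the repository:

```rust
use harmony::hardware::PhysicalHost;
use harmony::inventory::{HostRole, InventoryRepository, RepoError};

// Pick the first discovered host as the bootstrap node and persist the mapping,
// then read the mapping back through the role query.
async fn assign_bootstrap<R: InventoryRepository>(
    repo: &R,
) -> Result<Option<PhysicalHost>, RepoError> {
    let hosts = repo.get_all_hosts().await?;
    if let Some(host) = hosts.first() {
        repo.save_role_mapping(&HostRole::Bootstrap, host).await?;
    }
    Ok(repo
        .get_host_for_role(&HostRole::Bootstrap)
        .await?
        .into_iter()
        .next())
}
```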
@@ -69,6 +69,26 @@ impl K8sclient for HAClusterTopology {
 }

 impl HAClusterTopology {
+    // TODO this is a hack to avoid refactoring
+    pub fn get_cluster_name(&self) -> String {
+        self.domain_name
+            .split(".")
+            .next()
+            .expect("Cluster domain name must not be empty")
+            .to_string()
+    }
+
+    pub fn get_cluster_base_domain(&self) -> String {
+        let base_domain = self
+            .domain_name
+            .strip_prefix(&self.get_cluster_name())
+            .expect("cluster domain must start with cluster name");
+        base_domain
+            .strip_prefix(".")
+            .unwrap_or(base_domain)
+            .to_string()
+    }
+
     pub fn autoload() -> Self {
         let dummy_infra = Arc::new(DummyInfra {});
         let dummy_host = LogicalHost {
@@ -161,6 +181,14 @@ impl DhcpServer for HAClusterTopology {
         self.dhcp_server.set_pxe_options(options).await
     }

+    async fn set_dhcp_range(
+        &self,
+        start: &IpAddress,
+        end: &IpAddress,
+    ) -> Result<(), ExecutorError> {
+        self.dhcp_server.set_dhcp_range(start, end).await
+    }
+
     fn get_ip(&self) -> IpAddress {
         self.dhcp_server.get_ip()
     }
@@ -209,8 +237,12 @@ impl Router for HAClusterTopology {

 #[async_trait]
 impl HttpServer for HAClusterTopology {
-    async fn serve_files(&self, url: &Url) -> Result<(), ExecutorError> {
-        self.http_server.serve_files(url).await
+    async fn serve_files(
+        &self,
+        url: &Url,
+        remote_path: &Option<String>,
+    ) -> Result<(), ExecutorError> {
+        self.http_server.serve_files(url, remote_path).await
     }

     async fn serve_file_content(&self, file: &FileContent) -> Result<(), ExecutorError> {
@@ -298,6 +330,13 @@ impl DhcpServer for DummyInfra {
     async fn set_pxe_options(&self, _options: PxeOptions) -> Result<(), ExecutorError> {
         unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
     }
+    async fn set_dhcp_range(
+        &self,
+        start: &IpAddress,
+        end: &IpAddress,
+    ) -> Result<(), ExecutorError> {
+        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
+    }
     fn get_ip(&self) -> IpAddress {
         unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
     }
@@ -362,7 +401,11 @@ impl TftpServer for DummyInfra {

 #[async_trait]
 impl HttpServer for DummyInfra {
-    async fn serve_files(&self, _url: &Url) -> Result<(), ExecutorError> {
+    async fn serve_files(
+        &self,
+        _url: &Url,
+        _remote_path: &Option<String>,
+    ) -> Result<(), ExecutorError> {
         unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
     }
     async fn serve_file_content(&self, _file: &FileContent) -> Result<(), ExecutorError> {
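The two new helpers split the topology's `domain_name` on its first dot: for the `demo.harmony.mcd` value used in the examples, the cluster name is `demo` and the base domain is `harmony.mcd`. A small test-style sketch of that rule on plain strings, so it can be checked without constructing a full `HAClusterTopology`; the function is mine, not part of the diff:

```rust
// Mirrors get_cluster_name()/get_cluster_base_domain() on plain strings.
fn split_cluster_domain(domain_name: &str) -> (String, String) {
    let cluster_name = domain_name
        .split('.')
        .next()
        .expect("Cluster domain name must not be empty")
        .to_string();
    let base = domain_name
        .strip_prefix(&cluster_name)
        .expect("cluster domain must start with cluster name");
    let base_domain = base.strip_prefix('.').unwrap_or(base).to_string();
    (cluster_name, base_domain)
}

#[test]
fn splits_example_domain() {
    assert_eq!(
        split_cluster_domain("demo.harmony.mcd"),
        ("demo".to_string(), "harmony.mcd".to_string())
    );
}
```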
@@ -5,7 +5,11 @@ use harmony_types::net::IpAddress;
 use harmony_types::net::Url;
 #[async_trait]
 pub trait HttpServer: Send + Sync {
-    async fn serve_files(&self, url: &Url) -> Result<(), ExecutorError>;
+    async fn serve_files(
+        &self,
+        url: &Url,
+        remote_path: &Option<String>,
+    ) -> Result<(), ExecutorError>;
     async fn serve_file_content(&self, file: &FileContent) -> Result<(), ExecutorError>;
     fn get_ip(&self) -> IpAddress;

@@ -102,8 +102,17 @@ pub enum HttpStatusCode {
     ServerError5xx,
 }

+#[derive(Debug, Clone, PartialEq, Serialize)]
+pub enum SSL {
+    SSL,
+    Disabled,
+    Default,
+    SNI,
+    Other(String),
+}
+
 #[derive(Debug, Clone, PartialEq, Serialize)]
 pub enum HealthCheck {
-    HTTP(String, HttpMethod, HttpStatusCode),
+    HTTP(String, HttpMethod, HttpStatusCode, SSL),
     TCP(Option<u16>),
 }
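With the extra variant, every HTTP health check now states its TLS behaviour explicitly; the load_balancer example earlier in this diff passes `SSL::Disabled`. A sketch of the updated constructor call, with the probe path as a placeholder:

```rust
use harmony::topology::{HealthCheck, HttpMethod, HttpStatusCode, SSL};

// Plain-HTTP probe: expect any 2xx from GET on the given path, TLS explicitly disabled.
fn example_health_check() -> HealthCheck {
    HealthCheck::HTTP(
        "/healthz".to_string(),
        HttpMethod::GET,
        HttpStatusCode::Success2xx,
        SSL::Disabled,
    )
}
```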
| @ -11,15 +11,21 @@ use super::{LogicalHost, k8s::K8sClient}; | |||||||
| #[derive(Debug)] | #[derive(Debug)] | ||||||
| pub struct DHCPStaticEntry { | pub struct DHCPStaticEntry { | ||||||
|     pub name: String, |     pub name: String, | ||||||
|     pub mac: MacAddress, |     pub mac: Vec<MacAddress>, | ||||||
|     pub ip: Ipv4Addr, |     pub ip: Ipv4Addr, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| impl std::fmt::Display for DHCPStaticEntry { | impl std::fmt::Display for DHCPStaticEntry { | ||||||
|     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { |     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { | ||||||
|  |         let mac = self | ||||||
|  |             .mac | ||||||
|  |             .iter() | ||||||
|  |             .map(|m| m.to_string()) | ||||||
|  |             .collect::<Vec<String>>() | ||||||
|  |             .join(","); | ||||||
|         f.write_fmt(format_args!( |         f.write_fmt(format_args!( | ||||||
|             "DHCPStaticEntry : name {}, mac {}, ip {}", |             "DHCPStaticEntry : name {}, mac {}, ip {}", | ||||||
|             self.name, self.mac, self.ip |             self.name, mac, self.ip | ||||||
|         )) |         )) | ||||||
|     } |     } | ||||||
| } | } | ||||||
| @ -41,6 +47,7 @@ impl std::fmt::Debug for dyn Firewall { | |||||||
| pub struct NetworkDomain { | pub struct NetworkDomain { | ||||||
|     pub name: String, |     pub name: String, | ||||||
| } | } | ||||||
|  | 
 | ||||||
| #[async_trait] | #[async_trait] | ||||||
| pub trait K8sclient: Send + Sync { | pub trait K8sclient: Send + Sync { | ||||||
|     async fn k8s_client(&self) -> Result<Arc<K8sClient>, String>; |     async fn k8s_client(&self) -> Result<Arc<K8sClient>, String>; | ||||||
| @ -59,6 +66,8 @@ pub trait DhcpServer: Send + Sync + std::fmt::Debug { | |||||||
|     async fn remove_static_mapping(&self, mac: &MacAddress) -> Result<(), ExecutorError>; |     async fn remove_static_mapping(&self, mac: &MacAddress) -> Result<(), ExecutorError>; | ||||||
|     async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)>; |     async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)>; | ||||||
|     async fn set_pxe_options(&self, pxe_options: PxeOptions) -> Result<(), ExecutorError>; |     async fn set_pxe_options(&self, pxe_options: PxeOptions) -> Result<(), ExecutorError>; | ||||||
|  |     async fn set_dhcp_range(&self, start: &IpAddress, end: &IpAddress) | ||||||
|  |     -> Result<(), ExecutorError>; | ||||||
|     fn get_ip(&self) -> IpAddress; |     fn get_ip(&self) -> IpAddress; | ||||||
|     fn get_host(&self) -> LogicalHost; |     fn get_host(&self) -> LogicalHost; | ||||||
|     async fn commit_config(&self) -> Result<(), ExecutorError>; |     async fn commit_config(&self) -> Result<(), ExecutorError>; | ||||||
|  | |||||||
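A sketch of the multi-MAC `DHCPStaticEntry`: one entry can now cover every NIC of a host, and `Display` joins the MACs with commas. Construction of `MacAddress` values is left to the caller since its API is not shown in this diff.

```rust
use harmony_types::net::MacAddress;

// Build an entry for a host with several NICs and render it for logging.
fn describe_entry(name: &str, macs: Vec<MacAddress>, ip: std::net::Ipv4Addr) -> String {
    let entry = DHCPStaticEntry {
        name: name.to_string(),
        mac: macs,
        ip,
    };
    // e.g. "DHCPStaticEntry : name node0, mac aa:bb:cc:dd:ee:01,aa:bb:cc:dd:ee:02, ip 192.168.1.10"
    entry.to_string()
}
```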
| @ -1,6 +1,6 @@ | |||||||
| use crate::{ | use crate::{ | ||||||
|     hardware::PhysicalHost, |     hardware::PhysicalHost, | ||||||
|     inventory::{InventoryRepository, RepoError}, |     inventory::{HostRole, InventoryRepository, RepoError}, | ||||||
| }; | }; | ||||||
| use async_trait::async_trait; | use async_trait::async_trait; | ||||||
| use harmony_types::id::Id; | use harmony_types::id::Id; | ||||||
| @ -46,20 +46,104 @@ impl InventoryRepository for SqliteInventoryRepository { | |||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     async fn get_latest_by_id(&self, host_id: &str) -> Result<Option<PhysicalHost>, RepoError> { |     async fn get_latest_by_id(&self, host_id: &str) -> Result<Option<PhysicalHost>, RepoError> { | ||||||
|         let _row = sqlx::query_as!( |         let row = sqlx::query_as!( | ||||||
|             DbHost, |             DbHost, | ||||||
|             r#"SELECT id, version_id, data as "data: Json<PhysicalHost>" FROM physical_hosts WHERE id = ? ORDER BY version_id DESC LIMIT 1"#, |             r#"SELECT id, version_id, data as "data: Json<PhysicalHost>" FROM physical_hosts WHERE id = ? ORDER BY version_id DESC LIMIT 1"#, | ||||||
|             host_id |             host_id | ||||||
|         ) |         ) | ||||||
|         .fetch_optional(&self.pool) |         .fetch_optional(&self.pool) | ||||||
|         .await?; |         .await?; | ||||||
|         todo!() | 
 | ||||||
|  |         Ok(row.map(|r| r.data.0)) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     async fn get_all_hosts(&self) -> Result<Vec<PhysicalHost>, RepoError> { | ||||||
|  |         let db_hosts = sqlx::query_as!( | ||||||
|  |             DbHost, | ||||||
|  |             r#" | ||||||
|  |         SELECT | ||||||
|  |             p1.id, | ||||||
|  |             p1.version_id, | ||||||
|  |             p1.data as "data: Json<PhysicalHost>" | ||||||
|  |         FROM | ||||||
|  |             physical_hosts p1 | ||||||
|  |         INNER JOIN ( | ||||||
|  |             SELECT | ||||||
|  |                 id, | ||||||
|  |                 MAX(version_id) AS max_version | ||||||
|  |             FROM | ||||||
|  |                 physical_hosts | ||||||
|  |             GROUP BY | ||||||
|  |                 id | ||||||
|  |         ) p2 ON p1.id = p2.id AND p1.version_id = p2.max_version | ||||||
|  |         "#
 | ||||||
|  |         ) | ||||||
|  |         .fetch_all(&self.pool) | ||||||
|  |         .await?; | ||||||
|  | 
 | ||||||
|  |         let hosts = db_hosts.into_iter().map(|row| row.data.0).collect(); | ||||||
|  | 
 | ||||||
|  |         Ok(hosts) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     async fn save_role_mapping( | ||||||
|  |         &self, | ||||||
|  |         role: &HostRole, | ||||||
|  |         host: &PhysicalHost, | ||||||
|  |     ) -> Result<(), RepoError> { | ||||||
|  |         let host_id = host.id.to_string(); | ||||||
|  | 
 | ||||||
|  |         sqlx::query!( | ||||||
|  |             r#" | ||||||
|  |         INSERT INTO host_role_mapping (host_id, role) | ||||||
|  |         VALUES (?, ?) | ||||||
|  |         "#,
 | ||||||
|  |             host_id, | ||||||
|  |             role | ||||||
|  |         ) | ||||||
|  |         .execute(&self.pool) | ||||||
|  |         .await?; | ||||||
|  | 
 | ||||||
|  |         info!("Saved role mapping for host '{}' as '{:?}'", host.id, role); | ||||||
|  | 
 | ||||||
|  |         Ok(()) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     async fn get_host_for_role(&self, role: &HostRole) -> Result<Vec<PhysicalHost>, RepoError> { | ||||||
|  |         struct HostIdRow { | ||||||
|  |             host_id: String, | ||||||
|  |         } | ||||||
|  | 
 | ||||||
|  |         let role_str = format!("{:?}", role); | ||||||
|  | 
 | ||||||
|  |         let host_id_rows = sqlx::query_as!( | ||||||
|  |             HostIdRow, | ||||||
|  |             "SELECT host_id FROM host_role_mapping WHERE role = ?", | ||||||
|  |             role_str | ||||||
|  |         ) | ||||||
|  |         .fetch_all(&self.pool) | ||||||
|  |         .await?; | ||||||
|  | 
 | ||||||
|  |         let mut hosts = Vec::with_capacity(host_id_rows.len()); | ||||||
|  |         for row in host_id_rows { | ||||||
|  |             match self.get_latest_by_id(&row.host_id).await? { | ||||||
|  |                 Some(host) => hosts.push(host), | ||||||
|  |                 None => { | ||||||
|  |                     log::warn!( | ||||||
|  |                         "Found a role mapping for host_id '{}', but the host does not exist in the physical_hosts table. This may indicate a data integrity issue.", | ||||||
|  |                         row.host_id | ||||||
|  |                     ); | ||||||
|  |                 } | ||||||
|  |             } | ||||||
|  |         } | ||||||
|  | 
 | ||||||
|  |         Ok(hosts) | ||||||
|     } |     } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| use sqlx::types::Json; | use sqlx::types::Json; | ||||||
| struct DbHost { | struct DbHost { | ||||||
|     data: Json<PhysicalHost>, |     data: Json<PhysicalHost>, | ||||||
|     id: Id, |     id: String, | ||||||
|     version_id: Id, |     version_id: String, | ||||||
| } | } | ||||||
|  | |||||||
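A sketch of the role-mapping round trip added above, assuming an async context and the `InventoryRepositoryFactory` used elsewhere in this commit; error handling is collapsed into `expect` for brevity.

```rust
// Persist a role for a discovered host, then read it back through the
// latest-version lookup.
async fn assign_bootstrap(host: &PhysicalHost) {
    let repo = InventoryRepositoryFactory::build()
        .await
        .expect("inventory repository should be available");

    repo.save_role_mapping(&HostRole::Bootstrap, host)
        .await
        .expect("role mapping should persist");

    let bootstrap_hosts = repo
        .get_host_for_role(&HostRole::Bootstrap)
        .await
        .expect("role lookup should succeed");
    assert!(
        bootstrap_hosts
            .iter()
            .any(|h| h.id.to_string() == host.id.to_string())
    );
}
```

Note that `get_host_for_role` matches on the `Debug` rendering of `HostRole`, so the value stored by `save_role_mapping` has to serialize to the same string.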
| @ -17,13 +17,13 @@ impl DhcpServer for OPNSenseFirewall { | |||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     async fn add_static_mapping(&self, entry: &DHCPStaticEntry) -> Result<(), ExecutorError> { |     async fn add_static_mapping(&self, entry: &DHCPStaticEntry) -> Result<(), ExecutorError> { | ||||||
|         let mac: String = String::from(&entry.mac); |         let mac: Vec<String> = entry.mac.iter().map(MacAddress::to_string).collect(); | ||||||
| 
 | 
 | ||||||
|         { |         { | ||||||
|             let mut writable_opnsense = self.opnsense_config.write().await; |             let mut writable_opnsense = self.opnsense_config.write().await; | ||||||
|             writable_opnsense |             writable_opnsense | ||||||
|                 .dhcp() |                 .dhcp() | ||||||
|                 .add_static_mapping(&mac, entry.ip, &entry.name) |                 .add_static_mapping(&mac, &entry.ip, &entry.name) | ||||||
|                 .unwrap(); |                 .unwrap(); | ||||||
|         } |         } | ||||||
| 
 | 
 | ||||||
| @ -68,4 +68,19 @@ impl DhcpServer for OPNSenseFirewall { | |||||||
|                 ExecutorError::UnexpectedError(format!("Failed to set_pxe_options : {dhcp_error}")) |                 ExecutorError::UnexpectedError(format!("Failed to set_pxe_options : {dhcp_error}")) | ||||||
|             }) |             }) | ||||||
|     } |     } | ||||||
|  | 
 | ||||||
|  |     async fn set_dhcp_range( | ||||||
|  |         &self, | ||||||
|  |         start: &IpAddress, | ||||||
|  |         end: &IpAddress, | ||||||
|  |     ) -> Result<(), ExecutorError> { | ||||||
|  |         let mut writable_opnsense = self.opnsense_config.write().await; | ||||||
|  |         writable_opnsense | ||||||
|  |             .dhcp() | ||||||
|  |             .set_dhcp_range(&start.to_string(), &end.to_string()) | ||||||
|  |             .await | ||||||
|  |             .map_err(|dhcp_error| { | ||||||
|  |                 ExecutorError::UnexpectedError(format!("Failed to set_dhcp_range : {dhcp_error}")) | ||||||
|  |             }) | ||||||
|  |     } | ||||||
| } | } | ||||||
|  | |||||||
| @ -1,4 +1,3 @@ | |||||||
| use crate::infra::opnsense::Host; |  | ||||||
| use crate::infra::opnsense::LogicalHost; | use crate::infra::opnsense::LogicalHost; | ||||||
| use crate::{ | use crate::{ | ||||||
|     executors::ExecutorError, |     executors::ExecutorError, | ||||||
| @ -12,21 +11,22 @@ use super::OPNSenseFirewall; | |||||||
| #[async_trait] | #[async_trait] | ||||||
| impl DnsServer for OPNSenseFirewall { | impl DnsServer for OPNSenseFirewall { | ||||||
|     async fn register_hosts(&self, hosts: Vec<DnsRecord>) -> Result<(), ExecutorError> { |     async fn register_hosts(&self, hosts: Vec<DnsRecord>) -> Result<(), ExecutorError> { | ||||||
|         let mut writable_opnsense = self.opnsense_config.write().await; |         todo!("Refactor this to use dnsmasq") | ||||||
|         let mut dns = writable_opnsense.dns(); |         // let mut writable_opnsense = self.opnsense_config.write().await;
 | ||||||
|         let hosts = hosts |         // let mut dns = writable_opnsense.dns();
 | ||||||
|             .iter() |         // let hosts = hosts
 | ||||||
|             .map(|h| { |         //     .iter()
 | ||||||
|                 Host::new( |         //     .map(|h| {
 | ||||||
|                     h.host.clone(), |         //         Host::new(
 | ||||||
|                     h.domain.clone(), |         //             h.host.clone(),
 | ||||||
|                     h.record_type.to_string(), |         //             h.domain.clone(),
 | ||||||
|                     h.value.to_string(), |         //             h.record_type.to_string(),
 | ||||||
|                 ) |         //             h.value.to_string(),
 | ||||||
|             }) |         //         )
 | ||||||
|             .collect(); |         //     })
 | ||||||
|         dns.register_hosts(hosts); |         //     .collect();
 | ||||||
|         Ok(()) |         // dns.add_static_mapping(hosts);
 | ||||||
|  |         // Ok(())
 | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     fn remove_record( |     fn remove_record( | ||||||
| @ -38,25 +38,26 @@ impl DnsServer for OPNSenseFirewall { | |||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     async fn list_records(&self) -> Vec<crate::topology::DnsRecord> { |     async fn list_records(&self) -> Vec<crate::topology::DnsRecord> { | ||||||
|         self.opnsense_config |         todo!("Refactor this to use dnsmasq") | ||||||
|             .write() |         // self.opnsense_config
 | ||||||
|             .await |         //     .write()
 | ||||||
|             .dns() |         //     .await
 | ||||||
|             .get_hosts() |         //     .dns()
 | ||||||
|             .iter() |         //     .get_hosts()
 | ||||||
|             .map(|h| DnsRecord { |         //     .iter()
 | ||||||
|                 host: h.hostname.clone(), |         //     .map(|h| DnsRecord {
 | ||||||
|                 domain: h.domain.clone(), |         //         host: h.hostname.clone(),
 | ||||||
|                 record_type: h |         //         domain: h.domain.clone(),
 | ||||||
|                     .rr |         //         record_type: h
 | ||||||
|                     .parse() |         //             .rr
 | ||||||
|                     .expect("received invalid record type {h.rr} from opnsense"), |         //             .parse()
 | ||||||
|                 value: h |         //             .expect("received invalid record type {h.rr} from opnsense"),
 | ||||||
|                     .server |         //         value: h
 | ||||||
|                     .parse() |         //             .server
 | ||||||
|                     .expect("received invalid ipv4 record from opnsense {h.server}"), |         //             .parse()
 | ||||||
|             }) |         //             .expect("received invalid ipv4 record from opnsense {h.server}"),
 | ||||||
|             .collect() |         //     })
 | ||||||
|  |         //     .collect()
 | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     fn get_ip(&self) -> IpAddress { |     fn get_ip(&self) -> IpAddress { | ||||||
| @ -68,11 +69,12 @@ impl DnsServer for OPNSenseFirewall { | |||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     async fn register_dhcp_leases(&self, register: bool) -> Result<(), ExecutorError> { |     async fn register_dhcp_leases(&self, register: bool) -> Result<(), ExecutorError> { | ||||||
|         let mut writable_opnsense = self.opnsense_config.write().await; |         todo!("Refactor this to use dnsmasq") | ||||||
|         let mut dns = writable_opnsense.dns(); |         // let mut writable_opnsense = self.opnsense_config.write().await;
 | ||||||
|         dns.register_dhcp_leases(register); |         // let mut dns = writable_opnsense.dns();
 | ||||||
| 
 |         // dns.register_dhcp_leases(register);
 | ||||||
|         Ok(()) |         //
 | ||||||
|  |         // Ok(())
 | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     async fn commit_config(&self) -> Result<(), ExecutorError> { |     async fn commit_config(&self) -> Result<(), ExecutorError> { | ||||||
|  | |||||||
| @ -10,13 +10,21 @@ const OPNSENSE_HTTP_ROOT_PATH: &str = "/usr/local/http"; | |||||||
| 
 | 
 | ||||||
| #[async_trait] | #[async_trait] | ||||||
| impl HttpServer for OPNSenseFirewall { | impl HttpServer for OPNSenseFirewall { | ||||||
|     async fn serve_files(&self, url: &Url) -> Result<(), ExecutorError> { |     async fn serve_files( | ||||||
|  |         &self, | ||||||
|  |         url: &Url, | ||||||
|  |         remote_path: &Option<String>, | ||||||
|  |     ) -> Result<(), ExecutorError> { | ||||||
|         let config = self.opnsense_config.read().await; |         let config = self.opnsense_config.read().await; | ||||||
|         info!("Uploading files from url {url} to {OPNSENSE_HTTP_ROOT_PATH}"); |         info!("Uploading files from url {url} to {OPNSENSE_HTTP_ROOT_PATH}"); | ||||||
|  |         let remote_upload_path = remote_path | ||||||
|  |             .clone() | ||||||
|  |             .map(|r| format!("{OPNSENSE_HTTP_ROOT_PATH}/{r}")) | ||||||
|  |             .unwrap_or(OPNSENSE_HTTP_ROOT_PATH.to_string()); | ||||||
|         match url { |         match url { | ||||||
|             Url::LocalFolder(path) => { |             Url::LocalFolder(path) => { | ||||||
|                 config |                 config | ||||||
|                     .upload_files(path, OPNSENSE_HTTP_ROOT_PATH) |                     .upload_files(path, &remote_upload_path) | ||||||
|                     .await |                     .await | ||||||
|                     .map_err(|e| ExecutorError::UnexpectedError(e.to_string()))?; |                     .map_err(|e| ExecutorError::UnexpectedError(e.to_string()))?; | ||||||
|             } |             } | ||||||
|  | |||||||
| @ -1,13 +1,15 @@ | |||||||
| use async_trait::async_trait; | use async_trait::async_trait; | ||||||
| use log::{debug, info, warn}; | use log::{debug, error, info, warn}; | ||||||
| use opnsense_config_xml::{Frontend, HAProxy, HAProxyBackend, HAProxyHealthCheck, HAProxyServer}; | use opnsense_config_xml::{ | ||||||
|  |     Frontend, HAProxy, HAProxyBackend, HAProxyHealthCheck, HAProxyServer, MaybeString, | ||||||
|  | }; | ||||||
| use uuid::Uuid; | use uuid::Uuid; | ||||||
| 
 | 
 | ||||||
| use crate::{ | use crate::{ | ||||||
|     executors::ExecutorError, |     executors::ExecutorError, | ||||||
|     topology::{ |     topology::{ | ||||||
|         BackendServer, HealthCheck, HttpMethod, HttpStatusCode, LoadBalancer, LoadBalancerService, |         BackendServer, HealthCheck, HttpMethod, HttpStatusCode, LoadBalancer, LoadBalancerService, | ||||||
|         LogicalHost, |         LogicalHost, SSL, | ||||||
|     }, |     }, | ||||||
| }; | }; | ||||||
| use harmony_types::net::IpAddress; | use harmony_types::net::IpAddress; | ||||||
| @ -206,7 +208,22 @@ pub(crate) fn get_health_check_for_backend( | |||||||
|                 .unwrap_or_default() |                 .unwrap_or_default() | ||||||
|                 .into(); |                 .into(); | ||||||
|             let status_code: HttpStatusCode = HttpStatusCode::Success2xx; |             let status_code: HttpStatusCode = HttpStatusCode::Success2xx; | ||||||
|             Some(HealthCheck::HTTP(path, method, status_code)) |             let ssl = match haproxy_health_check | ||||||
|  |                 .ssl | ||||||
|  |                 .content_string() | ||||||
|  |                 .to_uppercase() | ||||||
|  |                 .as_str() | ||||||
|  |             { | ||||||
|  |                 "SSL" => SSL::SSL, | ||||||
|  |                 "SSLNI" => SSL::SNI, | ||||||
|  |                 "NOSSL" => SSL::Disabled, | ||||||
|  |                 "" => SSL::Default, | ||||||
|  |                 other => { | ||||||
|  |                     error!("Unknown haproxy health check ssl config {other}"); | ||||||
|  |                     SSL::Other(other.to_string()) | ||||||
|  |                 } | ||||||
|  |             }; | ||||||
|  |             Some(HealthCheck::HTTP(path, method, status_code, ssl)) | ||||||
|         } |         } | ||||||
|         _ => panic!("Received unsupported health check type {}", uppercase), |         _ => panic!("Received unsupported health check type {}", uppercase), | ||||||
|     } |     } | ||||||
| @ -241,7 +258,14 @@ pub(crate) fn harmony_load_balancer_service_to_haproxy_xml( | |||||||
|     // frontend points to backend
 |     // frontend points to backend
 | ||||||
|     let healthcheck = if let Some(health_check) = &service.health_check { |     let healthcheck = if let Some(health_check) = &service.health_check { | ||||||
|         match health_check { |         match health_check { | ||||||
|             HealthCheck::HTTP(path, http_method, _http_status_code) => { |             HealthCheck::HTTP(path, http_method, _http_status_code, ssl) => { | ||||||
|  |                 let ssl: MaybeString = match ssl { | ||||||
|  |                     SSL::SSL => "ssl".into(), | ||||||
|  |                     SSL::SNI => "sslni".into(), | ||||||
|  |                     SSL::Disabled => "nossl".into(), | ||||||
|  |                     SSL::Default => "".into(), | ||||||
|  |                     SSL::Other(other) => other.as_str().into(), | ||||||
|  |                 }; | ||||||
|                 let haproxy_check = HAProxyHealthCheck { |                 let haproxy_check = HAProxyHealthCheck { | ||||||
|                     name: format!("HTTP_{http_method}_{path}"), |                     name: format!("HTTP_{http_method}_{path}"), | ||||||
|                     uuid: Uuid::new_v4().to_string(), |                     uuid: Uuid::new_v4().to_string(), | ||||||
| @ -249,6 +273,7 @@ pub(crate) fn harmony_load_balancer_service_to_haproxy_xml( | |||||||
|                     health_check_type: "http".to_string(), |                     health_check_type: "http".to_string(), | ||||||
|                     http_uri: path.clone().into(), |                     http_uri: path.clone().into(), | ||||||
|                     interval: "2s".to_string(), |                     interval: "2s".to_string(), | ||||||
|  |                     ssl, | ||||||
|                     ..Default::default() |                     ..Default::default() | ||||||
|                 }; |                 }; | ||||||
| 
 | 
 | ||||||
|  | |||||||
| @ -1,7 +1,7 @@ | |||||||
| use async_trait::async_trait; | use async_trait::async_trait; | ||||||
| use derive_new::new; | use derive_new::new; | ||||||
| use harmony_types::id::Id; | use harmony_types::id::Id; | ||||||
| use log::info; | use log::{info, trace}; | ||||||
| use serde::Serialize; | use serde::Serialize; | ||||||
| 
 | 
 | ||||||
| use crate::{ | use crate::{ | ||||||
| @ -22,6 +22,8 @@ pub struct DhcpScore { | |||||||
|     pub filename: Option<String>, |     pub filename: Option<String>, | ||||||
|     pub filename64: Option<String>, |     pub filename64: Option<String>, | ||||||
|     pub filenameipxe: Option<String>, |     pub filenameipxe: Option<String>, | ||||||
|  |     pub dhcp_range: (IpAddress, IpAddress), | ||||||
|  |     pub domain: Option<String>, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| impl<T: Topology + DhcpServer> Score<T> for DhcpScore { | impl<T: Topology + DhcpServer> Score<T> for DhcpScore { | ||||||
| @ -52,48 +54,6 @@ impl DhcpInterpret { | |||||||
|             status: InterpretStatus::QUEUED, |             status: InterpretStatus::QUEUED, | ||||||
|         } |         } | ||||||
|     } |     } | ||||||
|     async fn add_static_entries<D: DhcpServer>( |  | ||||||
|         &self, |  | ||||||
|         _inventory: &Inventory, |  | ||||||
|         dhcp_server: &D, |  | ||||||
|     ) -> Result<Outcome, InterpretError> { |  | ||||||
|         let dhcp_entries: Vec<DHCPStaticEntry> = self |  | ||||||
|             .score |  | ||||||
|             .host_binding |  | ||||||
|             .iter() |  | ||||||
|             .map(|binding| { |  | ||||||
|                 let ip = match binding.logical_host.ip { |  | ||||||
|                     std::net::IpAddr::V4(ipv4) => ipv4, |  | ||||||
|                     std::net::IpAddr::V6(_) => { |  | ||||||
|                         unimplemented!("DHCPStaticEntry only supports ipv4 at the moment") |  | ||||||
|                     } |  | ||||||
|                 }; |  | ||||||
| 
 |  | ||||||
|                 DHCPStaticEntry { |  | ||||||
|                     name: binding.logical_host.name.clone(), |  | ||||||
|                     mac: binding.physical_host.cluster_mac(), |  | ||||||
|                     ip, |  | ||||||
|                 } |  | ||||||
|             }) |  | ||||||
|             .collect(); |  | ||||||
|         info!("DHCPStaticEntry : {:?}", dhcp_entries); |  | ||||||
| 
 |  | ||||||
|         info!("DHCP server : {:?}", dhcp_server); |  | ||||||
| 
 |  | ||||||
|         let number_new_entries = dhcp_entries.len(); |  | ||||||
| 
 |  | ||||||
|         for entry in dhcp_entries.into_iter() { |  | ||||||
|             match dhcp_server.add_static_mapping(&entry).await { |  | ||||||
|                 Ok(_) => info!("Successfully registered DHCPStaticEntry {}", entry), |  | ||||||
|                 Err(_) => todo!(), |  | ||||||
|             } |  | ||||||
|         } |  | ||||||
| 
 |  | ||||||
|         Ok(Outcome::new( |  | ||||||
|             InterpretStatus::SUCCESS, |  | ||||||
|             format!("Dhcp Interpret registered {} entries", number_new_entries), |  | ||||||
|         )) |  | ||||||
|     } |  | ||||||
| 
 | 
 | ||||||
|     async fn set_pxe_options<D: DhcpServer>( |     async fn set_pxe_options<D: DhcpServer>( | ||||||
|         &self, |         &self, | ||||||
| @ -124,7 +84,7 @@ impl DhcpInterpret { | |||||||
| } | } | ||||||
| 
 | 
 | ||||||
| #[async_trait] | #[async_trait] | ||||||
| impl<T: DhcpServer> Interpret<T> for DhcpInterpret { | impl<T: Topology + DhcpServer> Interpret<T> for DhcpInterpret { | ||||||
|     fn get_name(&self) -> InterpretName { |     fn get_name(&self) -> InterpretName { | ||||||
|         InterpretName::OPNSenseDHCP |         InterpretName::OPNSenseDHCP | ||||||
|     } |     } | ||||||
| @ -149,8 +109,16 @@ impl<T: DhcpServer> Interpret<T> for DhcpInterpret { | |||||||
|         info!("Executing DhcpInterpret on inventory {inventory:?}"); |         info!("Executing DhcpInterpret on inventory {inventory:?}"); | ||||||
| 
 | 
 | ||||||
|         self.set_pxe_options(inventory, topology).await?; |         self.set_pxe_options(inventory, topology).await?; | ||||||
|  |         topology | ||||||
|  |             .set_dhcp_range(&self.score.dhcp_range.0, &self.score.dhcp_range.1) | ||||||
|  |             .await?; | ||||||
| 
 | 
 | ||||||
|         self.add_static_entries(inventory, topology).await?; |         DhcpHostBindingScore { | ||||||
|  |             host_binding: self.score.host_binding.clone(), | ||||||
|  |             domain: self.score.domain.clone(), | ||||||
|  |         } | ||||||
|  |         .interpret(inventory, topology) | ||||||
|  |         .await?; | ||||||
| 
 | 
 | ||||||
|         topology.commit_config().await?; |         topology.commit_config().await?; | ||||||
| 
 | 
 | ||||||
| @ -160,3 +128,120 @@ impl<T: DhcpServer> Interpret<T> for DhcpInterpret { | |||||||
|         )) |         )) | ||||||
|     } |     } | ||||||
| } | } | ||||||
|  | 
 | ||||||
|  | #[derive(Debug, new, Clone, Serialize)] | ||||||
|  | pub struct DhcpHostBindingScore { | ||||||
|  |     pub host_binding: Vec<HostBinding>, | ||||||
|  |     pub domain: Option<String>, | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | impl<T: Topology + DhcpServer> Score<T> for DhcpHostBindingScore { | ||||||
|  |     fn create_interpret(&self) -> Box<dyn Interpret<T>> { | ||||||
|  |         Box::new(DhcpHostBindingInterpret { | ||||||
|  |             score: self.clone(), | ||||||
|  |         }) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn name(&self) -> String { | ||||||
|  |         "DhcpHostBindingScore".to_string() | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // https://docs.opnsense.org/manual/dhcp.html#advanced-settings
 | ||||||
|  | #[derive(Debug, Clone)] | ||||||
|  | pub struct DhcpHostBindingInterpret { | ||||||
|  |     score: DhcpHostBindingScore, | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | impl DhcpHostBindingInterpret { | ||||||
|  |     async fn add_static_entries<D: DhcpServer>( | ||||||
|  |         &self, | ||||||
|  |         _inventory: &Inventory, | ||||||
|  |         dhcp_server: &D, | ||||||
|  |     ) -> Result<Outcome, InterpretError> { | ||||||
|  |         let dhcp_entries: Vec<DHCPStaticEntry> = self | ||||||
|  |             .score | ||||||
|  |             .host_binding | ||||||
|  |             .iter() | ||||||
|  |             .map(|binding| { | ||||||
|  |                 let ip = match binding.logical_host.ip { | ||||||
|  |                     std::net::IpAddr::V4(ipv4) => ipv4, | ||||||
|  |                     std::net::IpAddr::V6(_) => { | ||||||
|  |                         unimplemented!("DHCPStaticEntry only supports ipv4 at the moment") | ||||||
|  |                     } | ||||||
|  |                 }; | ||||||
|  | 
 | ||||||
|  |                 let name = if let Some(domain) = self.score.domain.as_ref() { | ||||||
|  |                     format!("{}.{}", binding.logical_host.name, domain) | ||||||
|  |                 } else { | ||||||
|  |                     binding.logical_host.name.clone() | ||||||
|  |                 }; | ||||||
|  | 
 | ||||||
|  |                 DHCPStaticEntry { | ||||||
|  |                     name, | ||||||
|  |                     mac: binding.physical_host.get_mac_address(), | ||||||
|  |                     ip, | ||||||
|  |                 } | ||||||
|  |             }) | ||||||
|  |             .collect(); | ||||||
|  |         info!("DHCPStaticEntry : {:?}", dhcp_entries); | ||||||
|  | 
 | ||||||
|  |         trace!("DHCP server : {:?}", dhcp_server); | ||||||
|  | 
 | ||||||
|  |         let number_new_entries = dhcp_entries.len(); | ||||||
|  | 
 | ||||||
|  |         for entry in dhcp_entries.into_iter() { | ||||||
|  |             match dhcp_server.add_static_mapping(&entry).await { | ||||||
|  |                 Ok(_) => info!("Successfully registered DHCPStaticEntry {}", entry), | ||||||
|  |                 Err(_) => todo!(), | ||||||
|  |             } | ||||||
|  |         } | ||||||
|  | 
 | ||||||
|  |         Ok(Outcome::new( | ||||||
|  |             InterpretStatus::SUCCESS, | ||||||
|  |             format!("Dhcp Interpret registered {} entries", number_new_entries), | ||||||
|  |         )) | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | #[async_trait] | ||||||
|  | impl<T: DhcpServer> Interpret<T> for DhcpHostBindingInterpret { | ||||||
|  |     fn get_name(&self) -> InterpretName { | ||||||
|  |         InterpretName::Custom("DhcpHostBindingInterpret") | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn get_version(&self) -> crate::domain::data::Version { | ||||||
|  |         Version::from("1.0.0").unwrap() | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn get_status(&self) -> InterpretStatus { | ||||||
|  |         todo!() | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn get_children(&self) -> Vec<Id> { | ||||||
|  |         todo!() | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     async fn execute( | ||||||
|  |         &self, | ||||||
|  |         inventory: &Inventory, | ||||||
|  |         topology: &T, | ||||||
|  |     ) -> Result<Outcome, InterpretError> { | ||||||
|  |         info!( | ||||||
|  |             "Executing DhcpHostBindingInterpret on {} bindings", | ||||||
|  |             self.score.host_binding.len() | ||||||
|  |         ); | ||||||
|  | 
 | ||||||
|  |         self.add_static_entries(inventory, topology).await?; | ||||||
|  | 
 | ||||||
|  |         topology.commit_config().await?; | ||||||
|  | 
 | ||||||
|  |         Ok(Outcome::new( | ||||||
|  |             InterpretStatus::SUCCESS, | ||||||
|  |             format!( | ||||||
|  |                 "Dhcp Host Binding Interpret execution successful on {} hosts", | ||||||
|  |                 self.score.host_binding.len() | ||||||
|  |             ), | ||||||
|  |         )) | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | |||||||
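A sketch of using the extracted `DhcpHostBindingScore` on its own, with a hypothetical cluster domain; `bindings`, `inventory`, and `topology` are assumed to come from the surrounding deployment code, and the `Score::interpret` helper used throughout this commit is assumed to return `Result<Outcome, InterpretError>`.

```rust
// Register static DHCP entries as FQDNs under a cluster domain.
async fn bind_hosts<T: Topology + DhcpServer>(
    bindings: Vec<HostBinding>,
    inventory: &Inventory,
    topology: &T,
) -> Result<Outcome, InterpretError> {
    DhcpHostBindingScore {
        host_binding: bindings,
        domain: Some("okd.example.com".to_string()), // None keeps the bare host names
    }
    .interpret(inventory, topology)
    .await
}
```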
| @ -3,14 +3,14 @@ use derive_new::new; | |||||||
| use serde::Serialize; | use serde::Serialize; | ||||||
| 
 | 
 | ||||||
| use crate::{ | use crate::{ | ||||||
|     data::{FileContent, Version}, |     data::{FileContent, FilePath, Version}, | ||||||
|     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, |     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, | ||||||
|     inventory::Inventory, |     inventory::Inventory, | ||||||
|     score::Score, |     score::Score, | ||||||
|     topology::{HttpServer, Topology}, |     topology::{HttpServer, Topology}, | ||||||
| }; | }; | ||||||
| use harmony_types::id::Id; |  | ||||||
| use harmony_types::net::Url; | use harmony_types::net::Url; | ||||||
|  | use harmony_types::{id::Id, net::MacAddress}; | ||||||
| 
 | 
 | ||||||
| /// Configure an HTTP server that is provided by the Topology
 | /// Configure an HTTP server that is provided by the Topology
 | ||||||
| ///
 | ///
 | ||||||
| @ -25,8 +25,11 @@ use harmony_types::net::Url; | |||||||
| /// ```
 | /// ```
 | ||||||
| #[derive(Debug, new, Clone, Serialize)] | #[derive(Debug, new, Clone, Serialize)] | ||||||
| pub struct StaticFilesHttpScore { | pub struct StaticFilesHttpScore { | ||||||
|  |     // TODO this should be split in two scores, one for folder and
 | ||||||
|  |     // other for files
 | ||||||
|     pub folder_to_serve: Option<Url>, |     pub folder_to_serve: Option<Url>, | ||||||
|     pub files: Vec<FileContent>, |     pub files: Vec<FileContent>, | ||||||
|  |     pub remote_path: Option<String>, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| impl<T: Topology + HttpServer> Score<T> for StaticFilesHttpScore { | impl<T: Topology + HttpServer> Score<T> for StaticFilesHttpScore { | ||||||
| @ -54,7 +57,9 @@ impl<T: Topology + HttpServer> Interpret<T> for StaticFilesHttpInterpret { | |||||||
|         http_server.ensure_initialized().await?; |         http_server.ensure_initialized().await?; | ||||||
|         // http_server.set_ip(topology.router.get_gateway()).await?;
 |         // http_server.set_ip(topology.router.get_gateway()).await?;
 | ||||||
|         if let Some(folder) = self.score.folder_to_serve.as_ref() { |         if let Some(folder) = self.score.folder_to_serve.as_ref() { | ||||||
|             http_server.serve_files(folder).await?; |             http_server | ||||||
|  |                 .serve_files(folder, &self.score.remote_path) | ||||||
|  |                 .await?; | ||||||
|         } |         } | ||||||
| 
 | 
 | ||||||
|         for f in self.score.files.iter() { |         for f in self.score.files.iter() { | ||||||
| @ -91,3 +96,34 @@ impl<T: Topology + HttpServer> Interpret<T> for StaticFilesHttpInterpret { | |||||||
|         todo!() |         todo!() | ||||||
|     } |     } | ||||||
| } | } | ||||||
|  | 
 | ||||||
|  | #[derive(Debug, new, Clone, Serialize)] | ||||||
|  | pub struct IPxeMacBootFileScore { | ||||||
|  |     pub content: String, | ||||||
|  |     pub mac_address: Vec<MacAddress>, | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | impl<T: Topology + HttpServer> Score<T> for IPxeMacBootFileScore { | ||||||
|  |     fn name(&self) -> String { | ||||||
|  |         "IPxeMacBootFileScore".to_string() | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn create_interpret(&self) -> Box<dyn Interpret<T>> { | ||||||
|  |         StaticFilesHttpScore { | ||||||
|  |             remote_path: None, | ||||||
|  |             folder_to_serve: None, | ||||||
|  |             files: self | ||||||
|  |                 .mac_address | ||||||
|  |                 .iter() | ||||||
|  |                 .map(|mac| FileContent { | ||||||
|  |                     path: FilePath::Relative(format!( | ||||||
|  |                         "byMAC/01-{}.ipxe", | ||||||
|  |                         mac.to_string().replace(":", "-") | ||||||
|  |                     )), | ||||||
|  |                     content: self.content.clone(), | ||||||
|  |                 }) | ||||||
|  |                 .collect(), | ||||||
|  |         } | ||||||
|  |         .create_interpret() | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | |||||||
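A sketch of `IPxeMacBootFileScore`, with an illustrative chain URL; the MAC list is assumed to come from the inventory (see `PhysicalHost::get_mac_address()` earlier in this diff), and crate-internal imports are omitted.

```rust
use harmony_types::net::MacAddress;

// Publish a per-MAC iPXE script for each NIC of a host.
fn bootstrap_ipxe_score(macs: Vec<MacAddress>) -> IPxeMacBootFileScore {
    IPxeMacBootFileScore {
        content: "#!ipxe\nchain http://fileserver/okd/bootstrap.ipxe\n".to_string(),
        mac_address: macs,
    }
    // Each MAC aa:bb:cc:dd:ee:ff becomes byMAC/01-aa-bb-cc-dd-ee-ff.ipxe,
    // served through the StaticFilesHttpScore this score delegates to.
}
```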
							

								
								
									
harmony/src/modules/inventory/discovery.rs (new file, 122 lines)
									
								
							| @ -0,0 +1,122 @@ | |||||||
|  | use async_trait::async_trait; | ||||||
|  | use harmony_types::id::Id; | ||||||
|  | use log::{error, info}; | ||||||
|  | use serde::{Deserialize, Serialize}; | ||||||
|  | 
 | ||||||
|  | use crate::{ | ||||||
|  |     data::Version, | ||||||
|  |     hardware::PhysicalHost, | ||||||
|  |     infra::inventory::InventoryRepositoryFactory, | ||||||
|  |     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, | ||||||
|  |     inventory::{HostRole, Inventory}, | ||||||
|  |     modules::inventory::LaunchDiscoverInventoryAgentScore, | ||||||
|  |     score::Score, | ||||||
|  |     topology::Topology, | ||||||
|  | }; | ||||||
|  | 
 | ||||||
|  | #[derive(Debug, Clone, Serialize, Deserialize)] | ||||||
|  | pub struct DiscoverHostForRoleScore { | ||||||
|  |     pub role: HostRole, | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | impl<T: Topology> Score<T> for DiscoverHostForRoleScore { | ||||||
|  |     fn name(&self) -> String { | ||||||
|  |         "DiscoverInventoryAgentScore".to_string() | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn create_interpret(&self) -> Box<dyn Interpret<T>> { | ||||||
|  |         Box::new(DiscoverHostForRoleInterpret { | ||||||
|  |             score: self.clone(), | ||||||
|  |         }) | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | #[derive(Debug)] | ||||||
|  | pub struct DiscoverHostForRoleInterpret { | ||||||
|  |     score: DiscoverHostForRoleScore, | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | #[async_trait] | ||||||
|  | impl<T: Topology> Interpret<T> for DiscoverHostForRoleInterpret { | ||||||
|  |     async fn execute( | ||||||
|  |         &self, | ||||||
|  |         inventory: &Inventory, | ||||||
|  |         topology: &T, | ||||||
|  |     ) -> Result<Outcome, InterpretError> { | ||||||
|  |         info!( | ||||||
|  |             "Launching discovery agent, make sure that your nodes are successfully PXE booted and running inventory agent. They should answer on `http://<node_ip>:8080/inventory`" | ||||||
|  |         ); | ||||||
|  |         LaunchDiscoverInventoryAgentScore { | ||||||
|  |             discovery_timeout: None, | ||||||
|  |         } | ||||||
|  |         .interpret(inventory, topology) | ||||||
|  |         .await?; | ||||||
|  | 
 | ||||||
|  |         let host: PhysicalHost; | ||||||
|  |         let host_repo = InventoryRepositoryFactory::build().await?; | ||||||
|  | 
 | ||||||
|  |         loop { | ||||||
|  |             let all_hosts = host_repo.get_all_hosts().await?; | ||||||
|  | 
 | ||||||
|  |             if all_hosts.is_empty() { | ||||||
|  |                 info!("No discovered hosts found yet. Waiting for hosts to appear..."); | ||||||
|  |                 // Sleep to avoid spamming the user and logs while waiting for nodes.
 | ||||||
|  |                 tokio::time::sleep(std::time::Duration::from_secs(3)).await; | ||||||
|  |                 continue; | ||||||
|  |             } | ||||||
|  | 
 | ||||||
|  |             let ans = inquire::Select::new( | ||||||
|  |                 &format!("Select the node to be used for role {:?}:", self.score.role), | ||||||
|  |                 all_hosts, | ||||||
|  |             ) | ||||||
|  |             .with_help_message("Press Esc to refresh the list of discovered hosts") | ||||||
|  |             .prompt(); | ||||||
|  | 
 | ||||||
|  |             match ans { | ||||||
|  |                 Ok(choice) => { | ||||||
|  |                     info!("Selected {} as the bootstrap node.", choice.summary()); | ||||||
|  |                     host_repo | ||||||
|  |                         .save_role_mapping(&self.score.role, &choice) | ||||||
|  |                         .await?; | ||||||
|  |                     host = choice; | ||||||
|  |                     break; | ||||||
|  |                 } | ||||||
|  |                 Err(inquire::InquireError::OperationCanceled) => { | ||||||
|  |                     info!("Refresh requested. Fetching list of discovered hosts again..."); | ||||||
|  |                     continue; | ||||||
|  |                 } | ||||||
|  |                 Err(e) => { | ||||||
|  |                     error!( | ||||||
|  |                         "Failed to select node for role {:?} : {}", | ||||||
|  |                         self.score.role, e | ||||||
|  |                     ); | ||||||
|  |                     return Err(InterpretError::new(format!( | ||||||
|  |                         "Could not select host : {}", | ||||||
|  |                         e.to_string() | ||||||
|  |                     ))); | ||||||
|  |                 } | ||||||
|  |             } | ||||||
|  |         } | ||||||
|  | 
 | ||||||
|  |         Ok(Outcome::success(format!( | ||||||
|  |             "Successfully discovered host {} for role {:?}", | ||||||
|  |             host.summary(), | ||||||
|  |             self.score.role | ||||||
|  |         ))) | ||||||
|  |     } | ||||||
|  |     fn get_name(&self) -> InterpretName { | ||||||
|  |         InterpretName::Custom("DiscoverHostForRoleScore") | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn get_version(&self) -> Version { | ||||||
|  |         todo!() | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn get_status(&self) -> InterpretStatus { | ||||||
|  |         todo!() | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn get_children(&self) -> Vec<Id> { | ||||||
|  |         todo!() | ||||||
|  |     } | ||||||
|  | } | ||||||
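A sketch of running the new discovery flow for a single role; `inventory` and `topology` are assumed to be provided by the calling interpret, and crate-internal imports are omitted.

```rust
// Prompt the operator to pick one of the discovered hosts for the Bootstrap role.
async fn pick_bootstrap<T: Topology>(
    inventory: &Inventory,
    topology: &T,
) -> Result<Outcome, InterpretError> {
    DiscoverHostForRoleScore {
        role: HostRole::Bootstrap,
    }
    .interpret(inventory, topology)
    .await
}
```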
							
								
								
									
harmony/src/modules/inventory/inspect.rs (new file, 72 lines)
									
								
							| @ -0,0 +1,72 @@ | |||||||
|  | use async_trait::async_trait; | ||||||
|  | use harmony_types::id::Id; | ||||||
|  | use log::info; | ||||||
|  | use serde::{Deserialize, Serialize}; | ||||||
|  | use strum::IntoEnumIterator; | ||||||
|  | 
 | ||||||
|  | use crate::{ | ||||||
|  |     data::Version, | ||||||
|  |     infra::inventory::InventoryRepositoryFactory, | ||||||
|  |     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, | ||||||
|  |     inventory::{HostRole, Inventory}, | ||||||
|  |     score::Score, | ||||||
|  |     topology::Topology, | ||||||
|  | }; | ||||||
|  | 
 | ||||||
|  | #[derive(Debug, Serialize, Deserialize, Clone)] | ||||||
|  | pub struct InspectInventoryScore {} | ||||||
|  | 
 | ||||||
|  | impl<T: Topology> Score<T> for InspectInventoryScore { | ||||||
|  |     fn name(&self) -> String { | ||||||
|  |         "InspectInventoryScore".to_string() | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     #[doc(hidden)] | ||||||
|  |     fn create_interpret(&self) -> Box<dyn Interpret<T>> { | ||||||
|  |         Box::new(InspectInventoryInterpret {}) | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | #[derive(Debug)] | ||||||
|  | pub struct InspectInventoryInterpret; | ||||||
|  | 
 | ||||||
|  | #[async_trait] | ||||||
|  | impl<T: Topology> Interpret<T> for InspectInventoryInterpret { | ||||||
|  |     async fn execute( | ||||||
|  |         &self, | ||||||
|  |         _inventory: &Inventory, | ||||||
|  |         _topology: &T, | ||||||
|  |     ) -> Result<Outcome, InterpretError> { | ||||||
|  |         let repo = InventoryRepositoryFactory::build().await?; | ||||||
|  |         for role in HostRole::iter() { | ||||||
|  |             info!("Inspecting hosts for role {role:?}"); | ||||||
|  |             let hosts = repo.get_host_for_role(&role).await?; | ||||||
|  |             info!("Hosts with role {role:?} : {}", hosts.len()); | ||||||
|  |             hosts.iter().enumerate().for_each(|(idx, h)| { | ||||||
|  |                 info!( | ||||||
|  |                     "Found host index {idx} with role {role:?} => \n{}\n{}", | ||||||
|  |                     h.summary(), | ||||||
|  |                     h.parts_list() | ||||||
|  |                 ) | ||||||
|  |             }); | ||||||
|  |         } | ||||||
|  |         Ok(Outcome::success( | ||||||
|  |             "Inventory inspection complete".to_string(), | ||||||
|  |         )) | ||||||
|  |     } | ||||||
|  |     fn get_name(&self) -> InterpretName { | ||||||
|  |         InterpretName::Custom("InspectInventoryInterpret") | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn get_version(&self) -> Version { | ||||||
|  |         todo!() | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn get_status(&self) -> InterpretStatus { | ||||||
|  |         todo!() | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn get_children(&self) -> Vec<Id> { | ||||||
|  |         todo!() | ||||||
|  |     } | ||||||
|  | } | ||||||
| @ -1,3 +1,7 @@ | |||||||
|  | mod discovery; | ||||||
|  | pub mod inspect; | ||||||
|  | pub use discovery::*; | ||||||
|  | 
 | ||||||
| use async_trait::async_trait; | use async_trait::async_trait; | ||||||
| use harmony_inventory_agent::local_presence::DiscoveryEvent; | use harmony_inventory_agent::local_presence::DiscoveryEvent; | ||||||
| use log::{debug, info, trace}; | use log::{debug, info, trace}; | ||||||
| @ -18,11 +22,11 @@ use harmony_types::id::Id; | |||||||
| /// This will allow us to register/update hosts running harmony_inventory_agent
 | /// This will allow us to register/update hosts running harmony_inventory_agent
 | ||||||
| /// from LAN in the Harmony inventory
 | /// from LAN in the Harmony inventory
 | ||||||
| #[derive(Debug, Clone, Serialize, Deserialize)] | #[derive(Debug, Clone, Serialize, Deserialize)] | ||||||
| pub struct DiscoverInventoryAgentScore { | pub struct LaunchDiscoverInventoryAgentScore { | ||||||
|     pub discovery_timeout: Option<u64>, |     pub discovery_timeout: Option<u64>, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| impl<T: Topology> Score<T> for DiscoverInventoryAgentScore { | impl<T: Topology> Score<T> for LaunchDiscoverInventoryAgentScore { | ||||||
|     fn name(&self) -> String { |     fn name(&self) -> String { | ||||||
|         "DiscoverInventoryAgentScore".to_string() |         "DiscoverInventoryAgentScore".to_string() | ||||||
|     } |     } | ||||||
| @ -36,7 +40,7 @@ impl<T: Topology> Score<T> for DiscoverInventoryAgentScore { | |||||||
| 
 | 
 | ||||||
| #[derive(Debug)] | #[derive(Debug)] | ||||||
| struct DiscoverInventoryAgentInterpret { | struct DiscoverInventoryAgentInterpret { | ||||||
|     score: DiscoverInventoryAgentScore, |     score: LaunchDiscoverInventoryAgentScore, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| #[async_trait] | #[async_trait] | ||||||
| @ -46,6 +50,13 @@ impl<T: Topology> Interpret<T> for DiscoverInventoryAgentInterpret { | |||||||
|         _inventory: &Inventory, |         _inventory: &Inventory, | ||||||
|         _topology: &T, |         _topology: &T, | ||||||
|     ) -> Result<Outcome, InterpretError> { |     ) -> Result<Outcome, InterpretError> { | ||||||
|  |         match self.score.discovery_timeout { | ||||||
|  |             Some(timeout) => info!("Discovery agent will wait for {timeout} seconds"), | ||||||
|  |             None => info!( | ||||||
|  |                 "Discovery agent will wait forever in the background, go on and enjoy this delicious inventory." | ||||||
|  |             ), | ||||||
|  |         }; | ||||||
|  | 
 | ||||||
|         harmony_inventory_agent::local_presence::discover_agents( |         harmony_inventory_agent::local_presence::discover_agents( | ||||||
|             self.score.discovery_timeout, |             self.score.discovery_timeout, | ||||||
|             |event: DiscoveryEvent| -> Result<(), String> { |             |event: DiscoveryEvent| -> Result<(), String> { | ||||||
|  | |||||||
| @ -1,67 +0,0 @@ | |||||||
| use async_trait::async_trait; |  | ||||||
| use derive_new::new; |  | ||||||
| use serde::Serialize; |  | ||||||
| 
 |  | ||||||
| use crate::{ |  | ||||||
|     data::Version, |  | ||||||
|     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, |  | ||||||
|     inventory::Inventory, |  | ||||||
|     score::Score, |  | ||||||
|     topology::Topology, |  | ||||||
| }; |  | ||||||
| use harmony_types::id::Id; |  | ||||||
| 
 |  | ||||||
| #[derive(Debug, new, Clone, Serialize)] |  | ||||||
| pub struct IpxeScore { |  | ||||||
|     //files_to_serve: Url,
 |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| impl<T: Topology> Score<T> for IpxeScore { |  | ||||||
|     fn create_interpret(&self) -> Box<dyn Interpret<T>> { |  | ||||||
|         Box::new(IpxeInterpret::new(self.clone())) |  | ||||||
|     } |  | ||||||
| 
 |  | ||||||
|     fn name(&self) -> String { |  | ||||||
|         "IpxeScore".to_string() |  | ||||||
|     } |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| #[derive(Debug, new, Clone)] |  | ||||||
| pub struct IpxeInterpret { |  | ||||||
|     _score: IpxeScore, |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| #[async_trait] |  | ||||||
| impl<T: Topology> Interpret<T> for IpxeInterpret { |  | ||||||
|     async fn execute( |  | ||||||
|         &self, |  | ||||||
|         _inventory: &Inventory, |  | ||||||
|         _topology: &T, |  | ||||||
|     ) -> Result<Outcome, InterpretError> { |  | ||||||
|         /* |  | ||||||
|         let http_server = &topology.http_server; |  | ||||||
|         http_server.ensure_initialized().await?; |  | ||||||
|         Ok(Outcome::success(format!( |  | ||||||
|             "Http Server running and serving files from {}", |  | ||||||
|             self.score.files_to_serve |  | ||||||
|         ))) |  | ||||||
|         */ |  | ||||||
|         todo!(); |  | ||||||
|     } |  | ||||||
| 
 |  | ||||||
|     fn get_name(&self) -> InterpretName { |  | ||||||
|         InterpretName::Ipxe |  | ||||||
|     } |  | ||||||
| 
 |  | ||||||
|     fn get_version(&self) -> Version { |  | ||||||
|         todo!() |  | ||||||
|     } |  | ||||||
| 
 |  | ||||||
|     fn get_status(&self) -> InterpretStatus { |  | ||||||
|         todo!() |  | ||||||
|     } |  | ||||||
| 
 |  | ||||||
|     fn get_children(&self) -> Vec<Id> { |  | ||||||
|         todo!() |  | ||||||
|     } |  | ||||||
| } |  | ||||||
| @ -6,7 +6,6 @@ pub mod dummy; | |||||||
| pub mod helm; | pub mod helm; | ||||||
| pub mod http; | pub mod http; | ||||||
| pub mod inventory; | pub mod inventory; | ||||||
| pub mod ipxe; |  | ||||||
| pub mod k3d; | pub mod k3d; | ||||||
| pub mod k8s; | pub mod k8s; | ||||||
| pub mod lamp; | pub mod lamp; | ||||||
|  | |||||||
							
								
								
									
harmony/src/modules/okd/bootstrap_01_prepare.rs (new file, 120 lines)
									
								
							| @ -0,0 +1,120 @@ | |||||||
|  | use async_trait::async_trait; | ||||||
|  | use derive_new::new; | ||||||
|  | use harmony_types::id::Id; | ||||||
|  | use log::{error, info, warn}; | ||||||
|  | use serde::Serialize; | ||||||
|  | 
 | ||||||
|  | use crate::{ | ||||||
|  |     data::Version, | ||||||
|  |     hardware::PhysicalHost, | ||||||
|  |     infra::inventory::InventoryRepositoryFactory, | ||||||
|  |     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, | ||||||
|  |     inventory::{HostRole, Inventory}, | ||||||
|  |     modules::inventory::{DiscoverHostForRoleScore, LaunchDiscoverInventoryAgentScore}, | ||||||
|  |     score::Score, | ||||||
|  |     topology::HAClusterTopology, | ||||||
|  | }; | ||||||
|  | // -------------------------------------------------------------------------------------------------
 | ||||||
|  | // Step 01: Inventory (default PXE + Kickstart in RAM + Rust agent)
 | ||||||
|  | // - This score exposes/ensures the default inventory assets and waits for discoveries.
 | ||||||
|  | // - No early bonding. Simple access DHCP.
 | ||||||
|  | // -------------------------------------------------------------------------------------------------
 | ||||||
|  | 
 | ||||||
|  | #[derive(Debug, Clone, Serialize, new)] | ||||||
|  | pub struct OKDSetup01InventoryScore {} | ||||||
|  | 
 | ||||||
|  | impl Score<HAClusterTopology> for OKDSetup01InventoryScore { | ||||||
|  |     fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> { | ||||||
|  |         Box::new(OKDSetup01InventoryInterpret::new(self.clone())) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn name(&self) -> String { | ||||||
|  |         "OKDSetup01InventoryScore".to_string() | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | #[derive(Debug, Clone)] | ||||||
|  | pub struct OKDSetup01InventoryInterpret { | ||||||
|  |     score: OKDSetup01InventoryScore, | ||||||
|  |     version: Version, | ||||||
|  |     status: InterpretStatus, | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | impl OKDSetup01InventoryInterpret { | ||||||
|  |     pub fn new(score: OKDSetup01InventoryScore) -> Self { | ||||||
|  |         let version = Version::from("1.0.0").unwrap(); | ||||||
|  |         Self { | ||||||
|  |             version, | ||||||
|  |             score, | ||||||
|  |             status: InterpretStatus::QUEUED, | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | #[async_trait] | ||||||
|  | impl Interpret<HAClusterTopology> for OKDSetup01InventoryInterpret { | ||||||
|  |     fn get_name(&self) -> InterpretName { | ||||||
|  |         InterpretName::Custom("OKDSetup01Inventory") | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn get_version(&self) -> Version { | ||||||
|  |         self.version.clone() | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn get_status(&self) -> InterpretStatus { | ||||||
|  |         self.status.clone() | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn get_children(&self) -> Vec<Id> { | ||||||
|  |         vec![] | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     async fn execute( | ||||||
|  |         &self, | ||||||
|  |         inventory: &Inventory, | ||||||
|  |         topology: &HAClusterTopology, | ||||||
|  |     ) -> Result<Outcome, InterpretError> { | ||||||
|  |         info!("Setting up base DNS config for OKD"); | ||||||
|  |         let cluster_domain = &topology.domain_name; | ||||||
|  |         let load_balancer_ip = &topology.load_balancer.get_ip(); | ||||||
|  |         inquire::Confirm::new(&format!( | ||||||
|  |             "Set hostnames manually in your opnsense dnsmasq config :
 | ||||||
|  | *.apps.{cluster_domain} -> {load_balancer_ip} | ||||||
|  | api.{cluster_domain} -> {load_balancer_ip} | ||||||
|  | api-int.{cluster_domain} -> {load_balancer_ip} | ||||||
|  | 
 | ||||||
|  | When you can dig them, confirm to continue. | ||||||
|  | " | ||||||
|  |         )) | ||||||
|  |         .prompt() | ||||||
|  |         .expect("Prompt error"); | ||||||
|  |         // TODO reactivate automatic dns config when migration from unbound to dnsmasq is done
 | ||||||
|  |         // OKDDnsScore::new(topology)
 | ||||||
|  |         //     .interpret(inventory, topology)
 | ||||||
|  |         //     .await?;
 | ||||||
|  | 
 | ||||||
|  |         // TODO refactor this section into a function discover_hosts_for_role(...) that can be used
 | ||||||
|  |         // from anywhere in the project, not a member of this struct
 | ||||||
|  | 
 | ||||||
|  |         let mut bootstrap_host: Option<PhysicalHost> = None; | ||||||
|  |         let repo = InventoryRepositoryFactory::build().await?; | ||||||
|  | 
 | ||||||
|  |         while bootstrap_host.is_none() { | ||||||
|  |             let hosts = repo.get_host_for_role(&HostRole::Bootstrap).await?; | ||||||
|  |             bootstrap_host = hosts.into_iter().next(); | ||||||
|  |             if bootstrap_host.is_some() { | ||||||
|  |                 // A bootstrap host is already assigned; skip the interactive discovery. | ||||||
|  |                 break; | ||||||
|  |             } | ||||||
|  |             DiscoverHostForRoleScore { | ||||||
|  |                 role: HostRole::Bootstrap, | ||||||
|  |             } | ||||||
|  |             .interpret(inventory, topology) | ||||||
|  |             .await?; | ||||||
|  |         } | ||||||
|  | 
 | ||||||
|  |         Ok(Outcome::new( | ||||||
|  |             InterpretStatus::SUCCESS, | ||||||
|  |             format!( | ||||||
|  |                 "Found and assigned bootstrap node: {}", | ||||||
|  |                 bootstrap_host.unwrap().summary() | ||||||
|  |             ), | ||||||
|  |         )) | ||||||
|  |     } | ||||||
|  | } | ||||||
							
								
								
									
harmony/src/modules/okd/bootstrap_02_bootstrap.rs (new file, 387 lines)
									
								
							| @ -0,0 +1,387 @@ | |||||||
|  | use std::{fmt::Write, path::PathBuf}; | ||||||
|  | 
 | ||||||
|  | use async_trait::async_trait; | ||||||
|  | use derive_new::new; | ||||||
|  | use harmony_secret::SecretManager; | ||||||
|  | use harmony_types::id::Id; | ||||||
|  | use log::{debug, error, info, warn}; | ||||||
|  | use serde::{Deserialize, Serialize}; | ||||||
|  | use tokio::{fs::File, io::AsyncWriteExt, process::Command}; | ||||||
|  | 
 | ||||||
|  | use crate::{ | ||||||
|  |     config::secret::{RedhatSecret, SshKeyPair}, | ||||||
|  |     data::{FileContent, FilePath, Version}, | ||||||
|  |     hardware::PhysicalHost, | ||||||
|  |     infra::inventory::InventoryRepositoryFactory, | ||||||
|  |     instrumentation::{HarmonyEvent, instrument}, | ||||||
|  |     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, | ||||||
|  |     inventory::{HostRole, Inventory}, | ||||||
|  |     modules::{ | ||||||
|  |         dhcp::DhcpHostBindingScore, | ||||||
|  |         http::{IPxeMacBootFileScore, StaticFilesHttpScore}, | ||||||
|  |         inventory::LaunchDiscoverInventoryAgentScore, | ||||||
|  |         okd::{ | ||||||
|  |             bootstrap_load_balancer::OKDBootstrapLoadBalancerScore, | ||||||
|  |             templates::{BootstrapIpxeTpl, InstallConfigYaml}, | ||||||
|  |         }, | ||||||
|  |     }, | ||||||
|  |     score::Score, | ||||||
|  |     topology::{HAClusterTopology, HostBinding}, | ||||||
|  | }; | ||||||
|  | // -------------------------------------------------------------------------------------------------
 | ||||||
|  | // Step 02: Bootstrap
 | ||||||
|  | // - Select bootstrap node (from discovered set).
 | ||||||
|  | // - Render per-MAC iPXE pointing to OKD 4.19 SCOS live assets + bootstrap ignition.
 | ||||||
|  | // - Reboot the host via SSH and wait for bootstrap-complete.
 | ||||||
|  | // - No bonding at this stage unless absolutely required; prefer persistence via MC later.
 | ||||||
|  | // -------------------------------------------------------------------------------------------------
 | ||||||
|  | 
 | ||||||
|  | #[derive(Debug, Clone, Serialize, new)] | ||||||
|  | pub struct OKDSetup02BootstrapScore {} | ||||||
|  | 
 | ||||||
|  | impl Score<HAClusterTopology> for OKDSetup02BootstrapScore { | ||||||
|  |     fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> { | ||||||
|  |         Box::new(OKDSetup02BootstrapInterpret::new()) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn name(&self) -> String { | ||||||
|  |         "OKDSetup02BootstrapScore".to_string() | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | #[derive(Debug, Clone)] | ||||||
|  | pub struct OKDSetup02BootstrapInterpret { | ||||||
|  |     version: Version, | ||||||
|  |     status: InterpretStatus, | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | impl OKDSetup02BootstrapInterpret { | ||||||
|  |     pub fn new() -> Self { | ||||||
|  |         let version = Version::from("1.0.0").unwrap(); | ||||||
|  |         Self { | ||||||
|  |             version, | ||||||
|  |             status: InterpretStatus::QUEUED, | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     async fn get_bootstrap_node(&self) -> Result<PhysicalHost, InterpretError> { | ||||||
|  |         let repo = InventoryRepositoryFactory::build().await?; | ||||||
|  |         match repo | ||||||
|  |             .get_host_for_role(&HostRole::Bootstrap) | ||||||
|  |             .await? | ||||||
|  |             .into_iter() | ||||||
|  |             .next() | ||||||
|  |         { | ||||||
|  |             Some(host) => Ok(host), | ||||||
|  |             None => Err(InterpretError::new( | ||||||
|  |                 "No bootstrap node available".to_string(), | ||||||
|  |             )), | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     async fn prepare_ignition_files( | ||||||
|  |         &self, | ||||||
|  |         inventory: &Inventory, | ||||||
|  |         topology: &HAClusterTopology, | ||||||
|  |     ) -> Result<(), InterpretError> { | ||||||
|  |         let okd_bin_path = PathBuf::from("./data/okd/bin"); | ||||||
|  |         let okd_installation_path_str = | ||||||
|  |             format!("./data/okd/installation_files_{}", inventory.location.name); | ||||||
|  |         let okd_images_path = &PathBuf::from("./data/okd/installer_image/"); | ||||||
|  |         let okd_installation_path = &PathBuf::from(okd_installation_path_str); | ||||||
|  | 
 | ||||||
|  |         let exit_status = Command::new("mkdir") | ||||||
|  |             .arg("-p") | ||||||
|  |             .arg(okd_installation_path) | ||||||
|  |             .spawn() | ||||||
|  |             .expect("Command failed to start") | ||||||
|  |             .wait() | ||||||
|  |             .await | ||||||
|  |             .map_err(|e| { | ||||||
|  |                 InterpretError::new(format!("Failed to create okd installation directory : {e}")) | ||||||
|  |             })?; | ||||||
|  |         if !exit_status.success() { | ||||||
|  |             return Err(InterpretError::new(format!( | ||||||
|  |                 "Failed to create okd installation directory" | ||||||
|  |             ))); | ||||||
|  |         } else { | ||||||
|  |             info!( | ||||||
|  |                 "Created OKD installation directory {}", | ||||||
|  |                 okd_installation_path.to_string_lossy() | ||||||
|  |             ); | ||||||
|  |         } | ||||||
|  | 
 | ||||||
|  |         let redhat_secret = SecretManager::get_or_prompt::<RedhatSecret>().await?; | ||||||
|  |         let ssh_key = SecretManager::get_or_prompt::<SshKeyPair>().await?; | ||||||
|  | 
 | ||||||
|  |         let install_config_yaml = InstallConfigYaml { | ||||||
|  |             cluster_name: &topology.get_cluster_name(), | ||||||
|  |             cluster_domain: &topology.get_cluster_base_domain(), | ||||||
|  |             pull_secret: &redhat_secret.pull_secret, | ||||||
|  |             ssh_public_key: &ssh_key.public, | ||||||
|  |         } | ||||||
|  |         .to_string(); | ||||||
|  | 
 | ||||||
|  |         let install_config_file_path = &okd_installation_path.join("install-config.yaml"); | ||||||
|  | 
 | ||||||
|  |         self.create_file(install_config_file_path, install_config_yaml.as_bytes()) | ||||||
|  |             .await?; | ||||||
|  | 
 | ||||||
|  |         let install_config_backup_extension = install_config_file_path | ||||||
|  |             .extension() | ||||||
|  |             .map(|e| format!("{}.bak", e.to_string_lossy())) | ||||||
|  |             .unwrap_or("bak".to_string()); | ||||||
|  | 
 | ||||||
|  |         let mut install_config_backup = install_config_file_path.clone(); | ||||||
|  |         install_config_backup.set_extension(install_config_backup_extension); | ||||||
|  | 
 | ||||||
|  |         self.create_file(&install_config_backup, install_config_yaml.as_bytes()) | ||||||
|  |             .await?; | ||||||
|  | 
 | ||||||
|  |         info!("Creating manifest files with openshift-install"); | ||||||
|  |         let output = Command::new(okd_bin_path.join("openshift-install")) | ||||||
|  |             .args([ | ||||||
|  |                 "create", | ||||||
|  |                 "manifests", | ||||||
|  |                 "--dir", | ||||||
|  |                 okd_installation_path.to_str().unwrap(), | ||||||
|  |             ]) | ||||||
|  |             .output() | ||||||
|  |             .await | ||||||
|  |             .map_err(|e| InterpretError::new(format!("Failed to create okd manifest : {e}")))?; | ||||||
|  |         let stdout = String::from_utf8(output.stdout).unwrap(); | ||||||
|  |         info!("openshift-install stdout :\n\n{}", stdout); | ||||||
|  |         let stderr = String::from_utf8(output.stderr).unwrap(); | ||||||
|  |         info!("openshift-install stderr :\n\n{}", stderr); | ||||||
|  |         info!("openshift-install exit status : {}", output.status); | ||||||
|  |         if !output.status.success() { | ||||||
|  |             return Err(InterpretError::new(format!( | ||||||
|  |                 "Failed to create okd manifest, exit code {} : {}", | ||||||
|  |                 output.status, stderr | ||||||
|  |             ))); | ||||||
|  |         } | ||||||
|  | 
 | ||||||
|  |         info!("Creating ignition files with openshift-install"); | ||||||
|  |         let output = Command::new(okd_bin_path.join("openshift-install")) | ||||||
|  |             .args([ | ||||||
|  |                 "create", | ||||||
|  |                 "ignition-configs", | ||||||
|  |                 "--dir", | ||||||
|  |                 okd_installation_path.to_str().unwrap(), | ||||||
|  |             ]) | ||||||
|  |             .output() | ||||||
|  |             .await | ||||||
|  |             .map_err(|e| { | ||||||
|  |                 InterpretError::new(format!("Failed to create okd ignition config : {e}")) | ||||||
|  |             })?; | ||||||
|  |         let stdout = String::from_utf8(output.stdout).unwrap(); | ||||||
|  |         info!("openshift-install stdout :\n\n{}", stdout); | ||||||
|  |         let stderr = String::from_utf8(output.stderr).unwrap(); | ||||||
|  |         info!("openshift-install stderr :\n\n{}", stderr); | ||||||
|  |         info!("openshift-install exit status : {}", output.status); | ||||||
|  |         if !output.status.success() { | ||||||
|  |             return Err(InterpretError::new(format!( | ||||||
|  |                 "Failed to create okd manifest, exit code {} : {}", | ||||||
|  |                 output.status, stderr | ||||||
|  |             ))); | ||||||
|  |         } | ||||||
|  | 
 | ||||||
|  |         let ignition_files_http_path = PathBuf::from("okd_ignition_files"); | ||||||
|  |         let prepare_file_content = async |filename: &str| -> Result<FileContent, InterpretError> { | ||||||
|  |             let local_path = okd_installation_path.join(filename); | ||||||
|  |             let remote_path = ignition_files_http_path.join(filename); | ||||||
|  | 
 | ||||||
|  |             info!( | ||||||
|  |                 "Preparing file content for local file : {} to remote : {}", | ||||||
|  |                 local_path.to_string_lossy(), | ||||||
|  |                 remote_path.to_string_lossy() | ||||||
|  |             ); | ||||||
|  | 
 | ||||||
|  |             let content = tokio::fs::read_to_string(&local_path).await.map_err(|e| { | ||||||
|  |                 InterpretError::new(format!( | ||||||
|  |                     "Could not read file content {} : {e}", | ||||||
|  |                     local_path.to_string_lossy() | ||||||
|  |                 )) | ||||||
|  |             })?; | ||||||
|  | 
 | ||||||
|  |             Ok(FileContent { | ||||||
|  |                 path: FilePath::Relative(remote_path.to_string_lossy().to_string()), | ||||||
|  |                 content, | ||||||
|  |             }) | ||||||
|  |         }; | ||||||
|  | 
 | ||||||
|  |         StaticFilesHttpScore { | ||||||
|  |             remote_path: None, | ||||||
|  |             folder_to_serve: None, | ||||||
|  |             files: vec![ | ||||||
|  |                 prepare_file_content("bootstrap.ign").await?, | ||||||
|  |                 prepare_file_content("master.ign").await?, | ||||||
|  |                 prepare_file_content("worker.ign").await?, | ||||||
|  |                 prepare_file_content("metadata.json").await?, | ||||||
|  |             ], | ||||||
|  |         } | ||||||
|  |         .interpret(inventory, topology) | ||||||
|  |         .await?; | ||||||
|  | 
 | ||||||
|  |         info!("Successfully prepared ignition files for OKD installation"); | ||||||
|  |         // ignition_files_http_path // = PathBuf::from("okd_ignition_files");
 | ||||||
|  |         info!( | ||||||
|  |             r#"Uploading images, they can be refreshed with a command similar to this one: openshift-install coreos print-stream-json | grep -Eo '"https.*(kernel.|initramfs.|rootfs.)\w+(\.img)?"'  | grep x86_64 | xargs -n 1 curl -LO"# | ||||||
|  |         ); | ||||||
|  | 
 | ||||||
|  |         inquire::Confirm::new( | ||||||
|  |             &format!("push installer image files with `scp -r {}/* root@{}:/usr/local/http/scos/` until performance issue is resolved", okd_images_path.to_string_lossy(), topology.http_server.get_ip())).prompt().expect("Prompt error"); | ||||||
|  | 
 | ||||||
|  |         // let scos_http_path = PathBuf::from("scos");
 | ||||||
|  |         // StaticFilesHttpScore {
 | ||||||
|  |         //     folder_to_serve: Some(Url::LocalFolder(
 | ||||||
|  |         //         okd_images_path.to_string_lossy().to_string(),
 | ||||||
|  |         //     )),
 | ||||||
|  |         //     remote_path: Some(scos_http_path.to_string_lossy().to_string()),
 | ||||||
|  |         //     files: vec![],
 | ||||||
|  |         // }
 | ||||||
|  |         // .interpret(inventory, topology)
 | ||||||
|  |         // .await?;
 | ||||||
|  | 
 | ||||||
|  |         Ok(()) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     async fn configure_host_binding( | ||||||
|  |         &self, | ||||||
|  |         inventory: &Inventory, | ||||||
|  |         topology: &HAClusterTopology, | ||||||
|  |     ) -> Result<(), InterpretError> { | ||||||
|  |         let binding = HostBinding { | ||||||
|  |             logical_host: topology.bootstrap_host.clone(), | ||||||
|  |             physical_host: self.get_bootstrap_node().await?, | ||||||
|  |         }; | ||||||
|  |         info!("Configuring host binding for bootstrap node {binding:?}"); | ||||||
|  | 
 | ||||||
|  |         DhcpHostBindingScore { | ||||||
|  |             host_binding: vec![binding], | ||||||
|  |             domain: Some(topology.domain_name.clone()), | ||||||
|  |         } | ||||||
|  |         .interpret(inventory, topology) | ||||||
|  |         .await?; | ||||||
|  |         Ok(()) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     async fn render_per_mac_pxe( | ||||||
|  |         &self, | ||||||
|  |         inventory: &Inventory, | ||||||
|  |         topology: &HAClusterTopology, | ||||||
|  |     ) -> Result<(), InterpretError> { | ||||||
|  |         let content = BootstrapIpxeTpl { | ||||||
|  |             http_ip: &topology.http_server.get_ip().to_string(), | ||||||
|  |             scos_path: "scos",                        // TODO use some constant
 | ||||||
|  |             ignition_http_path: "okd_ignition_files", // TODO use proper variable
 | ||||||
|  |             installation_device: "/dev/sda", | ||||||
|  |             ignition_file_name: "bootstrap.ign", | ||||||
|  |         } | ||||||
|  |         .to_string(); | ||||||
|  | 
 | ||||||
|  |         let bootstrap_node = self.get_bootstrap_node().await?; | ||||||
|  |         let mac_address = bootstrap_node.get_mac_address(); | ||||||
|  | 
 | ||||||
|  |         info!("[Bootstrap] Rendering per-MAC PXE for bootstrap node"); | ||||||
|  |         debug!("bootstrap ipxe content : {content}"); | ||||||
|  |         debug!("bootstrap mac addresses : {mac_address:?}"); | ||||||
|  | 
 | ||||||
|  |         IPxeMacBootFileScore { | ||||||
|  |             mac_address, | ||||||
|  |             content, | ||||||
|  |         } | ||||||
|  |         .interpret(inventory, topology) | ||||||
|  |         .await?; | ||||||
|  |         Ok(()) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     async fn setup_bootstrap_load_balancer( | ||||||
|  |         &self, | ||||||
|  |         inventory: &Inventory, | ||||||
|  |         topology: &HAClusterTopology, | ||||||
|  |     ) -> Result<(), InterpretError> { | ||||||
|  |         let outcome = OKDBootstrapLoadBalancerScore::new(topology) | ||||||
|  |             .interpret(inventory, topology) | ||||||
|  |             .await?; | ||||||
|  |         info!("Successfully executed OKDBootstrapLoadBalancerScore : {outcome:?}"); | ||||||
|  |         Ok(()) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     async fn reboot_target(&self) -> Result<(), InterpretError> { | ||||||
|  |         // Placeholder: ssh reboot using the inventory ephemeral key
 | ||||||
|  |         info!("[Bootstrap] Rebooting bootstrap node via SSH"); | ||||||
|  |         // TODO reboot programatically, there are some logical checks and refactoring to do such as
 | ||||||
|  |         // accessing the bootstrap node config (ip address) from the inventory
 | ||||||
|  |         inquire::Confirm::new( | ||||||
|  |                 "Now reboot the bootstrap node so it picks up its PXE boot file. Press enter when ready.", | ||||||
|  |         ) | ||||||
|  |         .prompt() | ||||||
|  |         .expect("Unexpected prompt error"); | ||||||
|  |         Ok(()) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     async fn wait_for_bootstrap_complete(&self) -> Result<(), InterpretError> { | ||||||
|  |         // Placeholder: wait-for bootstrap-complete
 | ||||||
|  |         info!("[Bootstrap] Waiting for bootstrap-complete …"); | ||||||
|  |         todo!("[Bootstrap] Waiting for bootstrap-complete …") | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     async fn create_file(&self, path: &PathBuf, content: &[u8]) -> Result<(), InterpretError> { | ||||||
|  |         let mut install_config_file = File::create(path).await.map_err(|e| { | ||||||
|  |             InterpretError::new(format!( | ||||||
|  |                 "Could not create file {} : {e}", | ||||||
|  |                 path.to_string_lossy() | ||||||
|  |             )) | ||||||
|  |         })?; | ||||||
|  |         install_config_file.write_all(content).await.map_err(|e| { | ||||||
|  |             InterpretError::new(format!( | ||||||
|  |                 "Could not write file {} : {e}", | ||||||
|  |                 path.to_string_lossy() | ||||||
|  |             )) | ||||||
|  |         })?; | ||||||
|  |         Ok(()) | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | #[async_trait] | ||||||
|  | impl Interpret<HAClusterTopology> for OKDSetup02BootstrapInterpret { | ||||||
|  |     fn get_name(&self) -> InterpretName { | ||||||
|  |         InterpretName::Custom("OKDSetup02Bootstrap") | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn get_version(&self) -> Version { | ||||||
|  |         self.version.clone() | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn get_status(&self) -> InterpretStatus { | ||||||
|  |         self.status.clone() | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn get_children(&self) -> Vec<Id> { | ||||||
|  |         vec![] | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     async fn execute( | ||||||
|  |         &self, | ||||||
|  |         inventory: &Inventory, | ||||||
|  |         topology: &HAClusterTopology, | ||||||
|  |     ) -> Result<Outcome, InterpretError> { | ||||||
|  |         self.configure_host_binding(inventory, topology).await?; | ||||||
|  |         self.prepare_ignition_files(inventory, topology).await?; | ||||||
|  |         self.render_per_mac_pxe(inventory, topology).await?; | ||||||
|  |         self.setup_bootstrap_load_balancer(inventory, topology) | ||||||
|  |             .await?; | ||||||
|  | 
 | ||||||
|  |         // TODO https://docs.okd.io/latest/installing/installing_bare_metal/upi/installing-bare-metal.html#installation-user-provisioned-validating-dns_installing-bare-metal
 | ||||||
|  |         // self.validate_dns_config(inventory, topology).await?;
 | ||||||
|  | 
 | ||||||
|  |         self.reboot_target().await?; | ||||||
|  |         self.wait_for_bootstrap_complete().await?; | ||||||
|  | 
 | ||||||
|  |         Ok(Outcome::new( | ||||||
|  |             InterpretStatus::SUCCESS, | ||||||
|  |             "Bootstrap phase complete".into(), | ||||||
|  |         )) | ||||||
|  |     } | ||||||
|  | } | ||||||
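`wait_for_bootstrap_complete` above is still a `todo!`. One plausible shape for it, mirroring how this file already shells out to `openshift-install` for the manifest and ignition steps; the two path parameters are assumed to be derived the same way as in `prepare_ignition_files`.

    use std::path::Path;
    use tokio::process::Command;

    use crate::interpret::InterpretError;

    /// Sketch: block until `openshift-install wait-for bootstrap-complete` succeeds.
    async fn wait_for_bootstrap_complete(
        okd_bin_path: &Path,
        okd_installation_path: &Path,
    ) -> Result<(), InterpretError> {
        let output = Command::new(okd_bin_path.join("openshift-install"))
            .args([
                "wait-for",
                "bootstrap-complete",
                "--dir",
                okd_installation_path.to_str().unwrap(),
                "--log-level=info",
            ])
            .output()
            .await
            .map_err(|e| {
                InterpretError::new(format!("Failed to run wait-for bootstrap-complete: {e}"))
            })?;

        if !output.status.success() {
            return Err(InterpretError::new(format!(
                "Bootstrap did not complete, exit code {}: {}",
                output.status,
                String::from_utf8_lossy(&output.stderr)
            )));
        }
        Ok(())
    }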
							
								
								
									
277  harmony/src/modules/okd/bootstrap_03_control_plane.rs  Normal file
							| @ -0,0 +1,277 @@ | |||||||
|  | use std::{fmt::Write, path::PathBuf}; | ||||||
|  | 
 | ||||||
|  | use async_trait::async_trait; | ||||||
|  | use derive_new::new; | ||||||
|  | use harmony_types::id::Id; | ||||||
|  | use log::{debug, info}; | ||||||
|  | use serde::Serialize; | ||||||
|  | 
 | ||||||
|  | use crate::{ | ||||||
|  |     data::Version, | ||||||
|  |     hardware::PhysicalHost, | ||||||
|  |     infra::inventory::InventoryRepositoryFactory, | ||||||
|  |     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, | ||||||
|  |     inventory::{HostRole, Inventory}, | ||||||
|  |     modules::{ | ||||||
|  |         dhcp::DhcpHostBindingScore, http::IPxeMacBootFileScore, | ||||||
|  |         inventory::DiscoverHostForRoleScore, okd::templates::BootstrapIpxeTpl, | ||||||
|  |     }, | ||||||
|  |     score::Score, | ||||||
|  |     topology::{HAClusterTopology, HostBinding}, | ||||||
|  | }; | ||||||
|  | // -------------------------------------------------------------------------------------------------
 | ||||||
|  | // Step 03: Control Plane
 | ||||||
|  | // - Render per-MAC PXE & ignition for cp0/cp1/cp2.
 | ||||||
|  | // - Persist bonding via MachineConfigs (or NNCP) once SCOS is active.
 | ||||||
|  | // -------------------------------------------------------------------------------------------------
 | ||||||
|  | 
 | ||||||
|  | #[derive(Debug, Clone, Serialize, new)] | ||||||
|  | pub struct OKDSetup03ControlPlaneScore {} | ||||||
|  | 
 | ||||||
|  | impl Score<HAClusterTopology> for OKDSetup03ControlPlaneScore { | ||||||
|  |     fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> { | ||||||
|  |         Box::new(OKDSetup03ControlPlaneInterpret::new(self.clone())) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn name(&self) -> String { | ||||||
|  |         "OKDSetup03ControlPlaneScore".to_string() | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | #[derive(Debug, Clone)] | ||||||
|  | pub struct OKDSetup03ControlPlaneInterpret { | ||||||
|  |     score: OKDSetup03ControlPlaneScore, | ||||||
|  |     version: Version, | ||||||
|  |     status: InterpretStatus, | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | impl OKDSetup03ControlPlaneInterpret { | ||||||
|  |     pub fn new(score: OKDSetup03ControlPlaneScore) -> Self { | ||||||
|  |         let version = Version::from("1.0.0").unwrap(); | ||||||
|  |         Self { | ||||||
|  |             version, | ||||||
|  |             score, | ||||||
|  |             status: InterpretStatus::QUEUED, | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     /// Ensures that three physical hosts are discovered and available for the ControlPlane role.
 | ||||||
|  |     /// It will trigger discovery if not enough hosts are found.
 | ||||||
|  |     async fn get_nodes( | ||||||
|  |         &self, | ||||||
|  |         inventory: &Inventory, | ||||||
|  |         topology: &HAClusterTopology, | ||||||
|  |     ) -> Result<Vec<PhysicalHost>, InterpretError> { | ||||||
|  |         const REQUIRED_HOSTS: usize = 3; | ||||||
|  |         let repo = InventoryRepositoryFactory::build().await?; | ||||||
|  |         let mut control_plane_hosts = repo.get_host_for_role(&HostRole::ControlPlane).await?; | ||||||
|  | 
 | ||||||
|  |         while control_plane_hosts.len() < REQUIRED_HOSTS { | ||||||
|  |             info!( | ||||||
|  |                 "Discovery of {} control plane hosts in progress, current number {}", | ||||||
|  |                 REQUIRED_HOSTS, | ||||||
|  |                 control_plane_hosts.len() | ||||||
|  |             ); | ||||||
|  |             // This score triggers the discovery agent for a specific role.
 | ||||||
|  |             DiscoverHostForRoleScore { | ||||||
|  |                 role: HostRole::ControlPlane, | ||||||
|  |             } | ||||||
|  |             .interpret(inventory, topology) | ||||||
|  |             .await?; | ||||||
|  |             control_plane_hosts = repo.get_host_for_role(&HostRole::ControlPlane).await?; | ||||||
|  |         } | ||||||
|  | 
 | ||||||
|  |         if control_plane_hosts.len() < REQUIRED_HOSTS { | ||||||
|  |             Err(InterpretError::new(format!( | ||||||
|  |                 "OKD Requires at least {} control plane hosts, but only found {}. Cannot proceed.", | ||||||
|  |                 REQUIRED_HOSTS, | ||||||
|  |                 control_plane_hosts.len() | ||||||
|  |             ))) | ||||||
|  |         } else { | ||||||
|  |             // Take exactly the number of required hosts to ensure consistency.
 | ||||||
|  |             Ok(control_plane_hosts | ||||||
|  |                 .into_iter() | ||||||
|  |                 .take(REQUIRED_HOSTS) | ||||||
|  |                 .collect()) | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     /// Configures DHCP host bindings for all control plane nodes.
 | ||||||
|  |     async fn configure_host_binding( | ||||||
|  |         &self, | ||||||
|  |         inventory: &Inventory, | ||||||
|  |         topology: &HAClusterTopology, | ||||||
|  |         nodes: &Vec<PhysicalHost>, | ||||||
|  |     ) -> Result<(), InterpretError> { | ||||||
|  |         info!("[ControlPlane] Configuring host bindings for control plane nodes."); | ||||||
|  | 
 | ||||||
|  |         // Ensure the topology definition matches the number of physical nodes found.
 | ||||||
|  |         if topology.control_plane.len() != nodes.len() { | ||||||
|  |             return Err(InterpretError::new(format!( | ||||||
|  |                 "Mismatch between logical control plane hosts defined in topology ({}) and physical nodes found ({}).", | ||||||
|  |                 topology.control_plane.len(), | ||||||
|  |                 nodes.len() | ||||||
|  |             ))); | ||||||
|  |         } | ||||||
|  | 
 | ||||||
|  |         // Create a binding for each physical host to its corresponding logical host.
 | ||||||
|  |         let bindings: Vec<HostBinding> = topology | ||||||
|  |             .control_plane | ||||||
|  |             .iter() | ||||||
|  |             .zip(nodes.iter()) | ||||||
|  |             .map(|(logical_host, physical_host)| { | ||||||
|  |                 info!( | ||||||
|  |                     "Creating binding: Logical Host '{}' -> Physical Host ID '{}'", | ||||||
|  |                     logical_host.name, physical_host.id | ||||||
|  |                 ); | ||||||
|  |                 HostBinding { | ||||||
|  |                     logical_host: logical_host.clone(), | ||||||
|  |                     physical_host: physical_host.clone(), | ||||||
|  |                 } | ||||||
|  |             }) | ||||||
|  |             .collect(); | ||||||
|  | 
 | ||||||
|  |         DhcpHostBindingScore { | ||||||
|  |             host_binding: bindings, | ||||||
|  |             domain: Some(topology.domain_name.clone()), | ||||||
|  |         } | ||||||
|  |         .interpret(inventory, topology) | ||||||
|  |         .await?; | ||||||
|  | 
 | ||||||
|  |         Ok(()) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     /// Renders and deploys a per-MAC iPXE boot file for each control plane node.
 | ||||||
|  |     async fn configure_ipxe( | ||||||
|  |         &self, | ||||||
|  |         inventory: &Inventory, | ||||||
|  |         topology: &HAClusterTopology, | ||||||
|  |         nodes: &Vec<PhysicalHost>, | ||||||
|  |     ) -> Result<(), InterpretError> { | ||||||
|  |         info!("[ControlPlane] Rendering per-MAC iPXE configurations."); | ||||||
|  | 
 | ||||||
|  |         // The iPXE script content is the same for all control plane nodes,
 | ||||||
|  |         // pointing to the 'master.ign' ignition file.
 | ||||||
|  |         let content = BootstrapIpxeTpl { | ||||||
|  |             http_ip: &topology.http_server.get_ip().to_string(), | ||||||
|  |             scos_path: "scos", | ||||||
|  |             ignition_http_path: "okd_ignition_files", | ||||||
|  |             installation_device: "/dev/sda", // This might need to be configurable per-host in the future
 | ||||||
|  |             ignition_file_name: "master.ign", // Control plane nodes use the master ignition file
 | ||||||
|  |         } | ||||||
|  |         .to_string(); | ||||||
|  | 
 | ||||||
|  |         debug!("[ControlPlane] iPXE content template:\n{}", content); | ||||||
|  | 
 | ||||||
|  |         // Create and apply an iPXE boot file for each node.
 | ||||||
|  |         for node in nodes { | ||||||
|  |             let mac_address = node.get_mac_address(); | ||||||
|  |             if mac_address.is_empty() { | ||||||
|  |                 return Err(InterpretError::new(format!( | ||||||
|  |                     "Physical host with ID '{}' has no MAC addresses defined.", | ||||||
|  |                     node.id | ||||||
|  |                 ))); | ||||||
|  |             } | ||||||
|  |             info!( | ||||||
|  |                 "[ControlPlane] Applying iPXE config for node ID '{}' with MACs: {:?}", | ||||||
|  |                 node.id, mac_address | ||||||
|  |             ); | ||||||
|  | 
 | ||||||
|  |             IPxeMacBootFileScore { | ||||||
|  |                 mac_address, | ||||||
|  |                 content: content.clone(), | ||||||
|  |             } | ||||||
|  |             .interpret(inventory, topology) | ||||||
|  |             .await?; | ||||||
|  |         } | ||||||
|  | 
 | ||||||
|  |         Ok(()) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     /// Prompts the user to reboot the target control plane nodes.
 | ||||||
|  |     async fn reboot_targets(&self, nodes: &Vec<PhysicalHost>) -> Result<(), InterpretError> { | ||||||
|  |         let node_ids: Vec<String> = nodes.iter().map(|n| n.id.to_string()).collect(); | ||||||
|  |         info!( | ||||||
|  |             "[ControlPlane] Requesting reboot for control plane nodes: {:?}", | ||||||
|  |             node_ids | ||||||
|  |         ); | ||||||
|  | 
 | ||||||
|  |         let confirmation = inquire::Confirm::new( | ||||||
|  |                 &format!("Please reboot the {} control plane nodes ({}) to apply their PXE configuration. Press enter when ready.", nodes.len(), node_ids.join(", ")), | ||||||
|  |         ) | ||||||
|  |         .prompt() | ||||||
|  |         .map_err(|e| InterpretError::new(format!("User prompt failed: {}", e)))?; | ||||||
|  | 
 | ||||||
|  |         if !confirmation { | ||||||
|  |             return Err(InterpretError::new( | ||||||
|  |                 "User aborted the operation.".to_string(), | ||||||
|  |             )); | ||||||
|  |         } | ||||||
|  | 
 | ||||||
|  |         Ok(()) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     /// Placeholder for automating network bonding configuration.
 | ||||||
|  |     async fn persist_network_bond(&self) -> Result<(), InterpretError> { | ||||||
|  |         // Generate MC or NNCP from inventory NIC data; apply via ignition or post-join.
 | ||||||
|  |         info!("[ControlPlane] Ensuring persistent bonding via MachineConfig/NNCP"); | ||||||
|  |         inquire::Confirm::new( | ||||||
|  |             "Network configuration for control plane nodes is not automated yet. Configure it manually if needed.", | ||||||
|  |         ) | ||||||
|  |         .prompt() | ||||||
|  |         .map_err(|e| InterpretError::new(format!("User prompt failed: {}", e)))?; | ||||||
|  | 
 | ||||||
|  |         Ok(()) | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | #[async_trait] | ||||||
|  | impl Interpret<HAClusterTopology> for OKDSetup03ControlPlaneInterpret { | ||||||
|  |     fn get_name(&self) -> InterpretName { | ||||||
|  |         InterpretName::Custom("OKDSetup03ControlPlane") | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn get_version(&self) -> Version { | ||||||
|  |         self.version.clone() | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn get_status(&self) -> InterpretStatus { | ||||||
|  |         self.status.clone() | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn get_children(&self) -> Vec<Id> { | ||||||
|  |         vec![] | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     async fn execute( | ||||||
|  |         &self, | ||||||
|  |         inventory: &Inventory, | ||||||
|  |         topology: &HAClusterTopology, | ||||||
|  |     ) -> Result<Outcome, InterpretError> { | ||||||
|  |         // 1. Ensure we have 3 physical hosts for the control plane.
 | ||||||
|  |         let nodes = self.get_nodes(inventory, topology).await?; | ||||||
|  | 
 | ||||||
|  |         // 2. Create DHCP reservations for the control plane nodes.
 | ||||||
|  |         self.configure_host_binding(inventory, topology, &nodes) | ||||||
|  |             .await?; | ||||||
|  | 
 | ||||||
|  |         // 3. Create iPXE files for each control plane node to boot from the master ignition.
 | ||||||
|  |         self.configure_ipxe(inventory, topology, &nodes).await?; | ||||||
|  | 
 | ||||||
|  |         // 4. Reboot the nodes to start the OS installation.
 | ||||||
|  |         self.reboot_targets(&nodes).await?; | ||||||
|  | 
 | ||||||
|  |         // 5. Placeholder for post-boot network configuration (e.g., bonding).
 | ||||||
|  |         self.persist_network_bond().await?; | ||||||
|  | 
 | ||||||
|  |         // TODO: Implement a step to wait for the control plane nodes to join the cluster
 | ||||||
|  |         // and for the cluster operators to become available. This would be similar to
 | ||||||
|  |         // the `wait-for bootstrap-complete` command.
 | ||||||
|  |         info!("[ControlPlane] Provisioning initiated. Monitor the cluster convergence manually."); | ||||||
|  | 
 | ||||||
|  |         Ok(Outcome::new( | ||||||
|  |             InterpretStatus::SUCCESS, | ||||||
|  |             "Control plane provisioning has been successfully initiated.".into(), | ||||||
|  |         )) | ||||||
|  |     } | ||||||
|  | } | ||||||
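The TODO at the end of `execute` asks for a step that waits for the control plane nodes to join. A rough sketch of such a check, polling `oc get nodes`; the `oc` binary location, the kubeconfig path (`<install dir>/auth/kubeconfig` produced by `create ignition-configs`), and the Ready-count heuristic are assumptions, not project conventions.

    use std::{path::Path, time::Duration};
    use tokio::process::Command;

    use crate::interpret::InterpretError;

    /// Sketch: poll `oc get nodes` until at least `expected_ready` nodes report Ready.
    async fn wait_for_control_plane_ready(
        okd_bin_path: &Path,
        kubeconfig: &Path,
        expected_ready: usize,
    ) -> Result<(), InterpretError> {
        loop {
            let output = Command::new(okd_bin_path.join("oc"))
                .env("KUBECONFIG", kubeconfig)
                .args(["get", "nodes", "--no-headers"])
                .output()
                .await
                .map_err(|e| InterpretError::new(format!("Failed to run oc get nodes: {e}")))?;

            // Count rows whose STATUS column is Ready ("NotReady" does not match " Ready").
            let ready = String::from_utf8_lossy(&output.stdout)
                .lines()
                .filter(|line| line.contains(" Ready"))
                .count();

            if ready >= expected_ready {
                return Ok(());
            }
            tokio::time::sleep(Duration::from_secs(30)).await;
        }
    }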
							
								
								
									
102  harmony/src/modules/okd/bootstrap_04_workers.rs  Normal file
							| @ -0,0 +1,102 @@ | |||||||
|  | use std::{fmt::Write, path::PathBuf}; | ||||||
|  | 
 | ||||||
|  | use async_trait::async_trait; | ||||||
|  | use derive_new::new; | ||||||
|  | use harmony_secret::SecretManager; | ||||||
|  | use harmony_types::id::Id; | ||||||
|  | use log::{debug, error, info, warn}; | ||||||
|  | use serde::{Deserialize, Serialize}; | ||||||
|  | use tokio::{fs::File, io::AsyncWriteExt, process::Command}; | ||||||
|  | 
 | ||||||
|  | use crate::{ | ||||||
|  |     config::secret::{RedhatSecret, SshKeyPair}, | ||||||
|  |     data::{FileContent, FilePath, Version}, | ||||||
|  |     hardware::PhysicalHost, | ||||||
|  |     infra::inventory::InventoryRepositoryFactory, | ||||||
|  |     instrumentation::{HarmonyEvent, instrument}, | ||||||
|  |     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, | ||||||
|  |     inventory::{HostRole, Inventory}, | ||||||
|  |     modules::{ | ||||||
|  |         dhcp::DhcpHostBindingScore, | ||||||
|  |         http::{IPxeMacBootFileScore, StaticFilesHttpScore}, | ||||||
|  |         inventory::LaunchDiscoverInventoryAgentScore, | ||||||
|  |         okd::{ | ||||||
|  |             bootstrap_load_balancer::OKDBootstrapLoadBalancerScore, | ||||||
|  |             templates::{BootstrapIpxeTpl, InstallConfigYaml}, | ||||||
|  |         }, | ||||||
|  |     }, | ||||||
|  |     score::Score, | ||||||
|  |     topology::{HAClusterTopology, HostBinding}, | ||||||
|  | }; | ||||||
|  | // -------------------------------------------------------------------------------------------------
 | ||||||
|  | // Step 04: Workers
 | ||||||
|  | // - Render per-MAC PXE & ignition for workers; join nodes.
 | ||||||
|  | // - Persist bonding via MC/NNCP as required (same approach as masters).
 | ||||||
|  | // -------------------------------------------------------------------------------------------------
 | ||||||
|  | 
 | ||||||
|  | #[derive(Debug, Clone, Serialize, new)] | ||||||
|  | pub struct OKDSetup04WorkersScore {} | ||||||
|  | 
 | ||||||
|  | impl Score<HAClusterTopology> for OKDSetup04WorkersScore { | ||||||
|  |     fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> { | ||||||
|  |         Box::new(OKDSetup04WorkersInterpret::new(self.clone())) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn name(&self) -> String { | ||||||
|  |         "OKDSetup04WorkersScore".to_string() | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | #[derive(Debug, Clone)] | ||||||
|  | pub struct OKDSetup04WorkersInterpret { | ||||||
|  |     score: OKDSetup04WorkersScore, | ||||||
|  |     version: Version, | ||||||
|  |     status: InterpretStatus, | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | impl OKDSetup04WorkersInterpret { | ||||||
|  |     pub fn new(score: OKDSetup04WorkersScore) -> Self { | ||||||
|  |         let version = Version::from("1.0.0").unwrap(); | ||||||
|  |         Self { | ||||||
|  |             version, | ||||||
|  |             score, | ||||||
|  |             status: InterpretStatus::QUEUED, | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     async fn render_and_reboot(&self) -> Result<(), InterpretError> { | ||||||
|  |         info!("[Workers] Rendering per-MAC PXE for workers and rebooting"); | ||||||
|  |         Ok(()) | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | #[async_trait] | ||||||
|  | impl Interpret<HAClusterTopology> for OKDSetup04WorkersInterpret { | ||||||
|  |     fn get_name(&self) -> InterpretName { | ||||||
|  |         InterpretName::Custom("OKDSetup04Workers") | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn get_version(&self) -> Version { | ||||||
|  |         self.version.clone() | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn get_status(&self) -> InterpretStatus { | ||||||
|  |         self.status.clone() | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn get_children(&self) -> Vec<Id> { | ||||||
|  |         vec![] | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     async fn execute( | ||||||
|  |         &self, | ||||||
|  |         _inventory: &Inventory, | ||||||
|  |         _topology: &HAClusterTopology, | ||||||
|  |     ) -> Result<Outcome, InterpretError> { | ||||||
|  |         self.render_and_reboot().await?; | ||||||
|  |         Ok(Outcome::new( | ||||||
|  |             InterpretStatus::SUCCESS, | ||||||
|  |             "Workers provisioned".into(), | ||||||
|  |         )) | ||||||
|  |     } | ||||||
|  | } | ||||||
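`render_and_reboot` is still a stub. A sketch of how the worker step could reuse the same template and score as the control plane interpret, pointing workers at `worker.ign`; host discovery and the reboot prompt are left out, and the helper name is hypothetical.

    use crate::{
        hardware::PhysicalHost,
        interpret::InterpretError,
        inventory::Inventory,
        modules::{http::IPxeMacBootFileScore, okd::templates::BootstrapIpxeTpl},
        score::Score,
        topology::{HAClusterTopology, HostBinding},
    };

    /// Sketch: render a per-MAC iPXE file for each worker, using the worker ignition.
    async fn render_worker_pxe(
        inventory: &Inventory,
        topology: &HAClusterTopology,
        workers: &[PhysicalHost],
    ) -> Result<(), InterpretError> {
        let content = BootstrapIpxeTpl {
            http_ip: &topology.http_server.get_ip().to_string(),
            scos_path: "scos",
            ignition_http_path: "okd_ignition_files",
            installation_device: "/dev/sda",
            ignition_file_name: "worker.ign", // workers join with the worker ignition file
        }
        .to_string();

        for node in workers {
            IPxeMacBootFileScore {
                mac_address: node.get_mac_address(),
                content: content.clone(),
            }
            .interpret(inventory, topology)
            .await?;
        }
        Ok(())
    }

DHCP host bindings for workers would follow the same `DhcpHostBindingScore` pattern used for the control plane.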
							
								
								
									
101  harmony/src/modules/okd/bootstrap_05_sanity_check.rs  Normal file
							| @ -0,0 +1,101 @@ | |||||||
|  | use std::{fmt::Write, path::PathBuf}; | ||||||
|  | 
 | ||||||
|  | use async_trait::async_trait; | ||||||
|  | use derive_new::new; | ||||||
|  | use harmony_secret::SecretManager; | ||||||
|  | use harmony_types::id::Id; | ||||||
|  | use log::{debug, error, info, warn}; | ||||||
|  | use serde::{Deserialize, Serialize}; | ||||||
|  | use tokio::{fs::File, io::AsyncWriteExt, process::Command}; | ||||||
|  | 
 | ||||||
|  | use crate::{ | ||||||
|  |     config::secret::{RedhatSecret, SshKeyPair}, | ||||||
|  |     data::{FileContent, FilePath, Version}, | ||||||
|  |     hardware::PhysicalHost, | ||||||
|  |     infra::inventory::InventoryRepositoryFactory, | ||||||
|  |     instrumentation::{HarmonyEvent, instrument}, | ||||||
|  |     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, | ||||||
|  |     inventory::{HostRole, Inventory}, | ||||||
|  |     modules::{ | ||||||
|  |         dhcp::DhcpHostBindingScore, | ||||||
|  |         http::{IPxeMacBootFileScore, StaticFilesHttpScore}, | ||||||
|  |         inventory::LaunchDiscoverInventoryAgentScore, | ||||||
|  |         okd::{ | ||||||
|  |             bootstrap_load_balancer::OKDBootstrapLoadBalancerScore, | ||||||
|  |             templates::{BootstrapIpxeTpl, InstallConfigYaml}, | ||||||
|  |         }, | ||||||
|  |     }, | ||||||
|  |     score::Score, | ||||||
|  |     topology::{HAClusterTopology, HostBinding}, | ||||||
|  | }; | ||||||
|  | // -------------------------------------------------------------------------------------------------
 | ||||||
|  | // Step 05: Sanity Check
 | ||||||
|  | // - Validate API reachability, ClusterOperators, ingress, and SDN status.
 | ||||||
|  | // -------------------------------------------------------------------------------------------------
 | ||||||
|  | 
 | ||||||
|  | #[derive(Debug, Clone, Serialize, new)] | ||||||
|  | pub struct OKDSetup05SanityCheckScore {} | ||||||
|  | 
 | ||||||
|  | impl Score<HAClusterTopology> for OKDSetup05SanityCheckScore { | ||||||
|  |     fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> { | ||||||
|  |         Box::new(OKDSetup05SanityCheckInterpret::new(self.clone())) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn name(&self) -> String { | ||||||
|  |         "OKDSetup05SanityCheckScore".to_string() | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | #[derive(Debug, Clone)] | ||||||
|  | pub struct OKDSetup05SanityCheckInterpret { | ||||||
|  |     score: OKDSetup05SanityCheckScore, | ||||||
|  |     version: Version, | ||||||
|  |     status: InterpretStatus, | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | impl OKDSetup05SanityCheckInterpret { | ||||||
|  |     pub fn new(score: OKDSetup05SanityCheckScore) -> Self { | ||||||
|  |         let version = Version::from("1.0.0").unwrap(); | ||||||
|  |         Self { | ||||||
|  |             version, | ||||||
|  |             score, | ||||||
|  |             status: InterpretStatus::QUEUED, | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     async fn run_checks(&self) -> Result<(), InterpretError> { | ||||||
|  |         info!("[Sanity] Checking API, COs, Ingress, and SDN health …"); | ||||||
|  |         Ok(()) | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | #[async_trait] | ||||||
|  | impl Interpret<HAClusterTopology> for OKDSetup05SanityCheckInterpret { | ||||||
|  |     fn get_name(&self) -> InterpretName { | ||||||
|  |         InterpretName::Custom("OKDSetup05SanityCheck") | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn get_version(&self) -> Version { | ||||||
|  |         self.version.clone() | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn get_status(&self) -> InterpretStatus { | ||||||
|  |         self.status.clone() | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn get_children(&self) -> Vec<Id> { | ||||||
|  |         vec![] | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     async fn execute( | ||||||
|  |         &self, | ||||||
|  |         _inventory: &Inventory, | ||||||
|  |         _topology: &HAClusterTopology, | ||||||
|  |     ) -> Result<Outcome, InterpretError> { | ||||||
|  |         self.run_checks().await?; | ||||||
|  |         Ok(Outcome::new( | ||||||
|  |             InterpretStatus::SUCCESS, | ||||||
|  |             "Sanity checks passed".into(), | ||||||
|  |         )) | ||||||
|  |     } | ||||||
|  | } | ||||||
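`run_checks` currently only logs. A sketch of a first concrete check: every ClusterOperator must report Available=True and Degraded=False. The `oc` and kubeconfig locations are assumptions carried over from the earlier steps, and the column positions follow the default `oc get clusteroperators` table (NAME, VERSION, AVAILABLE, PROGRESSING, DEGRADED, SINCE).

    use std::path::Path;
    use tokio::process::Command;

    use crate::interpret::InterpretError;

    /// Sketch: fail if any ClusterOperator is unavailable or degraded.
    async fn check_cluster_operators(
        okd_bin_path: &Path,
        kubeconfig: &Path,
    ) -> Result<(), InterpretError> {
        let output = Command::new(okd_bin_path.join("oc"))
            .env("KUBECONFIG", kubeconfig)
            .args(["get", "clusteroperators", "--no-headers"])
            .output()
            .await
            .map_err(|e| InterpretError::new(format!("Failed to list clusteroperators: {e}")))?;

        for line in String::from_utf8_lossy(&output.stdout).lines() {
            let cols: Vec<&str> = line.split_whitespace().collect();
            if cols.len() < 5 {
                continue; // skip anything that does not look like a status row
            }
            let (name, available, degraded) = (cols[0], cols[2], cols[4]);
            if available != "True" || degraded != "False" {
                return Err(InterpretError::new(format!(
                    "ClusterOperator {name} is not healthy (available={available}, degraded={degraded})"
                )));
            }
        }
        Ok(())
    }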
							
								
								
									
101  harmony/src/modules/okd/bootstrap_06_installation_report.rs  Normal file
							| @ -0,0 +1,101 @@ | |||||||
|  | // -------------------------------------------------------------------------------------------------
 | ||||||
|  | use async_trait::async_trait; | ||||||
|  | use derive_new::new; | ||||||
|  | use harmony_secret::SecretManager; | ||||||
|  | use harmony_types::id::Id; | ||||||
|  | use log::{debug, error, info, warn}; | ||||||
|  | use serde::{Deserialize, Serialize}; | ||||||
|  | use std::{fmt::Write, path::PathBuf}; | ||||||
|  | use tokio::{fs::File, io::AsyncWriteExt, process::Command}; | ||||||
|  | 
 | ||||||
|  | use crate::{ | ||||||
|  |     config::secret::{RedhatSecret, SshKeyPair}, | ||||||
|  |     data::{FileContent, FilePath, Version}, | ||||||
|  |     hardware::PhysicalHost, | ||||||
|  |     infra::inventory::InventoryRepositoryFactory, | ||||||
|  |     instrumentation::{HarmonyEvent, instrument}, | ||||||
|  |     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, | ||||||
|  |     inventory::{HostRole, Inventory}, | ||||||
|  |     modules::{ | ||||||
|  |         dhcp::DhcpHostBindingScore, | ||||||
|  |         http::{IPxeMacBootFileScore, StaticFilesHttpScore}, | ||||||
|  |         inventory::LaunchDiscoverInventoryAgentScore, | ||||||
|  |         okd::{ | ||||||
|  |             bootstrap_load_balancer::OKDBootstrapLoadBalancerScore, | ||||||
|  |             templates::{BootstrapIpxeTpl, InstallConfigYaml}, | ||||||
|  |         }, | ||||||
|  |     }, | ||||||
|  |     score::Score, | ||||||
|  |     topology::{HAClusterTopology, HostBinding}, | ||||||
|  | }; | ||||||
|  | 
 | ||||||
|  | // Step 06: Installation Report
 | ||||||
|  | // - Emit JSON and concise human summary of nodes, roles, versions, and health.
 | ||||||
|  | // -------------------------------------------------------------------------------------------------
 | ||||||
|  | 
 | ||||||
|  | #[derive(Debug, Clone, Serialize, new)] | ||||||
|  | pub struct OKDSetup06InstallationReportScore {} | ||||||
|  | 
 | ||||||
|  | impl Score<HAClusterTopology> for OKDSetup06InstallationReportScore { | ||||||
|  |     fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> { | ||||||
|  |         Box::new(OKDSetup06InstallationReportInterpret::new(self.clone())) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn name(&self) -> String { | ||||||
|  |         "OKDSetup06InstallationReportScore".to_string() | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | #[derive(Debug, Clone)] | ||||||
|  | pub struct OKDSetup06InstallationReportInterpret { | ||||||
|  |     score: OKDSetup06InstallationReportScore, | ||||||
|  |     version: Version, | ||||||
|  |     status: InterpretStatus, | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | impl OKDSetup06InstallationReportInterpret { | ||||||
|  |     pub fn new(score: OKDSetup06InstallationReportScore) -> Self { | ||||||
|  |         let version = Version::from("1.0.0").unwrap(); | ||||||
|  |         Self { | ||||||
|  |             version, | ||||||
|  |             score, | ||||||
|  |             status: InterpretStatus::QUEUED, | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     async fn generate(&self) -> Result<(), InterpretError> { | ||||||
|  |         info!("[Report] Generating OKD installation report",); | ||||||
|  |         Ok(()) | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | #[async_trait] | ||||||
|  | impl Interpret<HAClusterTopology> for OKDSetup06InstallationReportInterpret { | ||||||
|  |     fn get_name(&self) -> InterpretName { | ||||||
|  |         InterpretName::Custom("OKDSetup06InstallationReport") | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn get_version(&self) -> Version { | ||||||
|  |         self.version.clone() | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn get_status(&self) -> InterpretStatus { | ||||||
|  |         self.status.clone() | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn get_children(&self) -> Vec<Id> { | ||||||
|  |         vec![] | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     async fn execute( | ||||||
|  |         &self, | ||||||
|  |         _inventory: &Inventory, | ||||||
|  |         _topology: &HAClusterTopology, | ||||||
|  |     ) -> Result<Outcome, InterpretError> { | ||||||
|  |         self.generate().await?; | ||||||
|  |         Ok(Outcome::new( | ||||||
|  |             InterpretStatus::SUCCESS, | ||||||
|  |             "Installation report generated".into(), | ||||||
|  |         )) | ||||||
|  |     } | ||||||
|  | } | ||||||
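`generate` is also a placeholder. A sketch of the smallest useful version: build a serializable report and write it as JSON next to the installation files. The field set and output path are illustrative, and `serde_json` is assumed to be available as a dependency.

    use std::path::Path;

    use log::info;
    use serde::Serialize;

    use crate::interpret::InterpretError;

    /// Illustrative report shape; extend with versions, operators, and health details.
    #[derive(Serialize)]
    struct InstallationReport {
        cluster_name: String,
        node_count: usize,
        roles: Vec<String>,
        healthy: bool,
    }

    /// Sketch: write the report as pretty-printed JSON and log a one-line summary.
    async fn write_report(report: &InstallationReport, path: &Path) -> Result<(), InterpretError> {
        let json = serde_json::to_string_pretty(report)
            .map_err(|e| InterpretError::new(format!("Failed to serialize report: {e}")))?;
        tokio::fs::write(path, &json).await.map_err(|e| {
            InterpretError::new(format!(
                "Failed to write report {} : {e}",
                path.to_string_lossy()
            ))
        })?;
        info!(
            "[Report] {} nodes, healthy={}, written to {}",
            report.node_count,
            report.healthy,
            path.to_string_lossy()
        );
        Ok(())
    }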
| @ -37,21 +37,23 @@ impl OKDBootstrapDhcpScore { | |||||||
|                 .clone(), |                 .clone(), | ||||||
|         }); |         }); | ||||||
|         // TODO refactor this so it is not copy pasted from dhcp.rs
 |         // TODO refactor this so it is not copy pasted from dhcp.rs
 | ||||||
|         Self { |         todo!("Add dhcp range") | ||||||
|             dhcp_score: DhcpScore::new( |         // Self {
 | ||||||
|                 host_binding, |         //     dhcp_score: DhcpScore::new(
 | ||||||
|                 // TODO : we should add a tftp server to the topology instead of relying on the
 |         //         host_binding,
 | ||||||
|                 // router address, this is leaking implementation details
 |         //         // TODO : we should add a tftp server to the topology instead of relying on the
 | ||||||
|                 Some(topology.router.get_gateway()), |         //         // router address, this is leaking implementation details
 | ||||||
|                 None, // To allow UEFI boot we cannot provide a legacy file
 |         //         Some(topology.router.get_gateway()),
 | ||||||
|                 Some("undionly.kpxe".to_string()), |         //         None, // To allow UEFI boot we cannot provide a legacy file
 | ||||||
|                 Some("ipxe.efi".to_string()), |         //         Some("undionly.kpxe".to_string()),
 | ||||||
|                 Some(format!( |         //         Some("ipxe.efi".to_string()),
 | ||||||
|                     "http://{}:8080/boot.ipxe", |         //         Some(format!(
 | ||||||
|                     topology.router.get_gateway() |         //             "http://{}:8080/boot.ipxe",
 | ||||||
|                 )), |         //             topology.router.get_gateway()
 | ||||||
|             ), |         //         )),
 | ||||||
|         } |         //         (self.),
 | ||||||
|  |         //     ),
 | ||||||
|  |         // }
 | ||||||
|     } |     } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | |||||||
| @ -8,7 +8,7 @@ use crate::{ | |||||||
|     score::Score, |     score::Score, | ||||||
|     topology::{ |     topology::{ | ||||||
|         BackendServer, HAClusterTopology, HealthCheck, HttpMethod, HttpStatusCode, LoadBalancer, |         BackendServer, HAClusterTopology, HealthCheck, HttpMethod, HttpStatusCode, LoadBalancer, | ||||||
|         LoadBalancerService, Topology, |         LoadBalancerService, SSL, Topology, | ||||||
|     }, |     }, | ||||||
| }; | }; | ||||||
| 
 | 
 | ||||||
| @ -44,6 +44,7 @@ impl OKDBootstrapLoadBalancerScore { | |||||||
|                     "/readyz".to_string(), |                     "/readyz".to_string(), | ||||||
|                     HttpMethod::GET, |                     HttpMethod::GET, | ||||||
|                     HttpStatusCode::Success2xx, |                     HttpStatusCode::Success2xx, | ||||||
|  |                     SSL::SSL, | ||||||
|                 )), |                 )), | ||||||
|             }, |             }, | ||||||
|         ]; |         ]; | ||||||
| @ -54,6 +55,7 @@ impl OKDBootstrapLoadBalancerScore { | |||||||
|             }, |             }, | ||||||
|         } |         } | ||||||
|     } |     } | ||||||
|  | 
 | ||||||
|     fn topology_to_backend_server(topology: &HAClusterTopology, port: u16) -> Vec<BackendServer> { |     fn topology_to_backend_server(topology: &HAClusterTopology, port: u16) -> Vec<BackendServer> { | ||||||
|         let mut backend: Vec<_> = topology |         let mut backend: Vec<_> = topology | ||||||
|             .control_plane |             .control_plane | ||||||
| @ -63,6 +65,14 @@ impl OKDBootstrapLoadBalancerScore { | |||||||
|                 port, |                 port, | ||||||
|             }) |             }) | ||||||
|             .collect(); |             .collect(); | ||||||
|  | 
 | ||||||
|  |         topology.workers.iter().for_each(|worker| { | ||||||
|  |             backend.push(BackendServer { | ||||||
|  |                 address: worker.ip.to_string(), | ||||||
|  |                 port, | ||||||
|  |             }) | ||||||
|  |         }); | ||||||
|  | 
 | ||||||
|         backend.push(BackendServer { |         backend.push(BackendServer { | ||||||
|             address: topology.bootstrap_host.ip.to_string(), |             address: topology.bootstrap_host.ip.to_string(), | ||||||
|             port, |             port, | ||||||
|  | |||||||
| @ -1,3 +1,6 @@ | |||||||
|  | use std::net::Ipv4Addr; | ||||||
|  | 
 | ||||||
|  | use harmony_types::net::IpAddress; | ||||||
| use serde::Serialize; | use serde::Serialize; | ||||||
| 
 | 
 | ||||||
| use crate::{ | use crate::{ | ||||||
| @ -44,6 +47,16 @@ impl OKDDhcpScore { | |||||||
|                 }) |                 }) | ||||||
|             }); |             }); | ||||||
| 
 | 
 | ||||||
|  |         let dhcp_server_ip = match topology.dhcp_server.get_ip() { | ||||||
|  |             std::net::IpAddr::V4(ipv4_addr) => ipv4_addr, | ||||||
|  |             std::net::IpAddr::V6(_ipv6_addr) => todo!("Support ipv6 someday"), | ||||||
|  |         }; | ||||||
|  | 
 | ||||||
|  |         // TODO this could overflow, we should use proper subnet maths here instead of an ip
 | ||||||
|  |         // address and guessing the subnet size from there
 | ||||||
|  |         let start = Ipv4Addr::from(u32::from(dhcp_server_ip) + 100); | ||||||
|  |         let end = Ipv4Addr::from(u32::from(dhcp_server_ip) + 150); | ||||||
|  | 
 | ||||||
|         Self { |         Self { | ||||||
|             // TODO : we should add a tftp server to the topology instead of relying on the
 |             // TODO : we should add a tftp server to the topology instead of relying on the
 | ||||||
|             // router address, this is leaking implementation details
 |             // router address, this is leaking implementation details
 | ||||||
| @ -57,6 +70,8 @@ impl OKDDhcpScore { | |||||||
|                     "http://{}:8080/boot.ipxe", |                     "http://{}:8080/boot.ipxe", | ||||||
|                     topology.router.get_gateway() |                     topology.router.get_gateway() | ||||||
|                 )), |                 )), | ||||||
|  |                 dhcp_range: (IpAddress::from(start), IpAddress::from(end)), | ||||||
|  |                 domain: Some(topology.domain_name.clone()), | ||||||
|             }, |             }, | ||||||
|         } |         } | ||||||
|     } |     } | ||||||
|  | |||||||
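The overflow TODO above could be addressed by deriving the range from an explicit prefix length instead of adding fixed offsets to the server address. A sketch under that assumption (the prefix length would have to come from the topology, which does not expose it today):

    use std::net::Ipv4Addr;

    /// Sketch: compute a DHCP range inside the server's subnet, refusing ranges that
    /// would collide with the network address or spill past the broadcast address.
    fn dhcp_range_for(
        server: Ipv4Addr,
        prefix_len: u32,
        first_offset: u32,
        last_offset: u32,
    ) -> Option<(Ipv4Addr, Ipv4Addr)> {
        if prefix_len > 32 {
            return None;
        }
        let mask = if prefix_len == 0 {
            0
        } else {
            u32::MAX << (32 - prefix_len)
        };
        let network = u32::from(server) & mask;
        let broadcast = network | !mask;

        let start = network.checked_add(first_offset)?;
        let end = network.checked_add(last_offset)?;
        if start <= network || end >= broadcast || start > end {
            return None;
        }
        Some((Ipv4Addr::from(start), Ipv4Addr::from(end)))
    }

For a server at 192.0.2.1/24, `dhcp_range_for(dhcp_server_ip, 24, 100, 150)` yields 192.0.2.100 to 192.0.2.150, close to the current offsets, while returning None for a /28 where they would not fit; the result can be converted with `IpAddress::from` just as the current code does.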
							
								
								
									
73  harmony/src/modules/okd/installation.rs  Normal file
							| @ -0,0 +1,73 @@ | |||||||
|  | //! OKDInstallationPipeline
 | ||||||
|  | //!
 | ||||||
|  | //! Overview
 | ||||||
|  | //! --------
 | ||||||
|  | //! OKDInstallationPipeline orchestrates an end-to-end, bare-metal OKD (OpenShift/OKD 4.19) installation.
 | ||||||
|  | //! It follows a “discovery-first, then provision” strategy with strict ordering,
 | ||||||
|  | //! observable progress, and minimal assumptions about the underlying network.
 | ||||||
|  | //!
 | ||||||
|  | //! High-level flow
 | ||||||
|  | //! 1) OKDSetup01Inventory
 | ||||||
|  | //!    - Serve default iPXE + Kickstart (in-RAM CentOS Stream 9) for discovery only.
 | ||||||
|  | //!    - Enable SSH with the cluster’s pubkey, start a Rust inventory agent.
 | ||||||
|  | //!    - Harmony discovers nodes by scraping the agent endpoint and collects MACs/NICs.
 | ||||||
|  | //!
 | ||||||
|  | //! 2) OKDSetup02Bootstrap
 | ||||||
|  | //!    - User selects which discovered node becomes bootstrap.
 | ||||||
|  | //!    - Prepare the OKD cluster installation files
 | ||||||
|  | //!    - Render per-MAC iPXE for bootstrap with OKD 4.19 SCOS live assets + ignition.
 | ||||||
|  | //!    - Reboot node via SSH; install bootstrap; wait for bootstrap-complete.
 | ||||||
|  | //!
 | ||||||
|  | //! 3) OKDSetup03ControlPlane
 | ||||||
|  | //!    - Render per-MAC iPXE for cp0/cp1/cp2 with ignition. Reboot via SSH, join masters.
 | ||||||
|  | //!    - Configure network bond (where relevant) using OKD NMState MachineConfig
 | ||||||
|  | //!
 | ||||||
|  | //! 4) OKDSetup04Workers
 | ||||||
|  | //!    - Render per-MAC iPXE for worker set; join workers.
 | ||||||
|  | //!    - Configure network bond (where relevant) using OKD NMState MachineConfig
 | ||||||
|  | //!
 | ||||||
|  | //! 5) OKDSetup05SanityCheck
 | ||||||
|  | //!    - Validate API/ingress/clusteroperators; ensure healthy control plane and SDN.
 | ||||||
|  | //!
 | ||||||
|  | //! 6) OKDSetup06InstallationReport
 | ||||||
|  | //!    - Produce a concise, machine-readable report (JSON) and a human summary.
 | ||||||
|  | //!
 | ||||||
|  | //! Network notes
 | ||||||
|  | //! - During Inventory: ports must be simple access (no LACP). DHCP succeeds; iPXE
 | ||||||
|  | //!   loads CentOS Stream live with Kickstart and starts the inventory endpoint.
 | ||||||
|  | //! - During Provisioning: only after SCOS is on disk and Ignition/MC can be applied
 | ||||||
|  | //!   do we set the bond persistently. If early bonding is truly required on a host,
 | ||||||
|  | //!   use kernel args selectively in the per-MAC PXE for that host, but never for the
 | ||||||
|  | //!   generic discovery path.
 | ||||||
|  | //! - This is caused by the inherent race condition between PXE, which cannot perform
 | ||||||
|  | //!   its DHCP recovery process on a bonded network, and the bond itself, which must be | ||||||
|  | //!   configured on both host AND switch before the link comes up properly. | ||||||
|  | //!
 | ||||||
|  | //! Configuration knobs
 | ||||||
|  | //! - public_domain: External wildcard/apps domain (e.g., apps.example.com).
 | ||||||
|  | //! - internal_domain: Internal cluster domain (e.g., cluster.local or harmony.mcd).
 | ||||||
|  | 
 | ||||||
|  | use crate::{ | ||||||
|  |     modules::okd::{ | ||||||
|  |         OKDSetup01InventoryScore, OKDSetup02BootstrapScore, OKDSetup03ControlPlaneScore, | ||||||
|  |         OKDSetup04WorkersScore, OKDSetup05SanityCheckScore, | ||||||
|  |         bootstrap_06_installation_report::OKDSetup06InstallationReportScore, | ||||||
|  |     }, | ||||||
|  |     score::Score, | ||||||
|  |     topology::HAClusterTopology, | ||||||
|  | }; | ||||||
|  | 
 | ||||||
|  | pub struct OKDInstallationPipeline; | ||||||
|  | 
 | ||||||
|  | impl OKDInstallationPipeline { | ||||||
|  |     pub async fn get_all_scores() -> Vec<Box<dyn Score<HAClusterTopology>>> { | ||||||
|  |         vec![ | ||||||
|  |             Box::new(OKDSetup01InventoryScore::new()), | ||||||
|  |             Box::new(OKDSetup02BootstrapScore::new()), | ||||||
|  |             Box::new(OKDSetup03ControlPlaneScore::new()), | ||||||
|  |             Box::new(OKDSetup04WorkersScore::new()), | ||||||
|  |             Box::new(OKDSetup05SanityCheckScore::new()), | ||||||
|  |             Box::new(OKDSetup06InstallationReportScore::new()), | ||||||
|  |         ] | ||||||
|  |     } | ||||||
|  | } | ||||||
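For context, a rough sketch of how this pipeline might be driven. It is not part of the changeset: the `run_okd_installation` helper and the logging are illustrative, the `Inventory` and `HAClusterTopology` are assumed to be built elsewhere, and only `OKDInstallationPipeline::get_all_scores`, `Score::create_interpret`, and `Interpret::execute` come from the code in this diff.

```rust
// Sketch only — assumes an Inventory and HAClusterTopology built elsewhere,
// and uses the Score/Interpret signatures visible in this changeset.
async fn run_okd_installation(
    inventory: &Inventory,
    topology: &HAClusterTopology,
) -> Result<(), InterpretError> {
    // Scores are returned in strict order; stop at the first failure.
    for score in OKDInstallationPipeline::get_all_scores().await {
        let interpret = score.create_interpret();
        interpret.execute(inventory, topology).await?;
        log::info!("OKD step '{}' completed", score.name());
    }
    Ok(())
}
```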
| @ -1,9 +1,9 @@ | |||||||
| use askama::Template; | use askama::Template; | ||||||
| use async_trait::async_trait; | use async_trait::async_trait; | ||||||
| use derive_new::new; | use derive_new::new; | ||||||
| use harmony_types::net::Url; | use harmony_types::net::{IpAddress, Url}; | ||||||
| use serde::Serialize; | use serde::Serialize; | ||||||
| use std::net::IpAddr; | use std::net::{IpAddr, Ipv4Addr}; | ||||||
| 
 | 
 | ||||||
| use crate::{ | use crate::{ | ||||||
|     data::{FileContent, FilePath, Version}, |     data::{FileContent, FilePath, Version}, | ||||||
| @ -16,29 +16,31 @@ use crate::{ | |||||||
| use harmony_types::id::Id; | use harmony_types::id::Id; | ||||||
| 
 | 
 | ||||||
| #[derive(Debug, new, Clone, Serialize)] | #[derive(Debug, new, Clone, Serialize)] | ||||||
| pub struct OkdIpxeScore { | pub struct OKDIpxeScore { | ||||||
|     pub kickstart_filename: String, |     pub kickstart_filename: String, | ||||||
|     pub harmony_inventory_agent: String, |     pub harmony_inventory_agent: String, | ||||||
|     pub cluster_pubkey_filename: String, |     pub cluster_pubkey: FileContent, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| impl<T: Topology + DhcpServer + TftpServer + HttpServer + Router> Score<T> for OkdIpxeScore { | impl<T: Topology + DhcpServer + TftpServer + HttpServer + Router> Score<T> for OKDIpxeScore { | ||||||
|     fn create_interpret(&self) -> Box<dyn Interpret<T>> { |     fn create_interpret(&self) -> Box<dyn Interpret<T>> { | ||||||
|         Box::new(IpxeInterpret::new(self.clone())) |         Box::new(OKDIpxeInterpret::new(self.clone())) | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     fn name(&self) -> String { |     fn name(&self) -> String { | ||||||
|         "OkdIpxeScore".to_string() |         "OKDipxeScore".to_string() | ||||||
|     } |     } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| #[derive(Debug, new, Clone)] | #[derive(Debug, new, Clone)] | ||||||
| pub struct IpxeInterpret { | pub struct OKDIpxeInterpret { | ||||||
|     score: OkdIpxeScore, |     score: OKDIpxeScore, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| #[async_trait] | #[async_trait] | ||||||
| impl<T: Topology + DhcpServer + TftpServer + HttpServer + Router> Interpret<T> for IpxeInterpret { | impl<T: Topology + DhcpServer + TftpServer + HttpServer + Router> Interpret<T> | ||||||
|  |     for OKDIpxeInterpret | ||||||
|  | { | ||||||
|     async fn execute( |     async fn execute( | ||||||
|         &self, |         &self, | ||||||
|         inventory: &Inventory, |         inventory: &Inventory, | ||||||
| @ -46,19 +48,32 @@ impl<T: Topology + DhcpServer + TftpServer + HttpServer + Router> Interpret<T> f | |||||||
|     ) -> Result<Outcome, InterpretError> { |     ) -> Result<Outcome, InterpretError> { | ||||||
|         let gateway_ip = topology.get_gateway(); |         let gateway_ip = topology.get_gateway(); | ||||||
| 
 | 
 | ||||||
|  |         let dhcp_server_ip = match DhcpServer::get_ip(topology) { | ||||||
|  |             std::net::IpAddr::V4(ipv4_addr) => ipv4_addr, | ||||||
|  |             std::net::IpAddr::V6(_ipv6_addr) => todo!("Support ipv6 someday"), | ||||||
|  |         }; | ||||||
|  | 
 | ||||||
|  |         // TODO: this could overflow; use proper subnet math (network address + prefix length) | ||||||
|  |         // instead of offsetting the DHCP server address and guessing the subnet size | ||||||
|  |         let start = Ipv4Addr::from(u32::from(dhcp_server_ip) + 100); | ||||||
|  |         let end = Ipv4Addr::from(u32::from(dhcp_server_ip) + 150); | ||||||
|  | 
 | ||||||
|         let scores: Vec<Box<dyn Score<T>>> = vec![ |         let scores: Vec<Box<dyn Score<T>>> = vec![ | ||||||
|             Box::new(DhcpScore { |             Box::new(DhcpScore { | ||||||
|                 host_binding: vec![], |                 host_binding: vec![], | ||||||
|  |                 domain: None, | ||||||
|                 next_server: Some(topology.get_gateway()), |                 next_server: Some(topology.get_gateway()), | ||||||
|                 boot_filename: None, |                 boot_filename: None, | ||||||
|                 filename: Some("undionly.kpxe".to_string()), |                 filename: Some("undionly.kpxe".to_string()), | ||||||
|                 filename64: Some("ipxe.efi".to_string()), |                 filename64: Some("ipxe.efi".to_string()), | ||||||
|                 filenameipxe: Some(format!("http://{gateway_ip}:8080/boot.ipxe").to_string()), |                 filenameipxe: Some(format!("http://{gateway_ip}:8080/boot.ipxe").to_string()), | ||||||
|  |                 dhcp_range: (IpAddress::from(start), IpAddress::from(end)), | ||||||
|             }), |             }), | ||||||
|             Box::new(TftpScore { |             Box::new(TftpScore { | ||||||
|                 files_to_serve: Url::LocalFolder("./data/pxe/okd/tftpboot/".to_string()), |                 files_to_serve: Url::LocalFolder("./data/pxe/okd/tftpboot/".to_string()), | ||||||
|             }), |             }), | ||||||
|             Box::new(StaticFilesHttpScore { |             Box::new(StaticFilesHttpScore { | ||||||
|  |                 remote_path: None, | ||||||
|                 // TODO The current russh based copy is way too slow, check for a lib update or use scp
 |                 // TODO The current russh based copy is way too slow, check for a lib update or use scp
 | ||||||
|                 // when available
 |                 // when available
 | ||||||
|                 //
 |                 //
 | ||||||
| @ -80,7 +95,7 @@ impl<T: Topology + DhcpServer + TftpServer + HttpServer + Router> Interpret<T> f | |||||||
|                         content: InventoryKickstartTpl { |                         content: InventoryKickstartTpl { | ||||||
|                             gateway_ip: &gateway_ip, |                             gateway_ip: &gateway_ip, | ||||||
|                             harmony_inventory_agent: &self.score.harmony_inventory_agent, |                             harmony_inventory_agent: &self.score.harmony_inventory_agent, | ||||||
|                             cluster_pubkey_filename: &self.score.cluster_pubkey_filename, |                             cluster_pubkey_filename: &self.score.cluster_pubkey.path.to_string(), | ||||||
|                         } |                         } | ||||||
|                         .to_string(), |                         .to_string(), | ||||||
|                     }, |                     }, | ||||||
| @ -92,6 +107,7 @@ impl<T: Topology + DhcpServer + TftpServer + HttpServer + Router> Interpret<T> f | |||||||
|                         } |                         } | ||||||
|                         .to_string(), |                         .to_string(), | ||||||
|                     }, |                     }, | ||||||
|  |                     self.score.cluster_pubkey.clone(), | ||||||
|                 ], |                 ], | ||||||
|             }), |             }), | ||||||
|         ]; |         ]; | ||||||
| @ -107,6 +123,7 @@ impl<T: Topology + DhcpServer + TftpServer + HttpServer + Router> Interpret<T> f | |||||||
|                 Err(e) => return Err(e), |                 Err(e) => return Err(e), | ||||||
|             }; |             }; | ||||||
|         } |         } | ||||||
|  |         inquire::Confirm::new(&format!("Execute the copy: `scp -r data/pxe/okd/http_files/* root@{}:/usr/local/http/`, then confirm when done to continue", HttpServer::get_ip(topology))).prompt().expect("Prompt error"); | ||||||
| 
 | 
 | ||||||
|         Ok(Outcome::success("Ipxe installed".to_string())) |         Ok(Outcome::success("Ipxe installed".to_string())) | ||||||
|     } |     } | ||||||
|  | |||||||
| @ -8,7 +8,7 @@ use crate::{ | |||||||
|     score::Score, |     score::Score, | ||||||
|     topology::{ |     topology::{ | ||||||
|         BackendServer, HAClusterTopology, HealthCheck, HttpMethod, HttpStatusCode, LoadBalancer, |         BackendServer, HAClusterTopology, HealthCheck, HttpMethod, HttpStatusCode, LoadBalancer, | ||||||
|         LoadBalancerService, Topology, |         LoadBalancerService, SSL, Topology, | ||||||
|     }, |     }, | ||||||
| }; | }; | ||||||
| 
 | 
 | ||||||
| @ -62,6 +62,7 @@ impl OKDLoadBalancerScore { | |||||||
|                     "/readyz".to_string(), |                     "/readyz".to_string(), | ||||||
|                     HttpMethod::GET, |                     HttpMethod::GET, | ||||||
|                     HttpStatusCode::Success2xx, |                     HttpStatusCode::Success2xx, | ||||||
|  |                     SSL::SSL, | ||||||
|                 )), |                 )), | ||||||
|             }, |             }, | ||||||
|         ]; |         ]; | ||||||
|  | |||||||
| @ -1,7 +1,21 @@ | |||||||
|  | mod bootstrap_01_prepare; | ||||||
|  | mod bootstrap_02_bootstrap; | ||||||
|  | mod bootstrap_03_control_plane; | ||||||
|  | mod bootstrap_04_workers; | ||||||
|  | mod bootstrap_05_sanity_check; | ||||||
|  | mod bootstrap_06_installation_report; | ||||||
| pub mod bootstrap_dhcp; | pub mod bootstrap_dhcp; | ||||||
| pub mod bootstrap_load_balancer; | pub mod bootstrap_load_balancer; | ||||||
| pub mod dhcp; | pub mod dhcp; | ||||||
| pub mod dns; | pub mod dns; | ||||||
|  | pub mod installation; | ||||||
| pub mod ipxe; | pub mod ipxe; | ||||||
| pub mod load_balancer; | pub mod load_balancer; | ||||||
|  | pub mod templates; | ||||||
| pub mod upgrade; | pub mod upgrade; | ||||||
|  | pub use bootstrap_01_prepare::*; | ||||||
|  | pub use bootstrap_02_bootstrap::*; | ||||||
|  | pub use bootstrap_03_control_plane::*; | ||||||
|  | pub use bootstrap_04_workers::*; | ||||||
|  | pub use bootstrap_05_sanity_check::*; | ||||||
|  | pub use bootstrap_06_installation_report::*; | ||||||
|  | |||||||
20  harmony/src/modules/okd/templates.rs  Normal file
							| @ -0,0 +1,20 @@ | |||||||
|  | use askama::Template; | ||||||
|  | 
 | ||||||
|  | #[derive(Template)] | ||||||
|  | #[template(path = "okd/install-config.yaml.j2")] | ||||||
|  | pub struct InstallConfigYaml<'a> { | ||||||
|  |     pub cluster_domain: &'a str, | ||||||
|  |     pub pull_secret: &'a str, | ||||||
|  |     pub ssh_public_key: &'a str, | ||||||
|  |     pub cluster_name: &'a str, | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | #[derive(Template)] | ||||||
|  | #[template(path = "okd/bootstrap.ipxe.j2")] | ||||||
|  | pub struct BootstrapIpxeTpl<'a> { | ||||||
|  |     pub http_ip: &'a str, | ||||||
|  |     pub scos_path: &'a str, | ||||||
|  |     pub installation_device: &'a str, | ||||||
|  |     pub ignition_http_path: &'a str, | ||||||
|  |     pub ignition_file_name: &'static str, | ||||||
|  | } | ||||||
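For reference, a minimal sketch of rendering one of these askama templates. The field values are placeholders; the only assumption beyond this changeset is that the `okd/bootstrap.ipxe.j2` template added below is on the crate's askama template path, which the `#[template(path = ...)]` attribute implies.

```rust
use askama::Template;

// Sketch only: render the bootstrap iPXE template with made-up values.
fn render_bootstrap_ipxe_example() -> Result<String, askama::Error> {
    let tpl = BootstrapIpxeTpl {
        http_ip: "192.168.1.1",          // placeholder
        scos_path: "okd/scos",           // placeholder
        installation_device: "/dev/sda", // placeholder
        ignition_http_path: "ignition",  // placeholder
        ignition_file_name: "bootstrap.ign",
    };
    tpl.render()
}
```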
| @ -1,4 +1,4 @@ | |||||||
| use std::{sync::Arc, time::Duration}; | use std::sync::Arc; | ||||||
| 
 | 
 | ||||||
| use async_trait::async_trait; | use async_trait::async_trait; | ||||||
| use log::debug; | use log::debug; | ||||||
|  | |||||||
| @ -1,6 +1,63 @@ | |||||||
| #!ipxe | #!ipxe | ||||||
| 
 | 
 | ||||||
|  | # iPXE Chainloading Script | ||||||
|  | # | ||||||
|  | # Attempts to load a host-specific configuration file. If that fails, | ||||||
|  | # it logs the failure, waits for a few seconds, and then attempts to | ||||||
|  | # load a generic fallback configuration. | ||||||
|  | 
 | ||||||
|  | # --- Configuration --- | ||||||
| set base-url http://{{ gateway_ip }}:8080 | set base-url http://{{ gateway_ip }}:8080 | ||||||
| set hostfile ${base-url}/byMAC/01-${mac:hexhyp}.ipxe | set hostfile ${base-url}/byMAC/01-${mac:hexhyp}.ipxe | ||||||
|  | set fallbackfile ${base-url}/fallback.ipxe | ||||||
| 
 | 
 | ||||||
| chain ${hostfile} || chain ${base-url}/fallback.ipxe | # --- Script Logic --- | ||||||
|  | 
 | ||||||
|  | echo | ||||||
|  | echo "========================================" | ||||||
|  | echo "      iPXE Network Boot Initiated" | ||||||
|  | echo "========================================" | ||||||
|  | echo "Client MAC Address: ${mac}" | ||||||
|  | echo "Boot Server URL:    ${base-url}" | ||||||
|  | echo | ||||||
|  | 
 | ||||||
|  | # --- Primary Boot Attempt --- | ||||||
|  | echo "--> Attempting to load host-specific script..." | ||||||
|  | echo "    Location: ${hostfile}" | ||||||
|  | 
 | ||||||
|  | sleep 2 | ||||||
|  | 
 | ||||||
|  | # The "&& exit ||" pattern works as follows: | ||||||
|  | # 1. iPXE attempts to 'chain' the hostfile. | ||||||
|  | # 2. If successful (returns 0), the "&& exit" part is executed, and this script terminates. | ||||||
|  | # 3. If it fails (returns non-zero), the "||" part is triggered, and execution continues below. | ||||||
|  | chain --autofree --replace ${hostfile} && exit || | ||||||
|  | 
 | ||||||
|  | # --- Fallback Boot Attempt --- | ||||||
|  | # This part of the script is only reached if the 'chain ${hostfile}' command above failed. | ||||||
|  | echo | ||||||
|  | echo "--> Host-specific script not found or failed to load." | ||||||
|  | echo | ||||||
|  | 
 | ||||||
|  | echo | ||||||
|  | echo "--> Attempting to load fallback script..." | ||||||
|  | echo "    Location: ${fallbackfile}" | ||||||
|  | 
 | ||||||
|  | sleep 8 | ||||||
|  | 
 | ||||||
|  | chain --autofree --replace ${fallbackfile} && exit || | ||||||
|  | 
 | ||||||
|  | # --- Final Failure --- | ||||||
|  | # This part is only reached if BOTH chain commands have failed. | ||||||
|  | echo | ||||||
|  | echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" | ||||||
|  | echo "    FATAL: All boot scripts failed!" | ||||||
|  | echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" | ||||||
|  | echo "Could not load either the host-specific script or the fallback script." | ||||||
|  | echo "Dropping to iPXE shell for manual troubleshooting in 10 seconds." | ||||||
|  | sleep 8 | ||||||
|  | 
 | ||||||
|  | shell | ||||||
|  | 
 | ||||||
|  | # A final exit is good practice, though 'shell' is a blocking command. | ||||||
|  | exit | ||||||
|  | |||||||
52  harmony/templates/okd/bootstrap.ipxe.j2  Normal file
							| @ -0,0 +1,52 @@ | |||||||
|  | #!ipxe | ||||||
|  | 
 | ||||||
|  | # ================================================================== | ||||||
|  | #    MAC-Specific Boot Script for CoreOS/FCOS Installation | ||||||
|  | # ================================================================== | ||||||
|  | 
 | ||||||
|  | # --- Configuration --- | ||||||
|  | set http_ip {{ http_ip }} | ||||||
|  | set scos_path {{ scos_path }} | ||||||
|  | set inst_dev {{ installation_device }} | ||||||
|  | set ign_path {{ ignition_http_path }} | ||||||
|  | set ign_file {{ ignition_file_name }} | ||||||
|  | 
 | ||||||
|  | # --- Derived Variables --- | ||||||
|  | set base-url http://${http_ip}:8080 | ||||||
|  | set scos-base-url ${base-url}/${scos_path} | ||||||
|  | set ignition-url ${base-url}/${ign_path}/${ign_file} | ||||||
|  | 
 | ||||||
|  | # --- Pre-boot Logging & Verification --- | ||||||
|  | echo | ||||||
|  | echo "Starting MAC-specific installation..." | ||||||
|  | echo "--------------------------------------------------" | ||||||
|  | echo "  Installation Device: ${inst_dev}" | ||||||
|  | echo "  CoreOS Kernel URL:   ${scos-base-url}/scos-live-kernel.x86_64" | ||||||
|  | echo "  Ignition URL:        ${ignition-url}" | ||||||
|  | echo "--------------------------------------------------" | ||||||
|  | echo "Waiting for 3 seconds before loading boot assets..." | ||||||
|  | sleep 3 | ||||||
|  | 
 | ||||||
|  | # --- Load Boot Assets with Failure Checks --- | ||||||
|  | # The '|| goto failure' pattern provides a clean exit if any asset fails to load. | ||||||
|  | echo "Loading kernel..." | ||||||
|  | kernel ${scos-base-url}/scos-live-kernel.x86_64 initrd=main coreos.live.rootfs_url=${scos-base-url}/scos-live-rootfs.x86_64.img coreos.inst.install_dev=${inst_dev} coreos.inst.ignition_url=${ignition-url} || goto failure | ||||||
|  | 
 | ||||||
|  | echo "Loading initramfs..." | ||||||
|  | initrd --name main ${scos-base-url}/scos-live-initramfs.x86_64.img || goto failure | ||||||
|  | 
 | ||||||
|  | # --- Boot --- | ||||||
|  | echo "All assets loaded successfully. Starting boot process..." | ||||||
|  | boot || goto failure | ||||||
|  | 
 | ||||||
|  | # This part is never reached on successful boot. | ||||||
|  | 
 | ||||||
|  | # --- Failure Handling --- | ||||||
|  | :failure | ||||||
|  | echo | ||||||
|  | echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" | ||||||
|  | echo "  ERROR: A boot component failed to load." | ||||||
|  | echo "  Dropping to iPXE shell for manual debugging." | ||||||
|  | echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" | ||||||
|  | sleep 10 | ||||||
|  | shell | ||||||
24  harmony/templates/okd/install-config.yaml.j2  Normal file
							| @ -0,0 +1,24 @@ | |||||||
|  | # Built from https://docs.okd.io/latest/installing/installing_bare_metal/upi/installing-bare-metal.html#installation-bare-metal-config-yaml_installing-bare-metal | ||||||
|  | apiVersion: v1 | ||||||
|  | baseDomain: {{ cluster_domain }} | ||||||
|  | compute:  | ||||||
|  | - hyperthreading: Enabled  | ||||||
|  |   name: worker | ||||||
|  |   replicas: 0  | ||||||
|  | controlPlane:  | ||||||
|  |   hyperthreading: Enabled  | ||||||
|  |   name: master | ||||||
|  |   replicas: 3  | ||||||
|  | metadata: | ||||||
|  |   name: {{ cluster_name }}  | ||||||
|  | networking: | ||||||
|  |   clusterNetwork: | ||||||
|  |   - cidr: 10.128.0.0/14  | ||||||
|  |     hostPrefix: 23  | ||||||
|  |   networkType: OVNKubernetes  | ||||||
|  |   serviceNetwork:  | ||||||
|  |   - 172.30.0.0/16 | ||||||
|  | platform: | ||||||
|  |   none: {}  | ||||||
|  | pullSecret: '{{ pull_secret|safe }}'  | ||||||
|  | sshKey: '{{ ssh_public_key }}'  | ||||||
| @ -18,6 +18,7 @@ infisical = { git = "https://github.com/jggc/rust-sdk.git", branch = "patch-1" } | |||||||
| tokio.workspace = true | tokio.workspace = true | ||||||
| async-trait.workspace = true | async-trait.workspace = true | ||||||
| http.workspace = true | http.workspace = true | ||||||
|  | inquire.workspace = true | ||||||
| 
 | 
 | ||||||
| [dev-dependencies] | [dev-dependencies] | ||||||
| pretty_assertions.workspace = true | pretty_assertions.workspace = true | ||||||
|  | |||||||
| @ -9,6 +9,7 @@ use config::INFISICAL_ENVIRONMENT; | |||||||
| use config::INFISICAL_PROJECT_ID; | use config::INFISICAL_PROJECT_ID; | ||||||
| use config::INFISICAL_URL; | use config::INFISICAL_URL; | ||||||
| use config::SECRET_STORE; | use config::SECRET_STORE; | ||||||
|  | use log::debug; | ||||||
| use serde::{Serialize, de::DeserializeOwned}; | use serde::{Serialize, de::DeserializeOwned}; | ||||||
| use std::fmt; | use std::fmt; | ||||||
| use store::InfisicalSecretStore; | use store::InfisicalSecretStore; | ||||||
| @ -101,6 +102,7 @@ impl SecretManager { | |||||||
|     /// Retrieves and deserializes a secret.
 |     /// Retrieves and deserializes a secret.
 | ||||||
|     pub async fn get<T: Secret>() -> Result<T, SecretStoreError> { |     pub async fn get<T: Secret>() -> Result<T, SecretStoreError> { | ||||||
|         let manager = get_secret_manager().await; |         let manager = get_secret_manager().await; | ||||||
|  |         debug!("Getting secret ns {} key {}", &manager.namespace, T::KEY); | ||||||
|         let raw_value = manager.store.get_raw(&manager.namespace, T::KEY).await?; |         let raw_value = manager.store.get_raw(&manager.namespace, T::KEY).await?; | ||||||
|         serde_json::from_slice(&raw_value).map_err(|e| SecretStoreError::Deserialization { |         serde_json::from_slice(&raw_value).map_err(|e| SecretStoreError::Deserialization { | ||||||
|             key: T::KEY.to_string(), |             key: T::KEY.to_string(), | ||||||
| @ -108,6 +110,42 @@ impl SecretManager { | |||||||
|         }) |         }) | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|  |     pub async fn get_or_prompt<T: Secret>() -> Result<T, SecretStoreError> { | ||||||
|  |         let secret = Self::get::<T>().await; | ||||||
|  |         let manager = get_secret_manager().await; | ||||||
|  |         let prompted = secret.is_err(); | ||||||
|  | 
 | ||||||
|  |         let secret = secret.or_else(|e| -> Result<T, SecretStoreError> { | ||||||
|  |             debug!("Could not get secret : {e}"); | ||||||
|  | 
 | ||||||
|  |             let ns = &manager.namespace; | ||||||
|  |             let key = T::KEY; | ||||||
|  |             let secret_json = inquire::Text::new(&format!( | ||||||
|  |                 "Secret not found for {} {}, paste the JSON here :", | ||||||
|  |                 ns, key | ||||||
|  |             )) | ||||||
|  |             .prompt() | ||||||
|  |             .map_err(|e| { | ||||||
|  |                 SecretStoreError::Store(format!("Failed to prompt secret {ns} {key} : {e}").into()) | ||||||
|  |             })?; | ||||||
|  | 
 | ||||||
|  |             let secret: T = serde_json::from_str(&secret_json).map_err(|e| { | ||||||
|  |                 SecretStoreError::Deserialization { | ||||||
|  |                     key: T::KEY.to_string(), | ||||||
|  |                     source: e, | ||||||
|  |                 } | ||||||
|  |             })?; | ||||||
|  | 
 | ||||||
|  |             Ok(secret) | ||||||
|  |         })?; | ||||||
|  | 
 | ||||||
|  |         if prompted { | ||||||
|  |             Self::set(&secret).await?; | ||||||
|  |         } | ||||||
|  | 
 | ||||||
|  |         Ok(secret) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|     /// Serializes and stores a secret.
 |     /// Serializes and stores a secret.
 | ||||||
|     pub async fn set<T: Secret>(secret: &T) -> Result<(), SecretStoreError> { |     pub async fn set<T: Secret>(secret: &T) -> Result<(), SecretStoreError> { | ||||||
|         let manager = get_secret_manager().await; |         let manager = get_secret_manager().await; | ||||||
|  | |||||||
| @ -1,5 +1,5 @@ | |||||||
| use async_trait::async_trait; | use async_trait::async_trait; | ||||||
| use log::info; | use log::{debug, info}; | ||||||
| use std::path::{Path, PathBuf}; | use std::path::{Path, PathBuf}; | ||||||
| 
 | 
 | ||||||
| use crate::{SecretStore, SecretStoreError}; | use crate::{SecretStore, SecretStoreError}; | ||||||
| @ -24,7 +24,7 @@ impl SecretStore for LocalFileSecretStore { | |||||||
|             .join("secrets"); |             .join("secrets"); | ||||||
| 
 | 
 | ||||||
|         let file_path = Self::get_file_path(&data_dir, ns, key); |         let file_path = Self::get_file_path(&data_dir, ns, key); | ||||||
|         info!( |         debug!( | ||||||
|             "LOCAL_STORE: Getting key '{key}' from namespace '{ns}' at {}", |             "LOCAL_STORE: Getting key '{key}' from namespace '{ns}' at {}", | ||||||
|             file_path.display() |             file_path.display() | ||||||
|         ); |         ); | ||||||
|  | |||||||
| @ -48,6 +48,12 @@ impl From<String> for Id { | |||||||
|     } |     } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | impl From<Id> for String { | ||||||
|  |     fn from(value: Id) -> Self { | ||||||
|  |         value.to_string() | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
| impl std::fmt::Display for Id { | impl std::fmt::Display for Id { | ||||||
|     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { |     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { | ||||||
|         f.write_str(&self.value) |         f.write_str(&self.value) | ||||||
|  | |||||||
| @ -21,7 +21,7 @@ impl From<&MacAddress> for String { | |||||||
| 
 | 
 | ||||||
| impl std::fmt::Display for MacAddress { | impl std::fmt::Display for MacAddress { | ||||||
|     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { |     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { | ||||||
|         f.write_fmt(format_args!("MacAddress {}", String::from(self))) |         f.write_str(&String::from(self)) | ||||||
|     } |     } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | |||||||
5  migrations/20250902035357_Host_role_mapping.sql  Normal file
							| @ -0,0 +1,5 @@ | |||||||
|  | CREATE TABLE IF NOT EXISTS host_role_mapping ( | ||||||
|  |     id INTEGER PRIMARY KEY AUTOINCREMENT, | ||||||
|  |     host_id TEXT NOT NULL, | ||||||
|  |     role TEXT NOT NULL | ||||||
|  | ); | ||||||
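A possible way to consume this table from Rust, assuming the project queries SQLite through sqlx; pool setup, module paths, and error handling are outside this changeset and hypothetical here.

```rust
use sqlx::SqlitePool;

// Sketch only: list the host ids recorded for a given role.
async fn hosts_with_role(pool: &SqlitePool, role: &str) -> Result<Vec<String>, sqlx::Error> {
    let rows: Vec<(String,)> =
        sqlx::query_as("SELECT host_id FROM host_role_mapping WHERE role = ?")
            .bind(role)
            .fetch_all(pool)
            .await?;
    Ok(rows.into_iter().map(|(host_id,)| host_id).collect())
}
```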
| @ -36,6 +36,27 @@ pub struct DnsMasq { | |||||||
|     pub dhcp_options: Vec<DhcpOptions>, |     pub dhcp_options: Vec<DhcpOptions>, | ||||||
|     pub dhcp_boot: Vec<DhcpBoot>, |     pub dhcp_boot: Vec<DhcpBoot>, | ||||||
|     pub dhcp_tags: Vec<RawXml>, |     pub dhcp_tags: Vec<RawXml>, | ||||||
|  |     pub hosts: Vec<DnsmasqHost>, | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | #[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize, Clone)] | ||||||
|  | #[yaserde(rename = "hosts")] | ||||||
|  | pub struct DnsmasqHost { | ||||||
|  |     #[yaserde(attribute = true)] | ||||||
|  |     pub uuid: String, | ||||||
|  |     pub host: String, | ||||||
|  |     pub domain: MaybeString, | ||||||
|  |     pub local: MaybeString, | ||||||
|  |     pub ip: MaybeString, | ||||||
|  |     pub cnames: MaybeString, | ||||||
|  |     pub client_id: MaybeString, | ||||||
|  |     pub hwaddr: MaybeString, | ||||||
|  |     pub lease_time: MaybeString, | ||||||
|  |     pub ignore: Option<u8>, | ||||||
|  |     pub set_tag: MaybeString, | ||||||
|  |     pub descr: MaybeString, | ||||||
|  |     pub comments: MaybeString, | ||||||
|  |     pub aliases: MaybeString, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // Represents the <dhcp> element and its nested fields.
 | // Represents the <dhcp> element and its nested fields.
 | ||||||
|  | |||||||
| @ -189,7 +189,7 @@ pub struct System { | |||||||
|     pub timeservers: String, |     pub timeservers: String, | ||||||
|     pub webgui: WebGui, |     pub webgui: WebGui, | ||||||
|     pub usevirtualterminal: u8, |     pub usevirtualterminal: u8, | ||||||
|     pub disablenatreflection: String, |     pub disablenatreflection: Option<String>, | ||||||
|     pub disableconsolemenu: u8, |     pub disableconsolemenu: u8, | ||||||
|     pub disablevlanhwfilter: u8, |     pub disablevlanhwfilter: u8, | ||||||
|     pub disablechecksumoffloading: u8, |     pub disablechecksumoffloading: u8, | ||||||
| @ -256,7 +256,7 @@ pub struct Firmware { | |||||||
|     #[yaserde(rename = "type")] |     #[yaserde(rename = "type")] | ||||||
|     pub firmware_type: MaybeString, |     pub firmware_type: MaybeString, | ||||||
|     pub subscription: MaybeString, |     pub subscription: MaybeString, | ||||||
|     pub reboot: MaybeString, |     pub reboot: Option<MaybeString>, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| #[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)] | #[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)] | ||||||
| @ -267,12 +267,12 @@ pub struct Bogons { | |||||||
| #[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)] | #[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)] | ||||||
| pub struct Group { | pub struct Group { | ||||||
|     pub name: String, |     pub name: String, | ||||||
|     pub description: String, |     pub description: Option<String>, | ||||||
|     pub scope: String, |     pub scope: String, | ||||||
|     pub gid: u32, |     pub gid: u32, | ||||||
|     pub member: Vec<u32>, |     pub member: String, | ||||||
|     #[yaserde(rename = "priv")] |     #[yaserde(rename = "priv")] | ||||||
|     pub priv_field: String, |     pub priv_field: Option<String>, | ||||||
|     pub source_networks: Option<MaybeString>, |     pub source_networks: Option<MaybeString>, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| @ -1449,6 +1449,9 @@ pub struct Vip { | |||||||
|     pub advbase: Option<MaybeString>, |     pub advbase: Option<MaybeString>, | ||||||
|     pub advskew: Option<MaybeString>, |     pub advskew: Option<MaybeString>, | ||||||
|     pub descr: Option<MaybeString>, |     pub descr: Option<MaybeString>, | ||||||
|  |     pub peer: Option<MaybeString>, | ||||||
|  |     pub peer6: Option<MaybeString>, | ||||||
|  |     pub nosync: Option<MaybeString>, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| #[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)] | #[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)] | ||||||
|  | |||||||
| @ -21,6 +21,7 @@ serde_json = "1.0.133" | |||||||
| tokio-util = { version = "0.7.13", features = ["codec"] } | tokio-util = { version = "0.7.13", features = ["codec"] } | ||||||
| tokio-stream = "0.1.17" | tokio-stream = "0.1.17" | ||||||
| uuid.workspace = true | uuid.workspace = true | ||||||
|  | sha2 = "0.10.9" | ||||||
| 
 | 
 | ||||||
| [dev-dependencies] | [dev-dependencies] | ||||||
| pretty_assertions.workspace = true | pretty_assertions.workspace = true | ||||||
|  | |||||||
| @ -1,10 +1,10 @@ | |||||||
| use std::sync::Arc; | use std::sync::Arc; | ||||||
| 
 | 
 | ||||||
| use crate::{ | use crate::{ | ||||||
|     config::{SshConfigManager, SshCredentials, SshOPNSenseShell}, |     config::{check_hash, get_hash, SshConfigManager, SshCredentials, SshOPNSenseShell}, | ||||||
|     error::Error, |     error::Error, | ||||||
|     modules::{ |     modules::{ | ||||||
|         caddy::CaddyConfig, dhcp_legacy::DhcpConfigLegacyISC, dns::DnsConfig, |         caddy::CaddyConfig, dhcp_legacy::DhcpConfigLegacyISC, dns::UnboundDnsConfig, | ||||||
|         dnsmasq::DhcpConfigDnsMasq, load_balancer::LoadBalancerConfig, tftp::TftpConfig, |         dnsmasq::DhcpConfigDnsMasq, load_balancer::LoadBalancerConfig, tftp::TftpConfig, | ||||||
|     }, |     }, | ||||||
| }; | }; | ||||||
| @ -12,6 +12,7 @@ use log::{debug, info, trace, warn}; | |||||||
| use opnsense_config_xml::OPNsense; | use opnsense_config_xml::OPNsense; | ||||||
| use russh::client; | use russh::client; | ||||||
| use serde::Serialize; | use serde::Serialize; | ||||||
|  | use sha2::Digest; | ||||||
| 
 | 
 | ||||||
| use super::{ConfigManager, OPNsenseShell}; | use super::{ConfigManager, OPNsenseShell}; | ||||||
| 
 | 
 | ||||||
| @ -20,6 +21,7 @@ pub struct Config { | |||||||
|     opnsense: OPNsense, |     opnsense: OPNsense, | ||||||
|     repository: Arc<dyn ConfigManager>, |     repository: Arc<dyn ConfigManager>, | ||||||
|     shell: Arc<dyn OPNsenseShell>, |     shell: Arc<dyn OPNsenseShell>, | ||||||
|  |     hash: String, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| impl Serialize for Config { | impl Serialize for Config { | ||||||
| @ -36,8 +38,10 @@ impl Config { | |||||||
|         repository: Arc<dyn ConfigManager>, |         repository: Arc<dyn ConfigManager>, | ||||||
|         shell: Arc<dyn OPNsenseShell>, |         shell: Arc<dyn OPNsenseShell>, | ||||||
|     ) -> Result<Self, Error> { |     ) -> Result<Self, Error> { | ||||||
|  |         let (opnsense, hash) = Self::get_opnsense_instance(repository.clone()).await?; | ||||||
|         Ok(Self { |         Ok(Self { | ||||||
|             opnsense: Self::get_opnsense_instance(repository.clone()).await?, |             opnsense, | ||||||
|  |             hash, | ||||||
|             repository, |             repository, | ||||||
|             shell, |             shell, | ||||||
|         }) |         }) | ||||||
| @ -51,8 +55,8 @@ impl Config { | |||||||
|         DhcpConfigDnsMasq::new(&mut self.opnsense, self.shell.clone()) |         DhcpConfigDnsMasq::new(&mut self.opnsense, self.shell.clone()) | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     pub fn dns(&mut self) -> DnsConfig<'_> { |     pub fn dns(&mut self) -> DhcpConfigDnsMasq<'_> { | ||||||
|         DnsConfig::new(&mut self.opnsense) |         DhcpConfigDnsMasq::new(&mut self.opnsense, self.shell.clone()) | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     pub fn tftp(&mut self) -> TftpConfig<'_> { |     pub fn tftp(&mut self) -> TftpConfig<'_> { | ||||||
| @ -146,7 +150,7 @@ impl Config { | |||||||
| 
 | 
 | ||||||
|     async fn reload_config(&mut self) -> Result<(), Error> { |     async fn reload_config(&mut self) -> Result<(), Error> { | ||||||
|         info!("Reloading opnsense live config"); |         info!("Reloading opnsense live config"); | ||||||
|         self.opnsense = Self::get_opnsense_instance(self.repository.clone()).await?; |         (self.opnsense, self.hash) = Self::get_opnsense_instance(self.repository.clone()).await?; | ||||||
|         Ok(()) |         Ok(()) | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
| @ -158,14 +162,15 @@ impl Config { | |||||||
|     /// Save the config to the repository. This method is meant NOT to reload services, only save
 |     /// Save the config to the repository. This method is meant NOT to reload services, only save
 | ||||||
|     /// the config to the live file/database and perhaps take a backup when relevant.
 |     /// the config to the live file/database and perhaps take a backup when relevant.
 | ||||||
|     pub async fn save(&self) -> Result<(), Error> { |     pub async fn save(&self) -> Result<(), Error> { | ||||||
|         self.repository.save_config(&self.opnsense.to_xml()).await |         let xml = &self.opnsense.to_xml(); | ||||||
|  |         self.repository.save_config(xml, &self.hash).await | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     /// Save the configuration and reload all services. Be careful with this one as it will cause
 |     /// Save the configuration and reload all services. Be careful with this one as it will cause
 | ||||||
|     /// downtime in many cases, such as a PPPoE renegociation
 |     /// downtime in many cases, such as a PPPoE renegociation
 | ||||||
|     pub async fn apply(&self) -> Result<(), Error> { |     pub async fn apply(&self) -> Result<(), Error> { | ||||||
|         self.repository |         self.repository | ||||||
|             .apply_new_config(&self.opnsense.to_xml()) |             .apply_new_config(&self.opnsense.to_xml(), &self.hash) | ||||||
|             .await |             .await | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
| @ -193,11 +198,14 @@ impl Config { | |||||||
|         Config::new(manager, shell).await.unwrap() |         Config::new(manager, shell).await.unwrap() | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     async fn get_opnsense_instance(repository: Arc<dyn ConfigManager>) -> Result<OPNsense, Error> { |     async fn get_opnsense_instance( | ||||||
|  |         repository: Arc<dyn ConfigManager>, | ||||||
|  |     ) -> Result<(OPNsense, String), Error> { | ||||||
|         let xml = repository.load_as_str().await?; |         let xml = repository.load_as_str().await?; | ||||||
|         trace!("xml {}", xml); |         trace!("xml {}", xml); | ||||||
| 
 | 
 | ||||||
|         Ok(OPNsense::from(xml)) |         let hash = get_hash(&xml); | ||||||
|  |         Ok((OPNsense::from(xml), hash)) | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     pub async fn run_command(&self, command: &str) -> Result<String, Error> { |     pub async fn run_command(&self, command: &str) -> Result<String, Error> { | ||||||
| @ -219,13 +227,14 @@ mod tests { | |||||||
|     #[tokio::test] |     #[tokio::test] | ||||||
|     async fn test_load_config_from_local_file() { |     async fn test_load_config_from_local_file() { | ||||||
|         for path in [ |         for path in [ | ||||||
|             "src/tests/data/config-opnsense-25.1.xml", |             // "src/tests/data/config-opnsense-25.1.xml",
 | ||||||
|             "src/tests/data/config-vm-test.xml", |             // "src/tests/data/config-vm-test.xml",
 | ||||||
|             "src/tests/data/config-structure.xml", |             "src/tests/data/config-structure.xml", | ||||||
|             "src/tests/data/config-full-1.xml", |             "src/tests/data/config-full-1.xml", | ||||||
|             "src/tests/data/config-full-ncd0.xml", |             // "src/tests/data/config-full-ncd0.xml",
 | ||||||
|             "src/tests/data/config-full-25.7.xml", |             // "src/tests/data/config-full-25.7.xml",
 | ||||||
|             "src/tests/data/config-full-25.7-dummy-dnsmasq-options.xml", |             // "src/tests/data/config-full-25.7-dummy-dnsmasq-options.xml",
 | ||||||
|  |             "src/tests/data/config-25.7-dnsmasq-static-host.xml", | ||||||
|         ] { |         ] { | ||||||
|             let mut test_file_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); |             let mut test_file_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); | ||||||
|             test_file_path.push(path); |             test_file_path.push(path); | ||||||
| @ -243,13 +252,13 @@ mod tests { | |||||||
| 
 | 
 | ||||||
|             let serialized = config.opnsense.to_xml(); |             let serialized = config.opnsense.to_xml(); | ||||||
| 
 | 
 | ||||||
|             fs::write("/tmp/serialized.xml", &serialized).unwrap(); |  | ||||||
| 
 |  | ||||||
|             // Since the order of all fields is not always the same in opnsense config files
 |             // Since the order of all fields is not always the same in opnsense config files
 | ||||||
|             // I think it is good enough to have exactly the same amount of the same lines
 |             // I think it is good enough to have exactly the same amount of the same lines
 | ||||||
|             [config_file_str.lines().collect::<Vec<_>>()].sort(); |             let mut before = config_file_str.lines().collect::<Vec<_>>(); | ||||||
|             [config_file_str.lines().collect::<Vec<_>>()].sort(); |             let mut after = serialized.lines().collect::<Vec<_>>(); | ||||||
|             assert_eq!((), ()); |             before.sort(); | ||||||
|  |             after.sort(); | ||||||
|  |             assert_eq!(before, after); | ||||||
|         } |         } | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
| @ -279,8 +288,6 @@ mod tests { | |||||||
| 
 | 
 | ||||||
|         let serialized = config.opnsense.to_xml(); |         let serialized = config.opnsense.to_xml(); | ||||||
| 
 | 
 | ||||||
|         fs::write("/tmp/serialized.xml", &serialized).unwrap(); |  | ||||||
| 
 |  | ||||||
|         let mut test_file_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); |         let mut test_file_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); | ||||||
|         test_file_path.push("src/tests/data/config-structure-with-dhcp-staticmap-entry.xml"); |         test_file_path.push("src/tests/data/config-structure-with-dhcp-staticmap-entry.xml"); | ||||||
| 
 | 
 | ||||||
|  | |||||||
| @ -1,3 +1,4 @@ | |||||||
|  | use crate::config::check_hash; | ||||||
| use crate::config::manager::ConfigManager; | use crate::config::manager::ConfigManager; | ||||||
| use crate::error::Error; | use crate::error::Error; | ||||||
| use async_trait::async_trait; | use async_trait::async_trait; | ||||||
| @ -20,11 +21,17 @@ impl ConfigManager for LocalFileConfigManager { | |||||||
|         Ok(fs::read_to_string(&self.file_path)?) |         Ok(fs::read_to_string(&self.file_path)?) | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     async fn save_config(&self, content: &str) -> Result<(), Error> { |     async fn save_config(&self, content: &str, hash: &str) -> Result<(), Error> { | ||||||
|  |         let current_content = self.load_as_str().await?; | ||||||
|  |         if !check_hash(¤t_content, hash) { | ||||||
|  |             return Err(Error::Config(format!( | ||||||
|  |                 "OPNSense config file changed since loading it! Hash when loading : {hash}" | ||||||
|  |             ))); | ||||||
|  |         } | ||||||
|         Ok(fs::write(&self.file_path, content)?) |         Ok(fs::write(&self.file_path, content)?) | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     async fn apply_new_config(&self, content: &str) -> Result<(), Error> { |     async fn apply_new_config(&self, content: &str, hash: &str) -> Result<(), Error> { | ||||||
|         self.save_config(content).await |         self.save_config(content, hash).await | ||||||
|     } |     } | ||||||
| } | } | ||||||
|  | |||||||
| @ -9,6 +9,8 @@ use crate::Error; | |||||||
| #[async_trait] | #[async_trait] | ||||||
| pub trait ConfigManager: std::fmt::Debug + Send + Sync { | pub trait ConfigManager: std::fmt::Debug + Send + Sync { | ||||||
|     async fn load_as_str(&self) -> Result<String, Error>; |     async fn load_as_str(&self) -> Result<String, Error>; | ||||||
|     async fn save_config(&self, content: &str) -> Result<(), Error>; |     /// Save a new version of the config file, making sure that the hash still represents the file
 | ||||||
|     async fn apply_new_config(&self, content: &str) -> Result<(), Error>; |     /// currently stored in /conf/config.xml
 | ||||||
|  |     async fn save_config(&self, content: &str, hash: &str) -> Result<(), Error>; | ||||||
|  |     async fn apply_new_config(&self, content: &str, hash: &str) -> Result<(), Error>; | ||||||
| } | } | ||||||
|  | |||||||
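To illustrate the contract, here is a sketch of the intended load–edit–save flow with the hash guard. `get_hash` is the helper added in the SSH config manager below (its import path is assumed), and `mutate_xml` is a stand-in for whatever edit the caller performs.

```rust
// Sketch only: optimistic-concurrency flow for a ConfigManager.
async fn save_with_hash_guard(
    manager: &dyn ConfigManager,
    mutate_xml: impl Fn(&str) -> String,
) -> Result<(), Error> {
    let original = manager.load_as_str().await?;
    let hash = get_hash(&original);       // remember what was loaded
    let updated = mutate_xml(&original);  // edit the XML in memory
    // save_config re-reads the live config and rejects (or warns) when it no
    // longer matches `hash`, i.e. someone else changed it in the meantime.
    manager.save_config(&updated, &hash).await
}
```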
| @ -1,8 +1,9 @@ | |||||||
| use crate::config::{manager::ConfigManager, OPNsenseShell}; | use crate::config::{manager::ConfigManager, OPNsenseShell}; | ||||||
| use crate::error::Error; | use crate::error::Error; | ||||||
| use async_trait::async_trait; | use async_trait::async_trait; | ||||||
| use log::info; | use log::{info, warn}; | ||||||
| use russh_keys::key::KeyPair; | use russh_keys::key::KeyPair; | ||||||
|  | use sha2::Digest; | ||||||
| use std::sync::Arc; | use std::sync::Arc; | ||||||
| 
 | 
 | ||||||
| #[derive(Debug)] | #[derive(Debug)] | ||||||
| @ -35,10 +36,10 @@ impl SshConfigManager { | |||||||
|             .await |             .await | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     async fn move_to_live_config(&self, new_config_path: &str) -> Result<String, Error> { |     async fn copy_to_live_config(&self, new_config_path: &str) -> Result<String, Error> { | ||||||
|         info!("Overwriting OPNSense /conf/config.xml with {new_config_path}"); |         info!("Overwriting OPNSense /conf/config.xml with {new_config_path}"); | ||||||
|         self.opnsense_shell |         self.opnsense_shell | ||||||
|             .exec(&format!("mv {new_config_path} /conf/config.xml")) |             .exec(&format!("cp {new_config_path} /conf/config.xml")) | ||||||
|             .await |             .await | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
| @ -56,19 +57,41 @@ impl ConfigManager for SshConfigManager { | |||||||
|         self.opnsense_shell.exec("cat /conf/config.xml").await |         self.opnsense_shell.exec("cat /conf/config.xml").await | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     async fn save_config(&self, content: &str) -> Result<(), Error> { |     async fn save_config(&self, content: &str, hash: &str) -> Result<(), Error> { | ||||||
|  |         let current_content = self.load_as_str().await?; | ||||||
|  | 
 | ||||||
|  |         if !check_hash(¤t_content, hash) { | ||||||
|  |             warn!("OPNSense config file changed since loading it! Hash when loading : {hash}"); | ||||||
|  |             // return Err(Error::Config(format!(
 | ||||||
|  |             //     "OPNSense config file changed since loading it! Hash when loading : {hash}"
 | ||||||
|  |             // )));
 | ||||||
|  |         } | ||||||
|  | 
 | ||||||
|         let temp_filename = self |         let temp_filename = self | ||||||
|             .opnsense_shell |             .opnsense_shell | ||||||
|             .write_content_to_temp_file(content) |             .write_content_to_temp_file(content) | ||||||
|             .await?; |             .await?; | ||||||
|         self.backup_config_remote().await?; |         self.backup_config_remote().await?; | ||||||
|         self.move_to_live_config(&temp_filename).await?; |         self.copy_to_live_config(&temp_filename).await?; | ||||||
|         Ok(()) |         Ok(()) | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     async fn apply_new_config(&self, content: &str) -> Result<(), Error> { |     async fn apply_new_config(&self, content: &str, hash: &str) -> Result<(), Error> { | ||||||
|         self.save_config(content).await?; |         self.save_config(content, hash).await?; | ||||||
|         self.reload_all_services().await?; |         self.reload_all_services().await?; | ||||||
|         Ok(()) |         Ok(()) | ||||||
|     } |     } | ||||||
| } | } | ||||||
|  | 
 | ||||||
|  | pub fn get_hash(content: &str) -> String { | ||||||
|  |     let mut hasher = sha2::Sha256::new(); | ||||||
|  |     hasher.update(content.as_bytes()); | ||||||
|  |     let hash_bytes = hasher.finalize(); | ||||||
|  |     let hash_string = format!("{:x}", hash_bytes); | ||||||
|  |     info!("Loaded OPNSense config.xml with hash {hash_string:?}"); | ||||||
|  |     hash_string | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | pub fn check_hash(content: &str, source_hash: &str) -> bool { | ||||||
|  |     get_hash(content) == source_hash | ||||||
|  | } | ||||||
|  | |||||||
| @ -39,7 +39,7 @@ impl OPNsenseShell for SshOPNSenseShell { | |||||||
| 
 | 
 | ||||||
|     async fn write_content_to_temp_file(&self, content: &str) -> Result<String, Error> { |     async fn write_content_to_temp_file(&self, content: &str) -> Result<String, Error> { | ||||||
|         let temp_filename = format!( |         let temp_filename = format!( | ||||||
|             "/tmp/opnsense-config-tmp-config_{}", |             "/conf/harmony/opnsense-config-{}", | ||||||
|             SystemTime::now() |             SystemTime::now() | ||||||
|                 .duration_since(UNIX_EPOCH) |                 .duration_since(UNIX_EPOCH) | ||||||
|                 .unwrap() |                 .unwrap() | ||||||
|  | |||||||
| @ -1,4 +1,4 @@ | |||||||
| #[derive(Debug)] | #[derive(Debug, PartialEq)] | ||||||
| pub enum DhcpError { | pub enum DhcpError { | ||||||
|     InvalidMacAddress(String), |     InvalidMacAddress(String), | ||||||
|     InvalidIpAddress(String), |     InvalidIpAddress(String), | ||||||
|  | |||||||
| @ -1,10 +1,10 @@ | |||||||
| use opnsense_config_xml::{Host, OPNsense}; | use opnsense_config_xml::{Host, OPNsense}; | ||||||
| 
 | 
 | ||||||
| pub struct DnsConfig<'a> { | pub struct UnboundDnsConfig<'a> { | ||||||
|     opnsense: &'a mut OPNsense, |     opnsense: &'a mut OPNsense, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| impl<'a> DnsConfig<'a> { | impl<'a> UnboundDnsConfig<'a> { | ||||||
|     pub fn new(opnsense: &'a mut OPNsense) -> Self { |     pub fn new(opnsense: &'a mut OPNsense) -> Self { | ||||||
|         Self { opnsense } |         Self { opnsense } | ||||||
|     } |     } | ||||||
|  | |||||||
| @ -1,9 +1,12 @@ | |||||||
| // dnsmasq.rs
 | // dnsmasq.rs
 | ||||||
| use crate::modules::dhcp::DhcpError; | use crate::modules::dhcp::DhcpError; | ||||||
| use log::{debug, info}; | use log::{debug, info, warn}; | ||||||
|  | use opnsense_config_xml::dnsmasq::{DhcpRange, DnsMasq, DnsmasqHost}; // Assuming DhcpRange is defined in opnsense_config_xml::dnsmasq
 | ||||||
| use opnsense_config_xml::{MaybeString, StaticMap}; | use opnsense_config_xml::{MaybeString, StaticMap}; | ||||||
|  | use std::collections::HashSet; | ||||||
| use std::net::Ipv4Addr; | use std::net::Ipv4Addr; | ||||||
| use std::sync::Arc; | use std::sync::Arc; | ||||||
|  | use uuid::Uuid; | ||||||
| 
 | 
 | ||||||
| use opnsense_config_xml::OPNsense; | use opnsense_config_xml::OPNsense; | ||||||
| 
 | 
 | ||||||
| @ -25,74 +28,167 @@ impl<'a> DhcpConfigDnsMasq<'a> { | |||||||
|         } |         } | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     /// Removes a static mapping by its MAC address.
 |     /// Removes a MAC address from a static mapping.
 | ||||||
|     /// Static mappings are stored in the <dhcpd> section of the config, shared with the ISC module.
 |     /// If the mapping has no other MAC addresses associated with it, the entire host entry is removed.
 | ||||||
|     pub fn remove_static_mapping(&mut self, mac: &str) { |     pub fn remove_static_mapping(&mut self, mac_to_remove: &str) { | ||||||
|         let lan_dhcpd = self.get_lan_dhcpd(); |         let dnsmasq = self.get_dnsmasq(); | ||||||
|         lan_dhcpd | 
 | ||||||
|             .staticmaps |         // Update hwaddr fields for hosts that contain the MAC, removing it from the comma-separated list.
 | ||||||
|             .retain(|static_entry| static_entry.mac != mac); |         for host in dnsmasq.hosts.iter_mut() { | ||||||
|  |             let mac = host.hwaddr.content_string(); | ||||||
|  |             let original_macs: Vec<&str> = mac.split(',').collect(); | ||||||
|  |             if original_macs | ||||||
|  |                 .iter() | ||||||
|  |                 .any(|m| m.eq_ignore_ascii_case(mac_to_remove)) | ||||||
|  |             { | ||||||
|  |                 let updated_macs: Vec<&str> = original_macs | ||||||
|  |                     .into_iter() | ||||||
+                    .filter(|m| !m.eq_ignore_ascii_case(mac_to_remove))
+                    .collect();
+                host.hwaddr = updated_macs.join(",").into();
+            }
         }
 
-    /// Retrieves a mutable reference to the LAN interface's DHCP configuration.
-    /// This is located in the shared <dhcpd> section of the config.
-    fn get_lan_dhcpd(&mut self) -> &mut opnsense_config_xml::DhcpInterface {
-        &mut self
-            .opnsense
-            .dhcpd
-            .elements
-            .iter_mut()
-            .find(|(name, _config)| name == "lan")
-            .expect("Interface lan should have dhcpd activated")
-            .1
+        // Remove any host entries that no longer have any MAC addresses.
+        dnsmasq
+            .hosts
+            .retain(|host_entry| !host_entry.hwaddr.content_string().is_empty());
     }
 
-    /// Adds a new static DHCP mapping.
-    /// Validates the MAC address and checks for existing mappings to prevent conflicts.
+    /// Retrieves a mutable reference to the DnsMasq configuration.
+    /// This is located in the <dnsmasq> section of the OPNsense config.
+    fn get_dnsmasq(&mut self) -> &mut DnsMasq {
+        self.opnsense
+            .dnsmasq
+            .as_mut()
+            .expect("Dnsmasq config must be initialized")
+    }
+
+    /// Adds or updates a static DHCP mapping.
+    ///
+    /// This function implements specific logic to handle existing entries:
+    /// - If no host exists for the given IP or hostname, a new entry is created.
+    /// - If exactly one host exists for the IP and/or hostname, the new MAC is appended to it.
+    /// - It will error if the IP and hostname exist but point to two different host entries,
+    ///   as this represents an unresolvable conflict.
+    /// - It will also error if multiple entries are found for the IP or hostname, indicating an
+    ///   ambiguous state.
     pub fn add_static_mapping(
         &mut self,
-        mac: &str,
-        ipaddr: Ipv4Addr,
+        mac: &Vec<String>,
+        ipaddr: &Ipv4Addr,
         hostname: &str,
     ) -> Result<(), DhcpError> {
-        let mac = mac.to_string();
-        let hostname = hostname.to_string();
-        let lan_dhcpd = self.get_lan_dhcpd();
-        let existing_mappings: &mut Vec<StaticMap> = &mut lan_dhcpd.staticmaps;
+        let mut hostname_split = hostname.split(".");
+        let hostname = hostname_split.next().expect("hostname cannot be empty");
+        let domain_name = hostname_split.collect::<Vec<&str>>().join(".");
 
-        if !Self::is_valid_mac(&mac) {
-            return Err(DhcpError::InvalidMacAddress(mac));
+        if let Some(m) = mac.iter().find(|m| !Self::is_valid_mac(m)) {
+            return Err(DhcpError::InvalidMacAddress(m.to_string()));
         }
 
-        // TODO: Validate that the IP address is within a configured DHCP range.
+        let ip_str = ipaddr.to_string();
+        let hosts = &mut self.get_dnsmasq().hosts;
 
-        if existing_mappings
-            .iter()
-            .any(|m| m.ipaddr == ipaddr.to_string() && m.mac == mac)
-        {
-            info!("Mapping already exists for {} [{}], skipping", ipaddr, mac);
-            return Ok(());
-        }
+        let ip_indices: Vec<usize> = hosts
+            .iter()
+            .enumerate()
+            .filter(|(_, h)| h.ip.content_string() == ip_str)
+            .map(|(i, _)| i)
+            .collect();
 
-        if existing_mappings
-            .iter()
-            .any(|m| m.ipaddr == ipaddr.to_string())
-        {
-            return Err(DhcpError::IpAddressAlreadyMapped(ipaddr.to_string()));
-        }
+        let hostname_indices: Vec<usize> = hosts
+            .iter()
+            .enumerate()
+            .filter(|(_, h)| h.host == hostname)
+            .map(|(i, _)| i)
+            .collect();
+
+        let ip_set: HashSet<usize> = ip_indices.iter().cloned().collect();
+        let hostname_set: HashSet<usize> = hostname_indices.iter().cloned().collect();
+
+        if !ip_indices.is_empty()
+            && !hostname_indices.is_empty()
+            && ip_set.intersection(&hostname_set).count() == 0
+        {
+            return Err(DhcpError::Configuration(format!(
+                "Configuration conflict: IP {} and hostname '{}' exist, but in different static host entries.",
+                ipaddr, hostname
+            )));
+        }
 
-        if existing_mappings.iter().any(|m| m.mac == mac) {
-            return Err(DhcpError::MacAddressAlreadyMapped(mac));
-        }
+        let mut all_indices: Vec<&usize> = ip_set.union(&hostname_set).collect();
+        all_indices.sort();
 
-        let static_map = StaticMap {
-            mac,
-            ipaddr: ipaddr.to_string(),
-            hostname: hostname,
-            ..Default::default()
-        };
+        let mac_list = mac.join(",");
+
+        match all_indices.len() {
+            0 => {
+                info!(
+                    "Creating new static host for {} ({}) with MAC {}",
+                    hostname, ipaddr, mac_list
+                );
+                let new_host = DnsmasqHost {
+                    uuid: Uuid::new_v4().to_string(),
+                    host: hostname.to_string(),
+                    ip: ip_str.into(),
+                    hwaddr: mac_list.into(),
+                    local: MaybeString::from("1"),
+                    ignore: Some(0),
+                    domain: domain_name.into(),
+                    ..Default::default()
+                };
+                hosts.push(new_host);
+            }
+            1 => {
+                let host_index = *all_indices[0];
+                let host_to_modify = &mut hosts[host_index];
+                let host_to_modify_ip = host_to_modify.ip.content_string();
+                if host_to_modify_ip != ip_str {
+                    warn!(
+                        "Hostname '{}' already exists with a different IP ({}). Setting new IP {ip_str}. Appending MAC {}.",
+                        hostname, host_to_modify_ip, mac_list
+                    );
+                    host_to_modify.ip.content = Some(ip_str);
+                } else if host_to_modify.host != hostname {
+                    warn!(
+                        "IP {} already exists with a different hostname ('{}'). Setting hostname to {hostname}. Appending MAC {}.",
+                        ipaddr, host_to_modify.host, mac_list
+                    );
+                    host_to_modify.host = hostname.to_string();
+                }
+
+                for single_mac in mac.iter() {
+                    if !host_to_modify
+                        .hwaddr
+                        .content_string()
+                        .split(',')
+                        .any(|m| m.eq_ignore_ascii_case(single_mac))
+                    {
+                        info!(
+                            "Appending MAC {} to existing static host for {} ({})",
+                            single_mac, host_to_modify.host, host_to_modify_ip
+                        );
+                        let mut updated_macs = host_to_modify.hwaddr.content_string().to_string();
+                        updated_macs.push(',');
+                        updated_macs.push_str(single_mac);
+                        host_to_modify.hwaddr.content = updated_macs.into();
+                    } else {
+                        debug!(
+                            "MAC {} already present in static host entry for {} ({}). No changes made.",
+                            single_mac, host_to_modify.host, host_to_modify_ip
+                        );
+                    }
+                }
+            }
+            _ => {
+                return Err(DhcpError::Configuration(format!(
+                    "Configuration conflict: Found multiple host entries matching IP {} and/or hostname '{}'. Cannot resolve automatically.",
+                    ipaddr, hostname
+                )));
+            }
+        }
 
-        existing_mappings.push(static_map);
         Ok(())
     }
 
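For orientation, a minimal caller-side sketch of the new signature follows. The method name, parameter types, and error variants come from this diff; the wrapper function, the idea of a multi-NIC node, and all concrete values are illustrative assumptions.

use std::net::Ipv4Addr;

// Hypothetical helper: registers one node with two NICs under a single static host entry.
// "node-1" becomes the <host> field, "example.lan" the <domain> field.
fn register_node(dhcp_config: &mut DhcpConfigDnsMasq<'_>) -> Result<(), DhcpError> {
    let macs = vec![
        "00:11:22:33:44:55".to_string(),
        "66:77:88:99:AA:BB".to_string(),
    ];
    dhcp_config.add_static_mapping(&macs, &Ipv4Addr::new(192, 168, 1, 10), "node-1.example.lan")
}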
@@ -110,13 +206,20 @@ impl<'a> DhcpConfigDnsMasq<'a> {
     /// Retrieves the list of current static mappings by shelling out to `configctl`.
     /// This provides the real-time state from the running system.
     pub async fn get_static_mappings(&self) -> Result<Vec<StaticMap>, Error> {
+        // Note: This command is for the 'dhcpd' service. If dnsmasq uses a different command
+        // or key, this will need to be adjusted.
         let list_static_output = self
             .opnsense_shell
             .exec("configctl dhcpd list static")
             .await?;
 
-        let value: serde_json::Value = serde_json::from_str(&list_static_output)
-            .unwrap_or_else(|_| panic!("Got invalid json from configctl {list_static_output}"));
+        let value: serde_json::Value = serde_json::from_str(&list_static_output).map_err(|e| {
+            Error::Command(format!(
+                "Got invalid json from configctl {list_static_output} : {e}"
+            ))
+        })?;
+
+        // The JSON output key might be 'dhcpd' even when dnsmasq is the backend.
         let static_maps = value["dhcpd"]
             .as_array()
             .ok_or(Error::Command(format!(
@@ -135,6 +238,36 @@ impl<'a> DhcpConfigDnsMasq<'a> {
         Ok(static_maps)
     }
 
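As a rough usage sketch of this method, assuming the caller already holds a DhcpConfigDnsMasq and an async context (only the signature and the StaticMap fields referenced elsewhere in this file are taken from the code; the wrapper and log line are assumptions):

// Hypothetical caller: reads the live DHCP state from the firewall via configctl.
async fn log_live_static_mappings(dhcp_config: &DhcpConfigDnsMasq<'_>) -> Result<(), Error> {
    let mappings = dhcp_config.get_static_mappings().await?;
    for m in &mappings {
        info!("static mapping: {} -> {}", m.mac, m.ipaddr);
    }
    Ok(())
}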
+    pub async fn set_dhcp_range(&mut self, start: &str, end: &str) -> Result<(), DhcpError> {
+        let dnsmasq = self.get_dnsmasq();
+        let ranges = &mut dnsmasq.dhcp_ranges;
+
+        // Assuming DnsMasq has dhcp_ranges: Vec<DhcpRange>
+        // Find existing range for "lan" interface
+        if let Some(range) = ranges
+            .iter_mut()
+            .find(|r| r.interface == Some("lan".to_string()))
+        {
+            // Update existing range
+            range.start_addr = Some(start.to_string());
+            range.end_addr = Some(end.to_string());
+        } else {
+            // Create new range
+            let new_range = DhcpRange {
+                uuid: Some(Uuid::new_v4().to_string()),
+                interface: Some("lan".to_string()),
+                start_addr: Some(start.to_string()),
+                end_addr: Some(end.to_string()),
+                domain_type: Some("range".to_string()),
+                nosync: Some(0),
+                ..Default::default()
+            };
+            ranges.push(new_range);
+        }
+
+        Ok(())
+    }
+
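A brief caller-side sketch of the new set_dhcp_range helper (the wrapper function and the address values are illustrative assumptions; only the signature is taken from the diff):

// Hypothetical helper: the first call creates the "lan" range, later calls update it in place.
async fn configure_lan_pool(dhcp_config: &mut DhcpConfigDnsMasq<'_>) -> Result<(), DhcpError> {
    dhcp_config
        .set_dhcp_range("192.168.1.100", "192.168.1.200")
        .await
}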
     pub async fn set_pxe_options(
         &self,
         tftp_ip: Option<String>,
@@ -142,9 +275,9 @@ impl<'a> DhcpConfigDnsMasq<'a> {
         efi_filename: String,
         ipxe_filename: String,
     ) -> Result<(), DhcpError> {
-        // As of writing this opnsense does not support negative tags, and the dnsmasq config is a
-        // bit complicated anyways. So we are writing directly a dnsmasq config file to
-        // /usr/local/etc/dnsmasq.conf.d
+        // OPNsense does not support negative tags via its API for dnsmasq, and the required
+        // logic is complex. Therefore, we write a configuration file directly to the
+        // dnsmasq.conf.d directory to achieve the desired PXE boot behavior.
         let tftp_str = tftp_ip.map_or(String::new(), |i| format!(",{i},{i}"));
 
         let config = format!(
@@ -163,7 +296,7 @@ dhcp-boot=tag:efi,tag:!ipxe,{efi_filename}{tftp_str}
 dhcp-boot=tag:ipxe,{ipxe_filename}{tftp_str}
 
 # Provide undionly to legacy bios clients
-dhcp-boot=tag:bios,{bios_filename}{tftp_str}
+dhcp-boot=tag:bios,tag:!ipxe,{bios_filename}{tftp_str}
 "
         );
         info!("Writing configuration file to {DNS_MASQ_PXE_CONFIG_FILE}");
@@ -185,3 +318,302 @@ dhcp-boot=tag:bios,{bios_filename}{tftp_str}
         Ok(())
     }
 }
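For reference, with hypothetical arguments such as tftp_ip = Some("192.168.1.1"), efi_filename = "grubx64.efi", ipxe_filename = "boot.ipxe", and bios_filename = "undionly.kpxe", the boot-selection lines visible in this hunk would render roughly as:

dhcp-boot=tag:efi,tag:!ipxe,grubx64.efi,192.168.1.1,192.168.1.1
dhcp-boot=tag:ipxe,boot.ipxe,192.168.1.1,192.168.1.1
dhcp-boot=tag:bios,tag:!ipxe,undionly.kpxe,192.168.1.1,192.168.1.1

The tag:!ipxe added to the bios line mirrors the existing efi line; the intent is presumably that a client which has already chainloaded iPXE (and therefore re-requests with the ipxe tag) is served the iPXE script rather than being handed the legacy loader again.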
+
+#[cfg(test)]
+mod test {
+    use crate::config::DummyOPNSenseShell;
+
+    use super::*;
+    use opnsense_config_xml::OPNsense;
+    use std::net::Ipv4Addr;
+    use std::sync::Arc;
+
+    /// Helper function to create a DnsmasqHost with minimal boilerplate.
+    fn create_host(uuid: &str, host: &str, ip: &str, hwaddr: &str) -> DnsmasqHost {
+        DnsmasqHost {
+            uuid: uuid.to_string(),
+            host: host.to_string(),
+            ip: ip.into(),
+            hwaddr: hwaddr.into(),
+            local: MaybeString::from("1"),
+            ignore: Some(0),
+            ..Default::default()
+        }
+    }
+
+    /// Helper to set up the test environment with an initial OPNsense configuration.
+    fn setup_test_env(initial_hosts: Vec<DnsmasqHost>) -> DhcpConfigDnsMasq<'static> {
+        let opnsense_config = Box::leak(Box::new(OPNsense {
+            dnsmasq: Some(DnsMasq {
+                hosts: initial_hosts,
+                ..Default::default()
+            }),
+            ..Default::default()
+        }));
+
+        DhcpConfigDnsMasq::new(opnsense_config, Arc::new(DummyOPNSenseShell {}))
+    }
+
+    #[test]
+    fn test_add_first_static_mapping() {
+        let mut dhcp_config = setup_test_env(vec![]);
+        let ip = Ipv4Addr::new(192, 168, 1, 10);
+        let mac = "00:11:22:33:44:55";
+        let hostname = "new-host";
+
+        dhcp_config
+            .add_static_mapping(&vec![mac.to_string()], &ip, hostname)
+            .unwrap();
+
+        let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts;
+        assert_eq!(hosts.len(), 1);
+        let host = &hosts[0];
+        assert_eq!(host.host, hostname);
+        assert_eq!(host.ip, ip.to_string().into());
+        assert_eq!(host.hwaddr.content_string(), mac);
+        assert!(Uuid::parse_str(&host.uuid).is_ok());
+    }
+
+    #[test]
+    fn test_hostname_split_into_host_domain() {
+        let mut dhcp_config = setup_test_env(vec![]);
+        let ip = Ipv4Addr::new(192, 168, 1, 10);
+        let mac = "00:11:22:33:44:55";
+        let hostname = "new-host";
+        let domain = "some.domain";
+
+        dhcp_config
+            .add_static_mapping(&vec![mac.to_string()], &ip, &format!("{hostname}.{domain}"))
+            .unwrap();
+
+        let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts;
+        assert_eq!(hosts.len(), 1);
+        let host = &hosts[0];
+        assert_eq!(host.host, hostname);
+        assert_eq!(host.domain.content_string(), domain);
+        assert_eq!(host.ip, ip.to_string().into());
+        assert_eq!(host.hwaddr.content_string(), mac);
+        assert!(Uuid::parse_str(&host.uuid).is_ok());
+    }
+
+    #[test]
+    fn test_add_mac_to_existing_host_by_ip_and_hostname() {
+        let initial_host = create_host(
+            "uuid-1",
+            "existing-host",
+            "192.168.1.20",
+            "AA:BB:CC:DD:EE:FF",
+        );
+        let mut dhcp_config = setup_test_env(vec![initial_host]);
+        let ip = Ipv4Addr::new(192, 168, 1, 20);
+        let new_mac = "00:11:22:33:44:55";
+        let hostname = "existing-host";
+
+        dhcp_config
+            .add_static_mapping(&vec![new_mac.to_string()], &ip, hostname)
+            .unwrap();
+
+        let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts;
+        assert_eq!(hosts.len(), 1);
+        let host = &hosts[0];
+        assert_eq!(
+            host.hwaddr.content_string(),
+            "AA:BB:CC:DD:EE:FF,00:11:22:33:44:55"
+        );
+    }
+
+    #[test]
+    fn test_add_mac_to_existing_host_by_ip_only() {
+        let initial_host = create_host(
+            "uuid-1",
+            "existing-host",
+            "192.168.1.20",
+            "AA:BB:CC:DD:EE:FF",
+        );
+        let mut dhcp_config = setup_test_env(vec![initial_host]);
+        let ip = Ipv4Addr::new(192, 168, 1, 20);
+        let new_mac = "00:11:22:33:44:55";
+
+        // Using a different hostname should still find the host by IP and log a warning.
+        let new_hostname = "different-host-name";
+        dhcp_config
+            .add_static_mapping(&vec![new_mac.to_string()], &ip, new_hostname)
+            .unwrap();
+
+        let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts;
+        assert_eq!(hosts.len(), 1);
+        let host = &hosts[0];
+        assert_eq!(
+            host.hwaddr.content_string(),
+            "AA:BB:CC:DD:EE:FF,00:11:22:33:44:55"
+        );
+        assert_eq!(host.host, new_hostname); // hostname should be updated
+    }
+
+    #[test]
+    fn test_add_mac_to_existing_host_by_hostname_only() {
+        let initial_host = create_host(
+            "uuid-1",
+            "existing-host",
+            "192.168.1.20",
+            "AA:BB:CC:DD:EE:FF",
+        );
+        let mut dhcp_config = setup_test_env(vec![initial_host]);
+        let new_mac = "00:11:22:33:44:55";
+        let hostname = "existing-host";
+
+        // Using a different IP should still find the host by hostname and log a warning.
+        dhcp_config
+            .add_static_mapping(
+                &vec![new_mac.to_string()],
+                &Ipv4Addr::new(192, 168, 1, 99),
+                hostname,
+            )
+            .unwrap();
+
+        let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts;
+        assert_eq!(hosts.len(), 1);
+        let host = &hosts[0];
+        assert_eq!(
+            host.hwaddr.content_string(),
+            "AA:BB:CC:DD:EE:FF,00:11:22:33:44:55"
+        );
+        assert_eq!(host.ip.content_string(), "192.168.1.99"); // The IP should be updated to the new value.
+    }
+
+    #[test]
+    fn test_add_duplicate_mac_to_host() {
+        let initial_mac = "AA:BB:CC:DD:EE:FF";
+        let initial_host = create_host("uuid-1", "host-1", "192.168.1.20", initial_mac);
+        let mut dhcp_config = setup_test_env(vec![initial_host]);
+
+        dhcp_config
+            .add_static_mapping(
+                &vec![initial_mac.to_string()],
+                &Ipv4Addr::new(192, 168, 1, 20),
+                "host-1",
+            )
+            .unwrap();
+
+        let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts;
+        assert_eq!(hosts.len(), 1);
+        assert_eq!(hosts[0].hwaddr.content_string(), initial_mac); // No change, no duplication.
+    }
+
+    #[test]
+    fn test_add_invalid_mac_address() {
+        let mut dhcp_config = setup_test_env(vec![]);
+        let result = dhcp_config.add_static_mapping(
+            &vec!["invalid-mac".to_string()],
+            &Ipv4Addr::new(10, 0, 0, 1),
+            "host",
+        );
+        assert!(matches!(result, Err(DhcpError::InvalidMacAddress(_))));
+    }
+
+    #[test]
+    fn test_error_on_conflicting_ip_and_hostname() {
+        let host_a = create_host("uuid-a", "host-a", "192.168.1.10", "AA:AA:AA:AA:AA:AA");
+        let host_b = create_host("uuid-b", "host-b", "192.168.1.20", "BB:BB:BB:BB:BB:BB");
+        let mut dhcp_config = setup_test_env(vec![host_a, host_b]);
+
+        let result = dhcp_config.add_static_mapping(
+            &vec!["CC:CC:CC:CC:CC:CC".to_string()],
+            &Ipv4Addr::new(192, 168, 1, 10),
+            "host-b",
+        );
+        // This IP belongs to host-a, but the hostname belongs to host-b.
+        assert_eq!(result, Err(DhcpError::Configuration("Configuration conflict: IP 192.168.1.10 and hostname 'host-b' exist, but in different static host entries.".to_string())));
+    }
+
+    #[test]
+    fn test_error_on_multiple_ip_matches() {
+        let host_a = create_host("uuid-a", "host-a", "192.168.1.30", "AA:AA:AA:AA:AA:AA");
+        let host_b = create_host("uuid-b", "host-b", "192.168.1.30", "BB:BB:BB:BB:BB:BB");
+        let mut dhcp_config = setup_test_env(vec![host_a, host_b]);
+
+        // This IP is ambiguous.
+        let result = dhcp_config.add_static_mapping(
+            &vec!["CC:CC:CC:CC:CC:CC".to_string()],
+            &Ipv4Addr::new(192, 168, 1, 30),
+            "new-host",
+        );
+        assert_eq!(result, Err(DhcpError::Configuration("Configuration conflict: Found multiple host entries matching IP 192.168.1.30 and/or hostname 'new-host'. Cannot resolve automatically.".to_string())));
+    }
+
+    #[test]
+    fn test_remove_mac_from_multi_mac_host() {
+        let host = create_host("uuid-1", "host-1", "192.168.1.50", "mac-1,mac-2,mac-3");
+        let mut dhcp_config = setup_test_env(vec![host]);
+
+        dhcp_config.remove_static_mapping("mac-2");
+
+        let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts;
+        assert_eq!(hosts.len(), 1);
+        assert_eq!(hosts[0].hwaddr.content_string(), "mac-1,mac-3");
+    }
+
+    #[test]
+    fn test_remove_last_mac_from_host() {
+        let host = create_host("uuid-1", "host-1", "192.168.1.50", "mac-1");
+        let mut dhcp_config = setup_test_env(vec![host]);
+
+        dhcp_config.remove_static_mapping("mac-1");
+
+        let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts;
+        assert!(hosts.is_empty());
+    }
+
+    #[test]
+    fn test_remove_non_existent_mac() {
+        let host = create_host("uuid-1", "host-1", "192.168.1.50", "mac-1,mac-2");
+        let mut dhcp_config = setup_test_env(vec![host.clone()]);
+
+        dhcp_config.remove_static_mapping("mac-nonexistent");
+
+        let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts;
+        assert_eq!(hosts.len(), 1);
+        assert_eq!(hosts[0], host); // The host should be unchanged.
+    }
+
+    #[test]
+    fn test_remove_mac_case_insensitively() {
+        let host = create_host("uuid-1", "host-1", "192.168.1.50", "AA:BB:CC:DD:EE:FF");
+        let mut dhcp_config = setup_test_env(vec![host]);
+
+        dhcp_config.remove_static_mapping("aa:bb:cc:dd:ee:ff");
+
+        let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts;
+        assert!(hosts.is_empty());
+    }
+
+    #[test]
+    fn test_remove_mac_from_correct_host_only() {
+        let host1 = create_host(
+            "uuid-1",
+            "host-1",
+            "192.168.1.50",
+            "AA:AA:AA:AA:AA:AA,BB:BB:BB:BB:BB:BB",
+        );
+        let host2 = create_host(
+            "uuid-2",
+            "host-2",
+            "192.168.1.51",
+            "CC:CC:CC:CC:CC:CC,DD:DD:DD:DD:DD:DD",
+        );
+        let mut dhcp_config = setup_test_env(vec![host1.clone(), host2.clone()]);
+
+        dhcp_config.remove_static_mapping("AA:AA:AA:AA:AA:AA");
+
+        let hosts = &dhcp_config.opnsense.dnsmasq.as_ref().unwrap().hosts;
+        assert_eq!(hosts.len(), 2);
+        let updated_host1 = hosts.iter().find(|h| h.uuid == "uuid-1").unwrap();
+        let unchanged_host2 = hosts.iter().find(|h| h.uuid == "uuid-2").unwrap();
+
+        assert_eq!(updated_host1.hwaddr.content_string(), "BB:BB:BB:BB:BB:BB");
+        assert_eq!(
+            unchanged_host2.hwaddr.content_string(),
+            "CC:CC:CC:CC:CC:CC,DD:DD:DD:DD:DD:DD"
+        );
+    }
+}
opnsense-config/src/tests/data/config-25.7-dnsmasq-static-host.xml (new file, 1674 lines)
File diff suppressed because it is too large
@@ -215,7 +215,6 @@
       <description>System Administrators</description>
       <scope>system</scope>
       <gid>1999</gid>
-      <member>0</member>
       <member>2000</member>
       <priv>page-all</priv>
     </group>

@@ -27,7 +27,6 @@
       <description>System Administrators</description>
       <scope>system</scope>
       <gid>1999</gid>
-      <member>0</member>
       <member>2000</member>
       <priv>page-all</priv>
     </group>

@@ -27,7 +27,6 @@
       <description>System Administrators</description>
       <scope>system</scope>
       <gid>1999</gid>
-      <member>0</member>
       <member>2000</member>
       <priv>page-all</priv>
     </group>