diff --git a/Cargo.lock b/Cargo.lock index 321b2b1..d7ce77e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6080,6 +6080,21 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" +[[package]] +name = "test-score" +version = "0.1.0" +dependencies = [ + "base64 0.22.1", + "env_logger", + "harmony", + "harmony_cli", + "harmony_macros", + "harmony_types", + "log", + "tokio", + "url", +] + [[package]] name = "thiserror" version = "1.0.69" diff --git a/adr/015-higher-order-topologies.md b/adr/015-higher-order-topologies.md new file mode 100644 index 0000000..41c3172 --- /dev/null +++ b/adr/015-higher-order-topologies.md @@ -0,0 +1,114 @@ +# Architecture Decision Record: Higher-Order Topologies + +**Initial Author:** Jean-Gabriel Gill-Couture +**Initial Date:** 2025-12-08 +**Last Updated Date:** 2025-12-08 + +## Status + +Implemented + +## Context + +Harmony models infrastructure as **Topologies** (deployment targets like `K8sAnywhereTopology`, `LinuxHostTopology`) implementing **Capabilities** (tech traits like `PostgreSQL`, `Docker`). + +**Higher-Order Topologies** (e.g., `FailoverTopology`) compose/orchestrate capabilities *across* multiple underlying topologies (e.g., primary+replica `T`). + +Naive design requires manual `impl Capability for HigherOrderTopology` *per T per capability*, causing: +- **Impl explosion**: N topologies × M capabilities = N×M boilerplate. +- **ISP violation**: Topologies forced to impl unrelated capabilities. +- **Maintenance hell**: New topology needs impls for *all* orchestrated capabilities; new capability needs impls for *all* topologies/higher-order. +- **Barrier to extension**: Users can't easily add topologies without todos/panics. + +This makes scaling Harmony impractical as ecosystem grows. 
+ +## Decision + +Use **blanket trait impls** on higher-order topologies to *automatically* derive orchestration: + +````rust +/// Higher-Order Topology: Orchestrates capabilities across sub-topologies. +pub struct FailoverTopology { + /// Primary sub-topology. + primary: T, + /// Replica sub-topology. + replica: T, +} + +/// Automatically provides PostgreSQL failover for *any* `T: PostgreSQL`. +/// Delegates to primary for queries; orchestrates deploy across both. +#[async_trait] +impl PostgreSQL for FailoverTopology { + async fn deploy(&self, config: &PostgreSQLConfig) -> Result { + // Deploy primary; extract certs/endpoint; + // deploy replica with pg_basebackup + TLS passthrough. + // (Full impl logged/elaborated.) + } + + // Delegate queries to primary. + async fn get_replication_certs(&self, cluster_name: &str) -> Result { + self.primary.get_replication_certs(cluster_name).await + } + // ... +} + +/// Similarly for other capabilities. +#[async_trait] +impl Docker for FailoverTopology { + // Failover Docker orchestration. +} +```` + +**Key properties:** +- **Auto-derivation**: `Failover` gets `PostgreSQL` iff `K8sAnywhere: PostgreSQL`. +- **No boilerplate**: One blanket impl per capability *per higher-order type*. + +## Rationale + +- **Composition via generics**: Rust trait solver auto-selects impls; zero runtime cost. +- **Compile-time safety**: Missing `T: Capability` → compile error (no panics). +- **Scalable**: O(capabilities) impls per higher-order; new `T` auto-works. +- **ISP-respecting**: Capabilities only surface if sub-topology provides. +- **Centralized logic**: Orchestration (e.g., cert propagation) in one place. 
+ +**Example usage:** +````rust +// ✅ Works: K8sAnywhere: PostgreSQL → Failover provides failover PG +let pg_failover: FailoverTopology = ...; +pg_failover.deploy_pg(config).await; + +// ✅ Works: LinuxHost: Docker → Failover provides failover Docker +let docker_failover: FailoverTopology = ...; +docker_failover.deploy_docker(...).await; + +// ❌ Compile fail: K8sAnywhere !: Docker +let invalid: FailoverTopology; +invalid.deploy_docker(...); // `T: Docker` bound unsatisfied +```` + +## Consequences + +**Pros:** +- **Extensible**: New topology `AWSTopology: PostgreSQL` → instant `Failover: PostgreSQL`. +- **Lean**: No useless impls (e.g., no `K8sAnywhere: Docker`). +- **Observable**: Logs trace every step. + +**Cons:** +- **Monomorphization**: Generics generate code per T (mitigated: few Ts). +- **Delegation opacity**: Relies on rustdoc/logs for internals. + +## Alternatives considered + +| Approach | Pros | Cons | +|----------|------|------| +| **Manual per-T impls**
`impl PG for Failover<K8sAnywhere> {..}`
`impl PG for Failover<LinuxHost> {..}` | Explicit control | N×M explosion; violates ISP; hard to extend. | +| **Dynamic trait objects**
`Box<dyn Capability>` | Runtime flex | Perf hit; type erasure; error-prone dispatch. | +| **Mega-topology trait**
All-in-one `OrchestratedTopology` | Simple wiring | Monolithic; poor composition. | +| **Registry dispatch**
Runtime capability lookup | Decoupled | Complex; no compile safety; perf/debug overhead. | + +**Selected**: Blanket impls leverage Rust generics for safe, zero-cost composition. + +## Additional Notes + +- Applies to `MultisiteTopology`, `ShardedTopology`, etc. +- `FailoverTopology` in `failover.rs` is first implementation. diff --git a/adr/015-higher-order-topologies/example.rs b/adr/015-higher-order-topologies/example.rs new file mode 100644 index 0000000..8c8911d --- /dev/null +++ b/adr/015-higher-order-topologies/example.rs @@ -0,0 +1,153 @@ +//! Example of Higher-Order Topologies in Harmony. +//! Demonstrates how `FailoverTopology` automatically provides failover for *any* capability +//! supported by a sub-topology `T` via blanket trait impls. +//! +//! Key insight: No manual impls per T or capability -- scales effortlessly. +//! Users can: +//! - Write new `Topology` (impl capabilities on a struct). +//! - Compose with `FailoverTopology` (gets capabilities if T has them). +//! - Compile fails if capability missing (safety). + +use async_trait::async_trait; +use tokio; + +/// Capability trait: Deploy and manage PostgreSQL. +#[async_trait] +pub trait PostgreSQL { + async fn deploy(&self, config: &PostgreSQLConfig) -> Result; + async fn get_replication_certs(&self, cluster_name: &str) -> Result; +} + +/// Capability trait: Deploy Docker. +#[async_trait] +pub trait Docker { + async fn deploy_docker(&self) -> Result; +} + +/// Configuration for PostgreSQL deployments. +#[derive(Clone)] +pub struct PostgreSQLConfig; + +/// Replication certificates. +#[derive(Clone)] +pub struct ReplicationCerts; + +/// Concrete topology: Kubernetes Anywhere (supports PostgreSQL). +#[derive(Clone)] +pub struct K8sAnywhereTopology; + +#[async_trait] +impl PostgreSQL for K8sAnywhereTopology { + async fn deploy(&self, _config: &PostgreSQLConfig) -> Result { + // Real impl: Use k8s helm chart, operator, etc. 
+ Ok("K8sAnywhere PostgreSQL deployed".to_string()) + } + + async fn get_replication_certs(&self, _cluster_name: &str) -> Result { + Ok(ReplicationCerts) + } +} + +/// Concrete topology: Linux Host (supports Docker). +#[derive(Clone)] +pub struct LinuxHostTopology; + +#[async_trait] +impl Docker for LinuxHostTopology { + async fn deploy_docker(&self) -> Result { + // Real impl: Install/configure Docker on host. + Ok("LinuxHost Docker deployed".to_string()) + } +} + +/// Higher-Order Topology: Composes multiple sub-topologies (primary + replica). +/// Automatically derives *all* capabilities of `T` with failover orchestration. +/// +/// - If `T: PostgreSQL`, then `FailoverTopology: PostgreSQL` (blanket impl). +/// - Same for `Docker`, etc. No boilerplate! +/// - Compile-time safe: Missing `T: Capability` → error. +#[derive(Clone)] +pub struct FailoverTopology { + /// Primary sub-topology. + pub primary: T, + /// Replica sub-topology. + pub replica: T, +} + +/// Blanket impl: Failover PostgreSQL if T provides PostgreSQL. +/// Delegates reads to primary; deploys to both. +#[async_trait] +impl PostgreSQL for FailoverTopology { + async fn deploy(&self, config: &PostgreSQLConfig) -> Result { + // Orchestrate: Deploy primary first, then replica (e.g., via pg_basebackup). + let primary_result = self.primary.deploy(config).await?; + let replica_result = self.replica.deploy(config).await?; + Ok(format!("Failover PG deployed: {} | {}", primary_result, replica_result)) + } + + async fn get_replication_certs(&self, cluster_name: &str) -> Result { + // Delegate to primary (replica follows). + self.primary.get_replication_certs(cluster_name).await + } +} + +/// Blanket impl: Failover Docker if T provides Docker. +#[async_trait] +impl Docker for FailoverTopology { + async fn deploy_docker(&self) -> Result { + // Orchestrate across primary + replica. 
+ let primary_result = self.primary.deploy_docker().await?; + let replica_result = self.replica.deploy_docker().await?; + Ok(format!("Failover Docker deployed: {} | {}", primary_result, replica_result)) + } +} + +#[tokio::main] +async fn main() { + let config = PostgreSQLConfig; + + println!("=== ✅ PostgreSQL Failover (K8sAnywhere supports PG) ==="); + let pg_failover = FailoverTopology { + primary: K8sAnywhereTopology, + replica: K8sAnywhereTopology, + }; + let result = pg_failover.deploy(&config).await.unwrap(); + println!("Result: {}", result); + + println!("\n=== ✅ Docker Failover (LinuxHost supports Docker) ==="); + let docker_failover = FailoverTopology { + primary: LinuxHostTopology, + replica: LinuxHostTopology, + }; + let result = docker_failover.deploy_docker().await.unwrap(); + println!("Result: {}", result); + + println!("\n=== ❌ Would fail to compile (K8sAnywhere !: Docker) ==="); + // let invalid = FailoverTopology { + // primary: K8sAnywhereTopology, + // replica: K8sAnywhereTopology, + // }; + // invalid.deploy_docker().await.unwrap(); // Error: `K8sAnywhereTopology: Docker` not satisfied! + // Very clear error message : + // error[E0599]: the method `deploy_docker` exists for struct `FailoverTopology`, but its trait bounds were not satisfied + // --> src/main.rs:90:9 + // | + // 4 | pub struct FailoverTopology { + // | ------------------------------ method `deploy_docker` not found for this struct because it doesn't satisfy `FailoverTopology: Docker` + // ... + // 37 | struct K8sAnywhereTopology; + // | -------------------------- doesn't satisfy `K8sAnywhereTopology: Docker` + // ... 
+ // 90 | invalid.deploy_docker(); // `T: Docker` bound unsatisfied + // | ^^^^^^^^^^^^^ method cannot be called on `FailoverTopology` due to unsatisfied trait bounds + // | + // note: trait bound `K8sAnywhereTopology: Docker` was not satisfied + // --> src/main.rs:61:9 + // | + // 61 | impl Docker for FailoverTopology { + // | ^^^^^^ ------ ------------------- + // | | + // | unsatisfied trait bound introduced here + // note: the trait `Docker` must be implemented +} + diff --git a/examples/okd_installation/src/main.rs b/examples/okd_installation/src/main.rs index e581d5d..fddaa2e 100644 --- a/examples/okd_installation/src/main.rs +++ b/examples/okd_installation/src/main.rs @@ -4,7 +4,10 @@ use crate::topology::{get_inventory, get_topology}; use harmony::{ config::secret::SshKeyPair, data::{FileContent, FilePath}, - modules::okd::{installation::OKDInstallationPipeline, ipxe::OKDIpxeScore}, + modules::{ + inventory::HarmonyDiscoveryStrategy, + okd::{installation::OKDInstallationPipeline, ipxe::OKDIpxeScore}, + }, score::Score, topology::HAClusterTopology, }; @@ -26,7 +29,8 @@ async fn main() { }, })]; - scores.append(&mut OKDInstallationPipeline::get_all_scores().await); + scores + .append(&mut OKDInstallationPipeline::get_all_scores(HarmonyDiscoveryStrategy::MDNS).await); harmony_cli::run(inventory, topology, scores, None) .await diff --git a/harmony/src/domain/inventory/mod.rs b/harmony/src/domain/inventory/mod.rs index 7d160d7..10fabda 100644 --- a/harmony/src/domain/inventory/mod.rs +++ b/harmony/src/domain/inventory/mod.rs @@ -1,4 +1,6 @@ mod repository; +use std::fmt; + pub use repository::*; #[derive(Debug, new, Clone)] @@ -69,5 +71,14 @@ pub enum HostRole { Bootstrap, ControlPlane, Worker, - Storage, +} + +impl fmt::Display for HostRole { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + HostRole::Bootstrap => write!(f, "Bootstrap"), + HostRole::ControlPlane => write!(f, "ControlPlane"), + HostRole::Worker => write!(f, "Worker"), + } 
+ } } diff --git a/harmony/src/domain/topology/ha_cluster.rs b/harmony/src/domain/topology/ha_cluster.rs index dc6d4ac..2f9e348 100644 --- a/harmony/src/domain/topology/ha_cluster.rs +++ b/harmony/src/domain/topology/ha_cluster.rs @@ -9,9 +9,9 @@ use harmony_types::{ use log::debug; use log::info; -use crate::{infra::network_manager::OpenShiftNmStateNetworkManager, topology::PortConfig}; -use crate::topology::PxeOptions; use crate::{data::FileContent, executors::ExecutorError}; +use crate::{infra::network_manager::OpenShiftNmStateNetworkManager, topology::PortConfig}; +use crate::{modules::inventory::HarmonyDiscoveryStrategy, topology::PxeOptions}; use super::{ DHCPStaticEntry, DhcpServer, DnsRecord, DnsRecordType, DnsServer, Firewall, HostNetworkConfig, @@ -303,10 +303,7 @@ impl Switch for HAClusterTopology { async fn clear_port_channel(&self, ids: &Vec) -> Result<(), SwitchError> { todo!() } - async fn configure_interface( - &self, - ports: &Vec, - ) -> Result<(), SwitchError> { + async fn configure_interface(&self, ports: &Vec) -> Result<(), SwitchError> { todo!() } } @@ -532,6 +529,10 @@ impl SwitchClient for DummyInfra { ) -> Result { unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) } - async fn clear_port_channel(&self, ids: &Vec) -> Result<(), SwitchError> {todo!()} - async fn configure_interface(&self, ports: &Vec) -> Result<(), SwitchError> {todo!()} + async fn clear_port_channel(&self, ids: &Vec) -> Result<(), SwitchError> { + todo!() + } + async fn configure_interface(&self, ports: &Vec) -> Result<(), SwitchError> { + todo!() + } } diff --git a/harmony/src/infra/brocade.rs b/harmony/src/infra/brocade.rs index 6e45ce7..0b5c2a8 100644 --- a/harmony/src/infra/brocade.rs +++ b/harmony/src/infra/brocade.rs @@ -123,7 +123,10 @@ impl SwitchClient for BrocadeSwitchClient { } async fn configure_interface(&self, ports: &Vec) -> Result<(), SwitchError> { // FIXME hardcoded TenGigabitEthernet = bad - let ports = ports.iter().map(|p| (format!("TenGigabitEthernet {}", 
p.0), p.1.clone())).collect(); + let ports = ports + .iter() + .map(|p| (format!("TenGigabitEthernet {}", p.0), p.1.clone())) + .collect(); self.brocade .configure_interfaces(&ports) .await @@ -164,10 +167,11 @@ mod tests { client.setup().await.unwrap(); + //TODO not sure about this let configured_interfaces = brocade.configured_interfaces.lock().unwrap(); assert_that!(*configured_interfaces).contains_exactly(vec![ - (first_interface.port_location, PortOperatingMode::Access), - (second_interface.port_location, PortOperatingMode::Access), + (first_interface.name.clone(), PortOperatingMode::Access), + (second_interface.name.clone(), PortOperatingMode::Access), ]); } diff --git a/harmony/src/infra/network_manager.rs b/harmony/src/infra/network_manager.rs index 89321fe..a5a2f77 100644 --- a/harmony/src/infra/network_manager.rs +++ b/harmony/src/infra/network_manager.rs @@ -17,6 +17,12 @@ use crate::{ topology::{HostNetworkConfig, NetworkError, NetworkManager, k8s::K8sClient}, }; +/// TODO document properly the non-intuitive behavior of "roll forward only" of nmstate in general +/// It is documented in nmstate official doc, but worth mentioning here : +/// +/// - You create a bond, nmstate will apply it +/// - You delete the bond from nmstate, it will NOT delete it +/// - To delete it you have to update it with configuration set to null pub struct OpenShiftNmStateNetworkManager { k8s_client: Arc, } @@ -31,6 +37,7 @@ impl std::fmt::Debug for OpenShiftNmStateNetworkManager { impl NetworkManager for OpenShiftNmStateNetworkManager { async fn ensure_network_manager_installed(&self) -> Result<(), NetworkError> { debug!("Installing NMState controller..."); + // TODO use operatorhub maybe?
self.k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/nmstate.io_nmstates.yaml ").unwrap(), Some("nmstate")) .await?; @@ -135,8 +142,6 @@ impl OpenShiftNmStateNetworkManager { description: Some(format!("Member of bond {bond_name}")), r#type: nmstate::InterfaceType::Ethernet, state: "up".to_string(), - mtu: Some(switch_port.interface.mtu), - mac_address: Some(switch_port.interface.mac_address.to_string()), ipv4: Some(nmstate::IpStackSpec { enabled: Some(false), ..Default::default() @@ -162,7 +167,7 @@ impl OpenShiftNmStateNetworkManager { interfaces.push(nmstate::Interface { name: bond_name.to_string(), - description: Some(format!("Network bond for host {host}")), + description: Some(format!("HARMONY - Network bond for host {host}")), r#type: nmstate::InterfaceType::Bond, state: "up".to_string(), copy_mac_from, diff --git a/harmony/src/modules/inventory/discovery.rs b/harmony/src/modules/inventory/discovery.rs index 3890a1a..7e063ec 100644 --- a/harmony/src/modules/inventory/discovery.rs +++ b/harmony/src/modules/inventory/discovery.rs @@ -17,7 +17,7 @@ use crate::{ pub struct DiscoverHostForRoleScore { pub role: HostRole, pub number_desired_hosts: i16, - pub discovery_strategy : HarmonyDiscoveryStrategy, + pub discovery_strategy: HarmonyDiscoveryStrategy, } impl Score for DiscoverHostForRoleScore { diff --git a/harmony/src/modules/okd/bootstrap_03_control_plane.rs b/harmony/src/modules/okd/bootstrap_03_control_plane.rs index 2ad3bf5..8ec4457 100644 --- a/harmony/src/modules/okd/bootstrap_03_control_plane.rs +++ b/harmony/src/modules/okd/bootstrap_03_control_plane.rs @@ -1,20 +1,10 @@ use crate::{ - data::Version, - hardware::PhysicalHost, - infra::inventory::InventoryRepositoryFactory, - interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, - inventory::{HostRole, Inventory}, - modules::{ - dhcp::DhcpHostBindingScore, http::IPxeMacBootFileScore, - 
inventory::{DiscoverHostForRoleScore, HarmonyDiscoveryStrategy}, okd::templates::BootstrapIpxeTpl, - }, + interpret::Interpret, + inventory::HostRole, + modules::{inventory::HarmonyDiscoveryStrategy, okd::bootstrap_okd_node::OKDNodeInterpret}, score::Score, - topology::{HAClusterTopology, HostBinding}, + topology::HAClusterTopology, }; -use async_trait::async_trait; -use derive_new::new; -use harmony_types::id::Id; -use log::{debug, info}; use serde::Serialize; // ------------------------------------------------------------------------------------------------- @@ -23,232 +13,23 @@ use serde::Serialize; // - Persist bonding via MachineConfigs (or NNCP) once SCOS is active. // ------------------------------------------------------------------------------------------------- -#[derive(Debug, Clone, Serialize, new)] -pub struct OKDSetup03ControlPlaneScore {} +#[derive(Debug, Clone, Serialize)] +pub struct OKDSetup03ControlPlaneScore { + pub discovery_strategy: HarmonyDiscoveryStrategy, +} impl Score for OKDSetup03ControlPlaneScore { fn create_interpret(&self) -> Box> { - Box::new(OKDSetup03ControlPlaneInterpret::new()) + // TODO: Implement a step to wait for the control plane nodes to join the cluster + // and for the cluster operators to become available. This would be similar to + // the `wait-for bootstrap-complete` command. + Box::new(OKDNodeInterpret::new( + HostRole::ControlPlane, + self.discovery_strategy.clone(), + )) } fn name(&self) -> String { "OKDSetup03ControlPlaneScore".to_string() } } - -#[derive(Debug, Clone)] -pub struct OKDSetup03ControlPlaneInterpret { - version: Version, - status: InterpretStatus, -} - -impl OKDSetup03ControlPlaneInterpret { - pub fn new() -> Self { - let version = Version::from("1.0.0").unwrap(); - Self { - version, - status: InterpretStatus::QUEUED, - } - } - - /// Ensures that three physical hosts are discovered and available for the ControlPlane role. - /// It will trigger discovery if not enough hosts are found. 
- async fn get_nodes( - &self, - inventory: &Inventory, - topology: &HAClusterTopology, - ) -> Result, InterpretError> { - const REQUIRED_HOSTS: i16 = 3; - let repo = InventoryRepositoryFactory::build().await?; - let control_plane_hosts = repo.get_host_for_role(&HostRole::ControlPlane).await?; - - info!( - "Discovery of {} control plane hosts in progress, current number {}", - REQUIRED_HOSTS, - control_plane_hosts.len() - ); - // This score triggers the discovery agent for a specific role. - DiscoverHostForRoleScore { - role: HostRole::ControlPlane, - number_desired_hosts: REQUIRED_HOSTS, - discovery_strategy: HarmonyDiscoveryStrategy::MDNS, - } - .interpret(inventory, topology) - .await?; - - let control_plane_hosts = repo.get_host_for_role(&HostRole::ControlPlane).await?; - - if control_plane_hosts.len() < REQUIRED_HOSTS as usize { - return Err(InterpretError::new(format!( - "OKD Requires at least {} control plane hosts, but only found {}. Cannot proceed.", - REQUIRED_HOSTS, - control_plane_hosts.len() - ))); - } - - // Take exactly the number of required hosts to ensure consistency. - Ok(control_plane_hosts - .into_iter() - .take(REQUIRED_HOSTS as usize) - .collect()) - } - - /// Configures DHCP host bindings for all control plane nodes. - async fn configure_host_binding( - &self, - inventory: &Inventory, - topology: &HAClusterTopology, - nodes: &Vec, - ) -> Result<(), InterpretError> { - info!("[ControlPlane] Configuring host bindings for control plane nodes."); - - // Ensure the topology definition matches the number of physical nodes found. - if topology.control_plane.len() != nodes.len() { - return Err(InterpretError::new(format!( - "Mismatch between logical control plane hosts defined in topology ({}) and physical nodes found ({}).", - topology.control_plane.len(), - nodes.len() - ))); - } - - // Create a binding for each physical host to its corresponding logical host. 
- let bindings: Vec = topology - .control_plane - .iter() - .zip(nodes.iter()) - .map(|(logical_host, physical_host)| { - info!( - "Creating binding: Logical Host '{}' -> Physical Host ID '{}'", - logical_host.name, physical_host.id - ); - HostBinding { - logical_host: logical_host.clone(), - physical_host: physical_host.clone(), - } - }) - .collect(); - - DhcpHostBindingScore { - host_binding: bindings, - domain: Some(topology.domain_name.clone()), - } - .interpret(inventory, topology) - .await?; - - Ok(()) - } - - /// Renders and deploys a per-MAC iPXE boot file for each control plane node. - async fn configure_ipxe( - &self, - inventory: &Inventory, - topology: &HAClusterTopology, - nodes: &Vec, - ) -> Result<(), InterpretError> { - info!("[ControlPlane] Rendering per-MAC iPXE configurations."); - - // The iPXE script content is the same for all control plane nodes, - // pointing to the 'master.ign' ignition file. - let content = BootstrapIpxeTpl { - http_ip: &topology.http_server.get_ip().to_string(), - scos_path: "scos", - ignition_http_path: "okd_ignition_files", - installation_device: "/dev/sda", // This might need to be configurable per-host in the future - ignition_file_name: "master.ign", // Control plane nodes use the master ignition file - } - .to_string(); - - debug!("[ControlPlane] iPXE content template:\n{content}"); - - // Create and apply an iPXE boot file for each node. - for node in nodes { - let mac_address = node.get_mac_address(); - if mac_address.is_empty() { - return Err(InterpretError::new(format!( - "Physical host with ID '{}' has no MAC addresses defined.", - node.id - ))); - } - info!( - "[ControlPlane] Applying iPXE config for node ID '{}' with MACs: {:?}", - node.id, mac_address - ); - - IPxeMacBootFileScore { - mac_address, - content: content.clone(), - } - .interpret(inventory, topology) - .await?; - } - - Ok(()) - } - - /// Prompts the user to reboot the target control plane nodes. 
- async fn reboot_targets(&self, nodes: &Vec) -> Result<(), InterpretError> { - let node_ids: Vec = nodes.iter().map(|n| n.id.to_string()).collect(); - info!("[ControlPlane] Requesting reboot for control plane nodes: {node_ids:?}",); - - let confirmation = inquire::Confirm::new( - &format!("Please reboot the {} control plane nodes ({}) to apply their PXE configuration. Press enter when ready.", nodes.len(), node_ids.join(", ")), - ) - .prompt() - .map_err(|e| InterpretError::new(format!("User prompt failed: {e}")))?; - - if !confirmation { - return Err(InterpretError::new( - "User aborted the operation.".to_string(), - )); - } - - Ok(()) - } -} - -#[async_trait] -impl Interpret for OKDSetup03ControlPlaneInterpret { - fn get_name(&self) -> InterpretName { - InterpretName::Custom("OKDSetup03ControlPlane") - } - - fn get_version(&self) -> Version { - self.version.clone() - } - - fn get_status(&self) -> InterpretStatus { - self.status.clone() - } - - fn get_children(&self) -> Vec { - vec![] - } - - async fn execute( - &self, - inventory: &Inventory, - topology: &HAClusterTopology, - ) -> Result { - // 1. Ensure we have 3 physical hosts for the control plane. - let nodes = self.get_nodes(inventory, topology).await?; - - // 2. Create DHCP reservations for the control plane nodes. - self.configure_host_binding(inventory, topology, &nodes) - .await?; - - // 3. Create iPXE files for each control plane node to boot from the master ignition. - self.configure_ipxe(inventory, topology, &nodes).await?; - - // 4. Reboot the nodes to start the OS installation. - self.reboot_targets(&nodes).await?; - - // TODO: Implement a step to wait for the control plane nodes to join the cluster - // and for the cluster operators to become available. This would be similar to - // the `wait-for bootstrap-complete` command. - info!("[ControlPlane] Provisioning initiated. 
Monitor the cluster convergence manually."); - - Ok(Outcome::success( - "Control plane provisioning has been successfully initiated.".into(), - )) - } -} diff --git a/harmony/src/modules/okd/bootstrap_04_workers.rs b/harmony/src/modules/okd/bootstrap_04_workers.rs index 461cab9..53e32c5 100644 --- a/harmony/src/modules/okd/bootstrap_04_workers.rs +++ b/harmony/src/modules/okd/bootstrap_04_workers.rs @@ -1,13 +1,9 @@ -use async_trait::async_trait; -use derive_new::new; -use harmony_types::id::Id; -use log::info; use serde::Serialize; use crate::{ - data::Version, - interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, - inventory::Inventory, + interpret::Interpret, + inventory::HostRole, + modules::{inventory::HarmonyDiscoveryStrategy, okd::bootstrap_okd_node::OKDNodeInterpret}, score::Score, topology::HAClusterTopology, }; @@ -18,66 +14,20 @@ use crate::{ // - Persist bonding via MC/NNCP as required (same approach as masters). // ------------------------------------------------------------------------------------------------- -#[derive(Debug, Clone, Serialize, new)] -pub struct OKDSetup04WorkersScore {} +#[derive(Debug, Clone, Serialize)] +pub struct OKDSetup04WorkersScore { + pub discovery_strategy: HarmonyDiscoveryStrategy, +} impl Score for OKDSetup04WorkersScore { fn create_interpret(&self) -> Box> { - Box::new(OKDSetup04WorkersInterpret::new(self.clone())) + Box::new(OKDNodeInterpret::new( + HostRole::Worker, + self.discovery_strategy.clone(), + )) } fn name(&self) -> String { "OKDSetup04WorkersScore".to_string() } } - -#[derive(Debug, Clone)] -pub struct OKDSetup04WorkersInterpret { - score: OKDSetup04WorkersScore, - version: Version, - status: InterpretStatus, -} - -impl OKDSetup04WorkersInterpret { - pub fn new(score: OKDSetup04WorkersScore) -> Self { - let version = Version::from("1.0.0").unwrap(); - Self { - version, - score, - status: InterpretStatus::QUEUED, - } - } - - async fn render_and_reboot(&self) -> Result<(),
InterpretError> { - info!("[Workers] Rendering per-MAC PXE for workers and rebooting"); - Ok(()) - } -} - -#[async_trait] -impl Interpret for OKDSetup04WorkersInterpret { - fn get_name(&self) -> InterpretName { - InterpretName::Custom("OKDSetup04Workers") - } - - fn get_version(&self) -> Version { - self.version.clone() - } - - fn get_status(&self) -> InterpretStatus { - self.status.clone() - } - - fn get_children(&self) -> Vec { - vec![] - } - - async fn execute( - &self, - _inventory: &Inventory, - _topology: &HAClusterTopology, - ) -> Result { - self.render_and_reboot().await?; - Ok(Outcome::success("Workers provisioned".into())) - } -} diff --git a/harmony/src/modules/okd/bootstrap_okd_node.rs b/harmony/src/modules/okd/bootstrap_okd_node.rs new file mode 100644 index 0000000..4fd4c8b --- /dev/null +++ b/harmony/src/modules/okd/bootstrap_okd_node.rs @@ -0,0 +1,313 @@ +use async_trait::async_trait; +use derive_new::new; +use harmony_types::id::Id; +use log::{debug, info}; +use serde::Serialize; + +use crate::{ + data::Version, + hardware::PhysicalHost, + infra::inventory::InventoryRepositoryFactory, + interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome}, + inventory::{HostRole, Inventory}, + modules::{ + dhcp::DhcpHostBindingScore, + http::IPxeMacBootFileScore, + inventory::{DiscoverHostForRoleScore, HarmonyDiscoveryStrategy}, + okd::{ + okd_node::{BootstrapRole, ControlPlaneRole, OKDRoleProperties, WorkerRole}, + templates::BootstrapIpxeTpl, + }, + }, + score::Score, + topology::{HAClusterTopology, HostBinding, LogicalHost}, +}; + +#[derive(Debug, Clone, Serialize, new)] +pub struct OKDNodeInstallationScore { + host_role: HostRole, + discovery_strategy: HarmonyDiscoveryStrategy, +} + +impl Score for OKDNodeInstallationScore { + fn name(&self) -> String { + "OKDNodeScore".to_string() + } + + fn create_interpret(&self) -> Box> { + Box::new(OKDNodeInterpret::new( + self.host_role.clone(), + self.discovery_strategy.clone(), + )) + } +} + 
+#[derive(Debug, Clone)] +pub struct OKDNodeInterpret { + host_role: HostRole, + discovery_strategy: HarmonyDiscoveryStrategy, +} + +impl OKDNodeInterpret { + pub fn new(host_role: HostRole, discovery_strategy: HarmonyDiscoveryStrategy) -> Self { + Self { + host_role, + discovery_strategy, + } + } + + fn okd_role_properties(&self, role: &HostRole) -> &'static dyn OKDRoleProperties { + match role { + HostRole::Bootstrap => &BootstrapRole, + HostRole::ControlPlane => &ControlPlaneRole, + HostRole::Worker => &WorkerRole, + } + } + + async fn get_nodes( + &self, + inventory: &Inventory, + topology: &HAClusterTopology, + ) -> Result, InterpretError> { + let repo = InventoryRepositoryFactory::build().await?; + + let mut hosts = repo.get_host_for_role(&self.host_role).await?; + + let okd_host_properties = self.okd_role_properties(&self.host_role); + + let required_hosts: i16 = okd_host_properties.required_hosts(); + + info!( + "Discovery of {} {} hosts in progress, current number {}", + required_hosts, + self.host_role, + hosts.len() + ); + // This score triggers the discovery agent for a specific role. + DiscoverHostForRoleScore { + role: self.host_role.clone(), + number_desired_hosts: required_hosts, + discovery_strategy: self.discovery_strategy.clone(), + } + .interpret(inventory, topology) + .await?; + + hosts = repo.get_host_for_role(&self.host_role).await?; + + if hosts.len() < required_hosts.try_into().unwrap_or(0) { + Err(InterpretError::new(format!( + "OKD Requires at least {} {} hosts, but only found {}. Cannot proceed.", + required_hosts, + self.host_role, + hosts.len() + ))) + } else { + // Take exactly the number of required hosts to ensure consistency. + Ok(hosts + .into_iter() + .take(required_hosts.try_into().unwrap()) + .collect()) + } + } + + /// Configures DHCP host bindings for all nodes. 
+ async fn configure_host_binding( + &self, + inventory: &Inventory, + topology: &HAClusterTopology, + nodes: &Vec, + ) -> Result<(), InterpretError> { + info!( + "[{}] Configuring host bindings for {} plane nodes.", + self.host_role, self.host_role, + ); + + let host_properties = self.okd_role_properties(&self.host_role); + + self.validate_host_node_match(nodes, host_properties.logical_hosts(topology))?; + + let bindings: Vec = + self.host_bindings(nodes, host_properties.logical_hosts(topology)); + + DhcpHostBindingScore { + host_binding: bindings, + domain: Some(topology.domain_name.clone()), + } + .interpret(inventory, topology) + .await?; + + Ok(()) + } + + // Ensure the topology definition matches the number of physical nodes found. + fn validate_host_node_match( + &self, + nodes: &Vec, + hosts: &Vec, + ) -> Result<(), InterpretError> { + if hosts.len() != nodes.len() { + return Err(InterpretError::new(format!( + "Mismatch between logical hosts defined in topology ({}) and physical nodes found ({}).", + hosts.len(), + nodes.len() + ))); + } + Ok(()) + } + + // Create a binding for each physical host to its corresponding logical host. + fn host_bindings( + &self, + nodes: &Vec, + hosts: &Vec, + ) -> Vec { + hosts + .iter() + .zip(nodes.iter()) + .map(|(logical_host, physical_host)| { + info!( + "Creating binding: Logical Host '{}' -> Physical Host ID '{}'", + logical_host.name, physical_host.id + ); + HostBinding { + logical_host: logical_host.clone(), + physical_host: physical_host.clone(), + } + }) + .collect() + } + + /// Renders and deploys a per-MAC iPXE boot file for each node. 
+ async fn configure_ipxe( + &self, + inventory: &Inventory, + topology: &HAClusterTopology, + nodes: &Vec, + ) -> Result<(), InterpretError> { + info!( + "[{}] Rendering per-MAC iPXE configurations.", + self.host_role + ); + + let okd_role_properties = self.okd_role_properties(&self.host_role); + // The iPXE script content is the same for all control plane nodes, + // pointing to the 'master.ign' ignition file. + let content = BootstrapIpxeTpl { + http_ip: &topology.http_server.get_ip().to_string(), + scos_path: "scos", + ignition_http_path: "okd_ignition_files", + //TODO must be refactored to not only use /dev/sda + installation_device: "/dev/sda", // This might need to be configurable per-host in the future + ignition_file_name: okd_role_properties.ignition_file(), + } + .to_string(); + + debug!("[{}] iPXE content template:\n{content}", self.host_role); + + // Create and apply an iPXE boot file for each node. + for node in nodes { + let mac_address = node.get_mac_address(); + if mac_address.is_empty() { + return Err(InterpretError::new(format!( + "Physical host with ID '{}' has no MAC addresses defined.", + node.id + ))); + } + info!( + "[{}] Applying iPXE config for node ID '{}' with MACs: {:?}", + self.host_role, node.id, mac_address + ); + + IPxeMacBootFileScore { + mac_address, + content: content.clone(), + } + .interpret(inventory, topology) + .await?; + } + + Ok(()) + } + + /// Prompts the user to reboot the target control plane nodes. + async fn reboot_targets(&self, nodes: &Vec) -> Result<(), InterpretError> { + let node_ids: Vec = nodes.iter().map(|n| n.id.to_string()).collect(); + info!( + "[{}] Requesting reboot for control plane nodes: {node_ids:?}", + self.host_role + ); + + let confirmation = inquire::Confirm::new( + &format!("Please reboot the {} {} nodes ({}) to apply their PXE configuration. 
Press enter when ready.", nodes.len(), self.host_role, node_ids.join(", ")), + ) + .prompt() + .map_err(|e| InterpretError::new(format!("User prompt failed: {e}")))?; + + if !confirmation { + return Err(InterpretError::new( + "User aborted the operation.".to_string(), + )); + } + + Ok(()) + } +} + +#[async_trait] +impl Interpret for OKDNodeInterpret { + async fn execute( + &self, + inventory: &Inventory, + topology: &HAClusterTopology, + ) -> Result { + // 1. Ensure we have the specfied number of physical hosts. + let nodes = self.get_nodes(inventory, topology).await?; + + // 2. Create DHCP reservations for the nodes. + self.configure_host_binding(inventory, topology, &nodes) + .await?; + + // 3. Create iPXE files for each node to boot from the ignition. + self.configure_ipxe(inventory, topology, &nodes).await?; + + // 4. Reboot the nodes to start the OS installation. + self.reboot_targets(&nodes).await?; + // TODO: Implement a step to validate that the installation of the nodes is + // complete and for the cluster operators to become available. + // + // The OpenShift installer only provides two wait commands which currently need to be + // run manually: + // - `openshift-install wait-for bootstrap-complete` + // - `openshift-install wait-for install-complete` + // + // There is no installer command that waits specifically for worker node + // provisioning. Worker nodes join asynchronously (via ignition + CSR approval), + // and the cluster becomes fully functional only once all nodes are Ready and the + // cluster operators report Available=True. + info!( + "[{}] Provisioning initiated. 
Monitor the cluster convergence manually.", + self.host_role + ); + + Ok(Outcome::success(format!( + "{} provisioning has been successfully initiated.", + self.host_role + ))) + } + + fn get_name(&self) -> InterpretName { + InterpretName::Custom("OKDNodeSetup".into()) + } + + fn get_version(&self) -> Version { + todo!() + } + + fn get_status(&self) -> InterpretStatus { + todo!() + } + + fn get_children(&self) -> Vec { + todo!() + } +} diff --git a/harmony/src/modules/okd/crd/nmstate.rs b/harmony/src/modules/okd/crd/nmstate.rs index f0eb4ae..3055766 100644 --- a/harmony/src/modules/okd/crd/nmstate.rs +++ b/harmony/src/modules/okd/crd/nmstate.rs @@ -417,6 +417,7 @@ pub struct EthernetSpec { #[serde(rename_all = "kebab-case")] pub struct BondSpec { pub mode: String, + #[serde(alias = "port")] pub ports: Vec, #[serde(skip_serializing_if = "Option::is_none")] pub options: Option>, diff --git a/harmony/src/modules/okd/host_network.rs b/harmony/src/modules/okd/host_network.rs index a84cf19..bc79b8d 100644 --- a/harmony/src/modules/okd/host_network.rs +++ b/harmony/src/modules/okd/host_network.rs @@ -258,7 +258,8 @@ mod tests { use crate::{ hardware::HostCategory, topology::{ - HostNetworkConfig, NetworkError, PortConfig, PreparationError, PreparationOutcome, SwitchError, SwitchPort + HostNetworkConfig, NetworkError, PortConfig, PreparationError, PreparationOutcome, + SwitchError, SwitchPort, }, }; use std::{ diff --git a/harmony/src/modules/okd/installation.rs b/harmony/src/modules/okd/installation.rs index 3deb59a..dce2457 100644 --- a/harmony/src/modules/okd/installation.rs +++ b/harmony/src/modules/okd/installation.rs @@ -48,10 +48,13 @@ //! - internal_domain: Internal cluster domain (e.g., cluster.local or harmony.mcd). 
use crate::{ - modules::okd::{ - OKDSetup01InventoryScore, OKDSetup02BootstrapScore, OKDSetup03ControlPlaneScore, - OKDSetup04WorkersScore, OKDSetup05SanityCheckScore, OKDSetupPersistNetworkBondScore, - bootstrap_06_installation_report::OKDSetup06InstallationReportScore, + modules::{ + inventory::HarmonyDiscoveryStrategy, + okd::{ + OKDSetup01InventoryScore, OKDSetup02BootstrapScore, OKDSetup03ControlPlaneScore, + OKDSetup04WorkersScore, OKDSetup05SanityCheckScore, OKDSetupPersistNetworkBondScore, + bootstrap_06_installation_report::OKDSetup06InstallationReportScore, + }, }, score::Score, topology::HAClusterTopology, @@ -60,13 +63,19 @@ use crate::{ pub struct OKDInstallationPipeline; impl OKDInstallationPipeline { - pub async fn get_all_scores() -> Vec>> { + pub async fn get_all_scores( + discovery_strategy: HarmonyDiscoveryStrategy, + ) -> Vec>> { vec![ Box::new(OKDSetup01InventoryScore::new()), Box::new(OKDSetup02BootstrapScore::new()), - Box::new(OKDSetup03ControlPlaneScore::new()), + Box::new(OKDSetup03ControlPlaneScore { + discovery_strategy: discovery_strategy.clone(), + }), Box::new(OKDSetupPersistNetworkBondScore::new()), - Box::new(OKDSetup04WorkersScore::new()), + Box::new(OKDSetup04WorkersScore { + discovery_strategy: discovery_strategy.clone(), + }), Box::new(OKDSetup05SanityCheckScore::new()), Box::new(OKDSetup06InstallationReportScore::new()), ] diff --git a/harmony/src/modules/okd/mod.rs b/harmony/src/modules/okd/mod.rs index 8bb85ef..1cf66bc 100644 --- a/harmony/src/modules/okd/mod.rs +++ b/harmony/src/modules/okd/mod.rs @@ -6,12 +6,14 @@ mod bootstrap_05_sanity_check; mod bootstrap_06_installation_report; pub mod bootstrap_dhcp; pub mod bootstrap_load_balancer; +pub mod bootstrap_okd_node; mod bootstrap_persist_network_bond; pub mod dhcp; pub mod dns; pub mod installation; pub mod ipxe; pub mod load_balancer; +pub mod okd_node; pub mod templates; pub mod upgrade; pub use bootstrap_01_prepare::*; diff --git a/harmony/src/modules/okd/okd_node.rs 
b/harmony/src/modules/okd/okd_node.rs new file mode 100644 index 0000000..d04478b --- /dev/null +++ b/harmony/src/modules/okd/okd_node.rs @@ -0,0 +1,54 @@ +use crate::topology::{HAClusterTopology, LogicalHost}; + +pub trait OKDRoleProperties { + fn ignition_file(&self) -> &'static str; + fn required_hosts(&self) -> i16; + fn logical_hosts<'a>(&self, t: &'a HAClusterTopology) -> &'a Vec; +} + +pub struct BootstrapRole; +pub struct ControlPlaneRole; +pub struct WorkerRole; +pub struct StorageRole; + +impl OKDRoleProperties for BootstrapRole { + fn ignition_file(&self) -> &'static str { + "bootstrap.ign" + } + + fn required_hosts(&self) -> i16 { + 1 + } + + fn logical_hosts<'a>(&self, t: &'a HAClusterTopology) -> &'a Vec { + todo!() + } +} + +impl OKDRoleProperties for ControlPlaneRole { + fn ignition_file(&self) -> &'static str { + "master.ign" + } + + fn required_hosts(&self) -> i16 { + 3 + } + + fn logical_hosts<'a>(&self, t: &'a HAClusterTopology) -> &'a Vec { + &t.control_plane + } +} + +impl OKDRoleProperties for WorkerRole { + fn ignition_file(&self) -> &'static str { + "worker.ign" + } + + fn required_hosts(&self) -> i16 { + 2 + } + + fn logical_hosts<'a>(&self, t: &'a HAClusterTopology) -> &'a Vec { + &t.workers + } +} diff --git a/harmony_macros/src/lib.rs b/harmony_macros/src/lib.rs index 87ac818..0869fd3 100644 --- a/harmony_macros/src/lib.rs +++ b/harmony_macros/src/lib.rs @@ -135,15 +135,17 @@ pub fn ingress_path(input: TokenStream) -> TokenStream { #[proc_macro] pub fn cidrv4(input: TokenStream) -> TokenStream { - let input = parse_macro_input!(input as LitStr); - let cidr_str = input.value(); + let lit = parse_macro_input!(input as LitStr); - if cidr_str.parse::().is_ok() { - let expanded = quote! { #cidr_str.parse::().unwrap() }; - return TokenStream::from(expanded); - } + // This is the IMPORTANT part: + // we re-emit the *string literal itself* + let expanded = quote! 
{ + #lit + .parse::() + .expect("Invalid IPv4 CIDR literal") + }; - panic!("Invalid IPv4 CIDR : {}", cidr_str); + TokenStream::from(expanded) } /// Creates a `harmony_types::net::Url::Url` from a string literal. diff --git a/harmony_types/src/net.rs b/harmony_types/src/net.rs index 51de86e..6086e54 100644 --- a/harmony_types/src/net.rs +++ b/harmony_types/src/net.rs @@ -1,6 +1,6 @@ use serde::{Deserialize, Serialize}; -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize, PartialOrd, Ord)] +#[derive(Copy, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, PartialOrd, Ord)] pub struct MacAddress(pub [u8; 6]); impl MacAddress { @@ -19,6 +19,14 @@ impl From<&MacAddress> for String { } } +impl std::fmt::Debug for MacAddress { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_tuple("MacAddress") + .field(&String::from(self)) + .finish() + } +} + impl std::fmt::Display for MacAddress { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.write_str(&String::from(self)) diff --git a/harmony_types/src/switch.rs b/harmony_types/src/switch.rs index 611207d..4cf19a6 100644 --- a/harmony_types/src/switch.rs +++ b/harmony_types/src/switch.rs @@ -1,6 +1,6 @@ -use std::{fmt, str::FromStr}; use log::trace; use serde::Serialize; +use std::{fmt, str::FromStr}; /// Simple error type for port parsing failures. #[derive(Debug)] @@ -74,7 +74,8 @@ pub enum PortDeclaration { Single(PortLocation), /// A Named port, often used for virtual ports such as PortChannels. Example /// ```rust - /// PortDeclaration::Named("1".to_string()) + /// # use harmony_types::switch::PortDeclaration; + /// PortDeclaration::Named("1".to_string()); /// ``` Named(String), /// A strictly sequential range defined by two endpoints using the hyphen separator (`-`). 
@@ -140,8 +141,19 @@ impl PortDeclaration { match PortLocation::from_str(port_str) { Ok(loc) => Ok(PortDeclaration::Single(loc)), Err(e) => { - trace!("Failed to parse PortLocation {port_str} : {e}"); - trace!("Falling back on named port"); + let segments: Vec<&str> = port_str.split('/').collect(); + let segment_count = segments.len(); + + // Logic: + // If it has 3 segments but failed (e.g., "1/A/1"), it's an InvalidSegment. + // If it has MORE than 3 segments (e.g., "1/1/1/1" or "1/1/1/"), it's an InvalidFormat. + if segment_count >= 3 { + return Err(e); + } + + // Otherwise, it's something else entirely (e.g., "eth0", "vlan10"), + // so we treat it as a Named port. + trace!("Falling back on named port for: {port_str}"); Ok(PortDeclaration::Named(port_str.to_string())) } } diff --git a/opnsense-config-xml/src/data/haproxy.rs b/opnsense-config-xml/src/data/haproxy.rs index b0aedc2..1114038 100644 --- a/opnsense-config-xml/src/data/haproxy.rs +++ b/opnsense-config-xml/src/data/haproxy.rs @@ -106,11 +106,37 @@ pub struct HAProxy { pub groups: MaybeString, pub users: MaybeString, pub cpus: MaybeString, - pub resolvers: MaybeString, + pub resolvers: HAProxyResolvers, pub mailers: MaybeString, pub maintenance: Maintenance, } +#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)] +pub struct HAProxyResolvers { + #[yaserde(rename = "resolver")] + pub resolver: Option, +} + +#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)] +pub struct Resolver { + pub id: String, + pub enabled: i32, + pub name: String, + pub description: MaybeString, + pub nameservers: String, + pub parse_resolv_conf: String, + pub resolve_retries: i32, + pub timeout_resolve: String, + pub timeout_retry: String, + pub accepted_payload_size: MaybeString, + pub hold_valid: MaybeString, + pub hold_obsolete: MaybeString, + pub hold_refused: MaybeString, + pub hold_nx: MaybeString, + pub hold_timeout: MaybeString, + pub hold_other: MaybeString, +} + #[derive(Default, PartialEq, 
Debug, YaSerialize, YaDeserialize)] pub struct Maintenance { #[yaserde(rename = "cronjobs")] diff --git a/opnsense-config-xml/src/data/opnsense.rs b/opnsense-config-xml/src/data/opnsense.rs index 8a2f64f..bf4e652 100644 --- a/opnsense-config-xml/src/data/opnsense.rs +++ b/opnsense-config-xml/src/data/opnsense.rs @@ -136,6 +136,7 @@ pub struct Rule { pub updated: Option, pub created: Option, pub disabled: Option, + pub log: Option, } #[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)] @@ -216,7 +217,7 @@ pub struct System { pub maximumfrags: Option, pub aliasesresolveinterval: Option, pub maximumtableentries: Option, - pub language: String, + pub language: Option, pub dnsserver: Option, pub dns1gw: Option, pub dns2gw: Option, @@ -1152,6 +1153,7 @@ pub struct UnboundGeneral { pub local_zone_type: String, pub outgoing_interface: MaybeString, pub enable_wpad: MaybeString, + pub safesearch: MaybeString, } #[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)] @@ -1205,15 +1207,15 @@ pub struct Acls { #[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)] pub struct Dnsbl { - pub enabled: i32, - pub safesearch: MaybeString, + pub enabled: Option, + pub safesearch: Option, #[yaserde(rename = "type")] - pub r#type: MaybeString, - pub lists: MaybeString, - pub whitelists: MaybeString, - pub blocklists: MaybeString, - pub wildcards: MaybeString, - pub address: MaybeString, + pub r#type: Option, + pub lists: Option, + pub whitelists: Option, + pub blocklists: Option, + pub wildcards: Option, + pub address: Option, pub nxdomain: Option, } @@ -1241,6 +1243,7 @@ pub struct Host { pub ttl: Option, pub server: String, pub description: Option, + pub txtdata: MaybeString, } impl Host { @@ -1256,6 +1259,7 @@ impl Host { ttl: Some(MaybeString::default()), mx: MaybeString::default(), description: None, + txtdata: MaybeString::default(), } } } diff --git a/opnsense-config/src/tests/data/config-25.7-dnsmasq-static-host.xml 
b/opnsense-config/src/tests/data/config-25.7-dnsmasq-static-host.xml index f36e4f7..737766c 100644 --- a/opnsense-config/src/tests/data/config-25.7-dnsmasq-static-host.xml +++ b/opnsense-config/src/tests/data/config-25.7-dnsmasq-static-host.xml @@ -612,6 +612,7 @@ transparent 0 + 0 diff --git a/opnsense-config/src/tests/data/config-full-1.xml b/opnsense-config/src/tests/data/config-full-1.xml index 378d577..9b417f2 100644 --- a/opnsense-config/src/tests/data/config-full-1.xml +++ b/opnsense-config/src/tests/data/config-full-1.xml @@ -2003,6 +2003,7 @@ transparent + @@ -2071,6 +2072,7 @@ 192.168.20.161 Some app local + 1 @@ -2081,6 +2083,7 @@ 192.168.20.161 Some app local + 1 @@ -2091,6 +2094,7 @@ 192.168.20.161 Some app local + @@ -2117,6 +2121,7 @@ + 03031aec-2e84-462e-9eab-57762dde667a,98e6ca3d-1de9-449b-be80-77022221b509,67c0ace5-e802-4d2b-a536-f8b7a2db6f99,74b60fff-7844-4097-9966-f1c2b1ad29ff,3de82ad5-bc1b-4b91-9598-f906e58ac937,a95e6b5e-24a4-40b5-bb41-b79e784f6f1c,6c9a12c6-c1ca-4c14-866b-975406a30590,c33b308b-7125-4688-9561-989ace8787b5,e43f004a-23bf-4027-8fb0-953fbb40479f diff --git a/opnsense-config/src/tests/data/config-full-25.7-dnsmasq-options.xml b/opnsense-config/src/tests/data/config-full-25.7-dnsmasq-options.xml index 879d8d6..d2303a9 100644 --- a/opnsense-config/src/tests/data/config-full-25.7-dnsmasq-options.xml +++ b/opnsense-config/src/tests/data/config-full-25.7-dnsmasq-options.xml @@ -614,6 +614,7 @@ transparent 0 + 0 diff --git a/opnsense-config/src/tests/data/config-full-25.7-dummy-dnsmasq-options.xml b/opnsense-config/src/tests/data/config-full-25.7-dummy-dnsmasq-options.xml index 5e22137..f7d7739 100644 --- a/opnsense-config/src/tests/data/config-full-25.7-dummy-dnsmasq-options.xml +++ b/opnsense-config/src/tests/data/config-full-25.7-dummy-dnsmasq-options.xml @@ -750,6 +750,7 @@ transparent 0 + 0 diff --git a/opnsense-config/src/tests/data/config-full-25.7.xml b/opnsense-config/src/tests/data/config-full-25.7.xml index 1cd4909..eccdee3 
100644 --- a/opnsense-config/src/tests/data/config-full-25.7.xml +++ b/opnsense-config/src/tests/data/config-full-25.7.xml @@ -709,6 +709,7 @@ transparent 0 + 0 diff --git a/opnsense-config/src/tests/data/config-full-ncd0.xml b/opnsense-config/src/tests/data/config-full-ncd0.xml index 9243cf2..6cb6186 100644 --- a/opnsense-config/src/tests/data/config-full-ncd0.xml +++ b/opnsense-config/src/tests/data/config-full-ncd0.xml @@ -951,6 +951,7 @@ transparent + 0 diff --git a/opnsense-config/src/tests/data/config-opnsense-25.1.xml b/opnsense-config/src/tests/data/config-opnsense-25.1.xml index c6bc1a8..0c9a6f1 100644 --- a/opnsense-config/src/tests/data/config-opnsense-25.1.xml +++ b/opnsense-config/src/tests/data/config-opnsense-25.1.xml @@ -808,6 +808,7 @@ transparent + diff --git a/opnsense-config/src/tests/data/config-structure-with-dhcp-staticmap-entry.xml b/opnsense-config/src/tests/data/config-structure-with-dhcp-staticmap-entry.xml index f41b055..09cac61 100644 --- a/opnsense-config/src/tests/data/config-structure-with-dhcp-staticmap-entry.xml +++ b/opnsense-config/src/tests/data/config-structure-with-dhcp-staticmap-entry.xml @@ -726,6 +726,7 @@ transparent + 0 @@ -793,6 +794,7 @@ 192.168.20.161 Some app local + 1 @@ -803,6 +805,7 @@ 192.168.20.161 Some app local + 1 @@ -813,6 +816,7 @@ 192.168.20.161 Some app local + @@ -840,6 +844,7 @@ 03031aec-2e84-462e-9eab-57762dde667a,98e6ca3d-1de9-449b-be80-77022221b509,67c0ace5-e802-4d2b-a536-f8b7a2db6f99,74b60fff-7844-4097-9966-f1c2b1ad29ff,3de82ad5-bc1b-4b91-9598-f906e58ac937,a95e6b5e-24a4-40b5-bb41-b79e784f6f1c,6c9a12c6-c1ca-4c14-866b-975406a30590,c33b308b-7125-4688-9561-989ace8787b5,e43f004a-23bf-4027-8fb0-953fbb40479f + diff --git a/opnsense-config/src/tests/data/config-structure.xml b/opnsense-config/src/tests/data/config-structure.xml index 32c9317..ae26f76 100644 --- a/opnsense-config/src/tests/data/config-structure.xml +++ b/opnsense-config/src/tests/data/config-structure.xml @@ -718,6 +718,7 @@ transparent + 0 
@@ -785,6 +786,7 @@ 192.168.20.161 Some app local + 1 @@ -795,6 +797,7 @@ 192.168.20.161 Some app local + 1 @@ -805,6 +808,7 @@ 192.168.20.161 Some app local + @@ -832,6 +836,7 @@ 03031aec-2e84-462e-9eab-57762dde667a,98e6ca3d-1de9-449b-be80-77022221b509,67c0ace5-e802-4d2b-a536-f8b7a2db6f99,74b60fff-7844-4097-9966-f1c2b1ad29ff,3de82ad5-bc1b-4b91-9598-f906e58ac937,a95e6b5e-24a4-40b5-bb41-b79e784f6f1c,6c9a12c6-c1ca-4c14-866b-975406a30590,c33b308b-7125-4688-9561-989ace8787b5,e43f004a-23bf-4027-8fb0-953fbb40479f + diff --git a/opnsense-config/src/tests/data/config-vm-test.xml b/opnsense-config/src/tests/data/config-vm-test.xml index 1d176b4..06429df 100644 --- a/opnsense-config/src/tests/data/config-vm-test.xml +++ b/opnsense-config/src/tests/data/config-vm-test.xml @@ -869,6 +869,7 @@ transparent + diff --git a/opnsense-config/src/tests/data/config-vm-test_cheat_descr.xml b/opnsense-config/src/tests/data/config-vm-test_cheat_descr.xml index 4f1442a..a38a712 100644 --- a/opnsense-config/src/tests/data/config-vm-test_cheat_descr.xml +++ b/opnsense-config/src/tests/data/config-vm-test_cheat_descr.xml @@ -862,6 +862,7 @@ transparent + diff --git a/opnsense-config/src/tests/data/config-vm-test_linted.xml b/opnsense-config/src/tests/data/config-vm-test_linted.xml index 1d176b4..06429df 100644 --- a/opnsense-config/src/tests/data/config-vm-test_linted.xml +++ b/opnsense-config/src/tests/data/config-vm-test_linted.xml @@ -869,6 +869,7 @@ transparent +