From a93896707f56cc5f4a5505ff2dffc3781f3c7b29 Mon Sep 17 00:00:00 2001 From: Jean-Gabriel Gill-Couture Date: Sat, 7 Mar 2026 16:46:47 -0500 Subject: [PATCH 1/3] okd: add worker nodes to load balancer backend pool Include both control plane and worker nodes in ports 80 and 443 backend pools --- harmony/src/modules/okd/load_balancer.rs | 25 ++++++++++++++++++++---- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/harmony/src/modules/okd/load_balancer.rs b/harmony/src/modules/okd/load_balancer.rs index 8a2b57f..e43cfea 100644 --- a/harmony/src/modules/okd/load_balancer.rs +++ b/harmony/src/modules/okd/load_balancer.rs @@ -28,12 +28,12 @@ impl OKDLoadBalancerScore { let public_ip = topology.router.get_gateway(); let public_services = vec![ LoadBalancerService { - backend_servers: Self::control_plane_to_backend_server(topology, 80), + backend_servers: Self::nodes_to_backend_server(topology, 80), listening_port: SocketAddr::new(public_ip, 80), health_check: Some(HealthCheck::TCP(None)), }, LoadBalancerService { - backend_servers: Self::control_plane_to_backend_server(topology, 443), + backend_servers: Self::nodes_to_backend_server(topology, 443), listening_port: SocketAddr::new(public_ip, 443), health_check: Some(HealthCheck::TCP(None)), }, @@ -41,12 +41,12 @@ impl OKDLoadBalancerScore { let private_services = vec![ LoadBalancerService { - backend_servers: Self::control_plane_to_backend_server(topology, 80), + backend_servers: Self::nodes_to_backend_server(topology, 80), listening_port: SocketAddr::new(public_ip, 80), health_check: Some(HealthCheck::TCP(None)), }, LoadBalancerService { - backend_servers: Self::control_plane_to_backend_server(topology, 443), + backend_servers: Self::nodes_to_backend_server(topology, 443), listening_port: SocketAddr::new(public_ip, 443), health_check: Some(HealthCheck::TCP(None)), }, @@ -87,6 +87,23 @@ impl OKDLoadBalancerScore { }) .collect() } + + fn nodes_to_backend_server(topology: &HAClusterTopology, port: u16) -> Vec<BackendServer> { + 
let mut nodes = Vec::new(); + for cp in &topology.control_plane { + nodes.push(BackendServer { + address: cp.ip.to_string(), + port, + }); + } + for worker in &topology.workers { + nodes.push(BackendServer { + address: worker.ip.to_string(), + port, + }); + } + nodes + } } impl Score for OKDLoadBalancerScore { -- 2.39.5 From bc2b3282960d2be0cf05299d50690fa74b19d88d Mon Sep 17 00:00:00 2001 From: Jean-Gabriel Gill-Couture Date: Sat, 7 Mar 2026 17:15:24 -0500 Subject: [PATCH 2/3] okd: include workers in load balancer backend pool + add tests and docs - Add nodes_to_backend_server() function to include both control plane and worker nodes - Update public services (ports 80, 443) to use worker-inclusive backend pool - Add comprehensive tests covering all backend configurations - Add documentation with OKD reference link and usage examples --- harmony/src/modules/okd/load_balancer.rs | 228 ++++++++++++++++++++++- 1 file changed, 227 insertions(+), 1 deletion(-) diff --git a/harmony/src/modules/okd/load_balancer.rs b/harmony/src/modules/okd/load_balancer.rs index e43cfea..298c45f 100644 --- a/harmony/src/modules/okd/load_balancer.rs +++ b/harmony/src/modules/okd/load_balancer.rs @@ -8,7 +8,7 @@ use crate::{ score::Score, topology::{ BackendServer, HAClusterTopology, HealthCheck, HttpMethod, HttpStatusCode, LoadBalancer, - LoadBalancerService, SSL, Topology, + LoadBalancerService, LogicalHost, Router, Topology, SSL, }, }; @@ -23,6 +23,34 @@ pub struct OKDLoadBalancerScore { load_balancer_score: LoadBalancerScore, } +/// OKD Load Balancer Score configuration +/// +/// This module configures the load balancer for OKD (OpenShift Kubernetes Distribution) +/// bare metal installations. +/// +/// # Backend Server Configuration +/// +/// For ports 80 and 443 (ingress traffic), the load balancer includes both control plane +/// and worker nodes in the backend pool. 
This is consistent with OKD's requirement that +/// ingress traffic should be load balanced across all nodes that may run ingress router pods. +/// +/// For ports 22623 (Ignition API) and 6443 (Kubernetes API), only control plane nodes +/// are included as backends, as these services are control plane specific. +/// +/// # References +/// +/// - [OKD Bare Metal Installation - External Load Balancer Configuration] +/// (<https://docs.okd.io/latest/installing/installing_bare_metal/installing-bare-metal.html#installation-load-balancing-user-infra_installing-bare-metal>) +/// +/// # Example +/// +/// ```no_run +/// use crate::topology::HAClusterTopology; +/// use harmony::modules::okd::OKDLoadBalancerScore; +/// +/// let topology: HAClusterTopology = /* get topology */; +/// let score = OKDLoadBalancerScore::new(&topology); +/// ``` impl OKDLoadBalancerScore { pub fn new(topology: &HAClusterTopology) -> Self { let public_ip = topology.router.get_gateway(); @@ -74,6 +102,11 @@ impl OKDLoadBalancerScore { } } + /// Creates backend servers list for control plane nodes only + /// + /// Use this for control plane-specific services like: + /// - Port 22623: Ignition API (machine configuration during bootstrap) + /// - Port 6443: Kubernetes API server fn control_plane_to_backend_server( topology: &HAClusterTopology, port: u16, @@ -88,6 +121,14 @@ impl OKDLoadBalancerScore { .collect() } + /// Creates backend servers list for all nodes (control plane + workers) + /// + /// Use this for ingress traffic that should be distributed across all nodes: + /// - Port 80: HTTP ingress traffic + /// - Port 443: HTTPS ingress traffic + /// + /// In OKD, ingress router pods can run on any node, so both control plane + /// and worker nodes should be included in the load balancer backend pool. 
fn nodes_to_backend_server(topology: &HAClusterTopology, port: u16) -> Vec<BackendServer> { let mut nodes = Vec::new(); for cp in &topology.control_plane { @@ -106,6 +147,191 @@ impl OKDLoadBalancerScore { } } +#[cfg(test)] +mod tests { + use std::sync::{Arc, OnceLock}; + + use super::*; + use crate::topology::DummyInfra; + use harmony_macros::ip; + use harmony_types::net::IpAddress; + + fn create_test_topology() -> HAClusterTopology { + let router = Arc::new(DummyRouter { + gateway: ip!("192.168.1.1"), + }); + + HAClusterTopology { + domain_name: "test.example.com".to_string(), + router, + load_balancer: Arc::new(DummyInfra), + firewall: Arc::new(DummyInfra), + dhcp_server: Arc::new(DummyInfra), + tftp_server: Arc::new(DummyInfra), + http_server: Arc::new(DummyInfra), + dns_server: Arc::new(DummyInfra), + node_exporter: Arc::new(DummyInfra), + switch_client: Arc::new(DummyInfra), + bootstrap_host: LogicalHost { + ip: ip!("192.168.1.100"), + name: "bootstrap".to_string(), + }, + control_plane: vec![ + LogicalHost { + ip: ip!("192.168.1.10"), + name: "control-plane-0".to_string(), + }, + LogicalHost { + ip: ip!("192.168.1.11"), + name: "control-plane-1".to_string(), + }, + LogicalHost { + ip: ip!("192.168.1.12"), + name: "control-plane-2".to_string(), + }, + ], + workers: vec![ + LogicalHost { + ip: ip!("192.168.1.20"), + name: "worker-0".to_string(), + }, + LogicalHost { + ip: ip!("192.168.1.21"), + name: "worker-1".to_string(), + }, + ], + kubeconfig: None, + network_manager: OnceLock::new(), + } + } + + struct DummyRouter { + gateway: IpAddress, + } + + impl Router for DummyRouter { + fn get_gateway(&self) -> IpAddress { + self.gateway + } + fn get_cidr(&self) -> cidr::Ipv4Cidr { + let ipv4 = match self.gateway { + IpAddress::V4(ip) => ip, + IpAddress::V6(_) => panic!("IPv6 not supported"), + }; + cidr::Ipv4Cidr::new(ipv4, 24).unwrap() + } + fn get_host(&self) -> LogicalHost { + LogicalHost { + ip: self.gateway, + name: "router".to_string(), + } + } + } + + #[test] + fn 
test_nodes_to_backend_server_includes_control_plane_and_workers() { + let topology = create_test_topology(); + + let backend_servers = OKDLoadBalancerScore::nodes_to_backend_server(&topology, 80); + + assert_eq!(backend_servers.len(), 5); + + let addresses: Vec<&str> = backend_servers.iter().map(|s| s.address.as_str()).collect(); + assert!(addresses.contains(&"192.168.1.10")); + assert!(addresses.contains(&"192.168.1.11")); + assert!(addresses.contains(&"192.168.1.12")); + assert!(addresses.contains(&"192.168.1.20")); + assert!(addresses.contains(&"192.168.1.21")); + } + + #[test] + fn test_control_plane_to_backend_server_only_includes_control_plane() { + let topology = create_test_topology(); + + let backend_servers = OKDLoadBalancerScore::control_plane_to_backend_server(&topology, 80); + + assert_eq!(backend_servers.len(), 3); + + let addresses: Vec<&str> = backend_servers.iter().map(|s| s.address.as_str()).collect(); + assert!(addresses.contains(&"192.168.1.10")); + assert!(addresses.contains(&"192.168.1.11")); + assert!(addresses.contains(&"192.168.1.12")); + assert!(!addresses.contains(&"192.168.1.20")); + assert!(!addresses.contains(&"192.168.1.21")); + } + + #[test] + fn test_public_services_include_all_nodes_on_port_80_and_443() { + let topology = create_test_topology(); + let score = OKDLoadBalancerScore::new(&topology); + + let public_service_80 = score + .load_balancer_score + .public_services + .iter() + .find(|s| s.listening_port.port() == 80) + .expect("Public service on port 80 not found"); + + let public_service_443 = score + .load_balancer_score + .public_services + .iter() + .find(|s| s.listening_port.port() == 443) + .expect("Public service on port 443 not found"); + + assert_eq!(public_service_80.backend_servers.len(), 5); + assert_eq!(public_service_443.backend_servers.len(), 5); + } + + #[test] + fn test_private_service_port_22623_only_control_plane() { + let topology = create_test_topology(); + let score = 
OKDLoadBalancerScore::new(&topology); + + let private_service_22623 = score + .load_balancer_score + .private_services + .iter() + .find(|s| s.listening_port.port() == 22623) + .expect("Private service on port 22623 not found"); + + assert_eq!(private_service_22623.backend_servers.len(), 3); + } + + #[test] + fn test_private_service_port_6443_only_control_plane() { + let topology = create_test_topology(); + let score = OKDLoadBalancerScore::new(&topology); + + let private_service_6443 = score + .load_balancer_score + .private_services + .iter() + .find(|s| s.listening_port.port() == 6443) + .expect("Private service on port 6443 not found"); + + assert_eq!(private_service_6443.backend_servers.len(), 3); + assert!( + matches!( + private_service_6443.health_check, + Some(HealthCheck::HTTP(_, _, _, _)) + ), + "Expected HTTP health check for port 6443" + ); + } + + #[test] + fn test_all_backend_servers_have_correct_port() { + let topology = create_test_topology(); + + let backend_servers = OKDLoadBalancerScore::nodes_to_backend_server(&topology, 443); + + for server in backend_servers { + assert_eq!(server.port, 443); + } + } +} + impl Score for OKDLoadBalancerScore { fn create_interpret(&self) -> Box> { self.load_balancer_score.create_interpret() -- 2.39.5 From d0a1a73710d87743a82356dbc9f0016b53bcb5b1 Mon Sep 17 00:00:00 2001 From: Jean-Gabriel Gill-Couture Date: Sat, 7 Mar 2026 17:28:04 -0500 Subject: [PATCH 3/3] doc: fix example code to use ignore instead of no_run - fails because cannot be used at module level - Use to skip doc compilation while keeping example visible --- Cargo.lock | 20 -------------------- harmony/src/modules/okd/load_balancer.rs | 8 ++++---- 2 files changed, 4 insertions(+), 24 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 10e2f35..131505a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3769,26 +3769,6 @@ dependencies = [ "thiserror 1.0.69", ] -[[package]] -name = "json-prompt" -version = "0.1.0" -dependencies = [ - "brocade", - "cidr", - 
"env_logger", - "harmony", - "harmony_cli", - "harmony_macros", - "harmony_secret", - "harmony_secret_derive", - "harmony_types", - "log", - "schemars 0.8.22", - "serde", - "tokio", - "url", -] - [[package]] name = "jsonpath-rust" version = "0.7.5" diff --git a/harmony/src/modules/okd/load_balancer.rs b/harmony/src/modules/okd/load_balancer.rs index 298c45f..e945558 100644 --- a/harmony/src/modules/okd/load_balancer.rs +++ b/harmony/src/modules/okd/load_balancer.rs @@ -8,7 +8,7 @@ use crate::{ score::Score, topology::{ BackendServer, HAClusterTopology, HealthCheck, HttpMethod, HttpStatusCode, LoadBalancer, - LoadBalancerService, LogicalHost, Router, Topology, SSL, + LoadBalancerService, LogicalHost, Router, SSL, Topology, }, }; @@ -44,11 +44,11 @@ pub struct OKDLoadBalancerScore { /// /// # Example /// -/// ```no_run -/// use crate::topology::HAClusterTopology; +/// ```ignore +/// use harmony::topology::HAClusterTopology; /// use harmony::modules::okd::OKDLoadBalancerScore; /// -/// let topology: HAClusterTopology = /* get topology */; +/// let topology: HAClusterTopology = /* get topology from your infrastructure */; /// let score = OKDLoadBalancerScore::new(&topology); /// ``` impl OKDLoadBalancerScore { -- 2.39.5