Compare commits

..

6 Commits

Author SHA1 Message Date
d0a1a73710 doc: fix example code to use ignore instead of no_run
All checks were successful
Run Check Script / check (pull_request) Successful in 1m43s
- `no_run` fails because it cannot be used at module level
- Use `ignore` to skip doc compilation while keeping example visible
2026-03-07 17:30:24 -05:00
bc2b328296 okd: include workers in load balancer backend pool + add tests and docs
Some checks failed
Run Check Script / check (pull_request) Failing after 24s
- Add nodes_to_backend_server() function to include both control plane and worker nodes
- Update public services (ports 80, 443) to use worker-inclusive backend pool
- Add comprehensive tests covering all backend configurations
- Add documentation with OKD reference link and usage examples
2026-03-07 17:15:24 -05:00
a93896707f okd: add worker nodes to load balancer backend pool
All checks were successful
Run Check Script / check (pull_request) Successful in 1m29s
Include both control plane and worker nodes in ports 80 and 443 backend pools
2026-03-07 16:46:47 -05:00
0e9b23a320 Merge branch 'feat/change-node-readiness-strategy'
Some checks failed
Run Check Script / check (push) Successful in 1m26s
Compile and package harmony_composer / package_harmony_composer (push) Failing after 2m11s
2026-03-07 16:35:14 -05:00
5412c34957 Merge pull request 'fix: change vlan definition from MaybeString to RawXml' (#245) from feat/opnsense-config-xml-support-vlan into master
Some checks failed
Run Check Script / check (push) Successful in 1m47s
Compile and package harmony_composer / package_harmony_composer (push) Failing after 2m7s
Reviewed-on: #245
2026-03-07 20:59:28 +00:00
55de206523 fix: change vlan definition from MaybeString to RawXml
All checks were successful
Run Check Script / check (pull_request) Successful in 1m29s
2026-03-07 10:03:03 -05:00
2 changed files with 249 additions and 6 deletions

View File

@@ -8,7 +8,7 @@ use crate::{
score::Score, score::Score,
topology::{ topology::{
BackendServer, HAClusterTopology, HealthCheck, HttpMethod, HttpStatusCode, LoadBalancer, BackendServer, HAClusterTopology, HealthCheck, HttpMethod, HttpStatusCode, LoadBalancer,
LoadBalancerService, SSL, Topology, LoadBalancerService, LogicalHost, Router, SSL, Topology,
}, },
}; };
@@ -23,17 +23,45 @@ pub struct OKDLoadBalancerScore {
load_balancer_score: LoadBalancerScore, load_balancer_score: LoadBalancerScore,
} }
/// OKD Load Balancer Score configuration
///
/// This module configures the load balancer for OKD (OpenShift Kubernetes Distribution)
/// bare metal installations.
///
/// # Backend Server Configuration
///
/// For ports 80 and 443 (ingress traffic), the load balancer includes both control plane
/// and worker nodes in the backend pool. This is consistent with OKD's requirement that
/// ingress traffic should be load balanced across all nodes that may run ingress router pods.
///
/// For ports 22623 (Ignition API) and 6443 (Kubernetes API), only control plane nodes
/// are included as backends, as these services are control plane specific.
///
/// # References
///
/// - [OKD Bare Metal Installation - External Load Balancer Configuration]
/// (<https://docs.okd.io/latest/installing/installing_bare_metal/ipi/ipi-install-installation-workflow.html#nw-osp-configuring-external-load-balancer_ipi-install-installation-workflow>)
///
/// # Example
///
/// ```ignore
/// use harmony::topology::HAClusterTopology;
/// use harmony::modules::okd::OKDLoadBalancerScore;
///
/// let topology: HAClusterTopology = /* get topology from your infrastructure */;
/// let score = OKDLoadBalancerScore::new(&topology);
/// ```
impl OKDLoadBalancerScore { impl OKDLoadBalancerScore {
pub fn new(topology: &HAClusterTopology) -> Self { pub fn new(topology: &HAClusterTopology) -> Self {
let public_ip = topology.router.get_gateway(); let public_ip = topology.router.get_gateway();
let public_services = vec![ let public_services = vec![
LoadBalancerService { LoadBalancerService {
backend_servers: Self::control_plane_to_backend_server(topology, 80), backend_servers: Self::nodes_to_backend_server(topology, 80),
listening_port: SocketAddr::new(public_ip, 80), listening_port: SocketAddr::new(public_ip, 80),
health_check: Some(HealthCheck::TCP(None)), health_check: Some(HealthCheck::TCP(None)),
}, },
LoadBalancerService { LoadBalancerService {
backend_servers: Self::control_plane_to_backend_server(topology, 443), backend_servers: Self::nodes_to_backend_server(topology, 443),
listening_port: SocketAddr::new(public_ip, 443), listening_port: SocketAddr::new(public_ip, 443),
health_check: Some(HealthCheck::TCP(None)), health_check: Some(HealthCheck::TCP(None)),
}, },
@@ -41,12 +69,12 @@ impl OKDLoadBalancerScore {
let private_services = vec![ let private_services = vec![
LoadBalancerService { LoadBalancerService {
backend_servers: Self::control_plane_to_backend_server(topology, 80), backend_servers: Self::nodes_to_backend_server(topology, 80),
listening_port: SocketAddr::new(public_ip, 80), listening_port: SocketAddr::new(public_ip, 80),
health_check: Some(HealthCheck::TCP(None)), health_check: Some(HealthCheck::TCP(None)),
}, },
LoadBalancerService { LoadBalancerService {
backend_servers: Self::control_plane_to_backend_server(topology, 443), backend_servers: Self::nodes_to_backend_server(topology, 443),
listening_port: SocketAddr::new(public_ip, 443), listening_port: SocketAddr::new(public_ip, 443),
health_check: Some(HealthCheck::TCP(None)), health_check: Some(HealthCheck::TCP(None)),
}, },
@@ -74,6 +102,11 @@ impl OKDLoadBalancerScore {
} }
} }
/// Creates backend servers list for control plane nodes only
///
/// Use this for control plane-specific services like:
/// - Port 22623: Ignition API (machine configuration during bootstrap)
/// - Port 6443: Kubernetes API server
fn control_plane_to_backend_server( fn control_plane_to_backend_server(
topology: &HAClusterTopology, topology: &HAClusterTopology,
port: u16, port: u16,
@@ -87,6 +120,216 @@ impl OKDLoadBalancerScore {
}) })
.collect() .collect()
} }
/// Creates backend servers list for all nodes (control plane + workers)
///
/// Use this for ingress traffic that should be distributed across all nodes:
/// - Port 80: HTTP ingress traffic
/// - Port 443: HTTPS ingress traffic
///
/// In OKD, ingress router pods can run on any node, so both control plane
/// and worker nodes should be included in the load balancer backend pool.
fn nodes_to_backend_server(topology: &HAClusterTopology, port: u16) -> Vec<BackendServer> {
    // Chain control plane and worker hosts into one backend pool. The
    // iterator form keeps this consistent with control_plane_to_backend_server
    // and lets `collect` reserve the full capacity up front from the
    // combined size_hint, instead of growing a Vec through repeated pushes.
    topology
        .control_plane
        .iter()
        .chain(topology.workers.iter())
        .map(|node| BackendServer {
            address: node.ip.to_string(),
            port,
        })
        .collect()
}
}
#[cfg(test)]
mod tests {
use std::sync::{Arc, OnceLock};
use super::*;
use crate::topology::DummyInfra;
use harmony_macros::ip;
use harmony_types::net::IpAddress;
/// Builds a fixed test topology: a dummy router at 192.168.1.1, three
/// control plane hosts (.10-.12) and two workers (.20-.21), with every
/// infrastructure service stubbed out by DummyInfra.
fn create_test_topology() -> HAClusterTopology {
    let router = Arc::new(DummyRouter {
        gateway: ip!("192.168.1.1"),
    });
    HAClusterTopology {
        domain_name: "test.example.com".to_string(),
        router,
        // None of the services below participate in these tests;
        // DummyInfra stands in for all of them.
        load_balancer: Arc::new(DummyInfra),
        firewall: Arc::new(DummyInfra),
        dhcp_server: Arc::new(DummyInfra),
        tftp_server: Arc::new(DummyInfra),
        http_server: Arc::new(DummyInfra),
        dns_server: Arc::new(DummyInfra),
        node_exporter: Arc::new(DummyInfra),
        switch_client: Arc::new(DummyInfra),
        bootstrap_host: LogicalHost {
            ip: ip!("192.168.1.100"),
            name: "bootstrap".to_string(),
        },
        // Three control plane nodes: the expected backend count for
        // control-plane-only pools (ports 6443 / 22623).
        control_plane: vec![
            LogicalHost {
                ip: ip!("192.168.1.10"),
                name: "control-plane-0".to_string(),
            },
            LogicalHost {
                ip: ip!("192.168.1.11"),
                name: "control-plane-1".to_string(),
            },
            LogicalHost {
                ip: ip!("192.168.1.12"),
                name: "control-plane-2".to_string(),
            },
        ],
        // Two workers: only ever included in the all-nodes pool (ports 80 / 443).
        workers: vec![
            LogicalHost {
                ip: ip!("192.168.1.20"),
                name: "worker-0".to_string(),
            },
            LogicalHost {
                ip: ip!("192.168.1.21"),
                name: "worker-1".to_string(),
            },
        ],
        kubeconfig: None,
        network_manager: OnceLock::new(),
    }
}
/// Minimal Router stub for tests: only knows its gateway address.
struct DummyRouter {
    gateway: IpAddress,
}
/// Router stub: returns a fixed gateway, a /24 derived from it, and a
/// logical host named "router".
impl Router for DummyRouter {
    fn get_gateway(&self) -> IpAddress {
        self.gateway
    }
    fn get_cidr(&self) -> cidr::Ipv4Cidr {
        // Tests only exercise IPv4; fail loudly on anything else.
        let IpAddress::V4(ipv4) = self.gateway else {
            panic!("IPv6 not supported");
        };
        cidr::Ipv4Cidr::new(ipv4, 24).unwrap()
    }
    fn get_host(&self) -> LogicalHost {
        LogicalHost {
            name: "router".to_string(),
            ip: self.gateway,
        }
    }
}
#[test]
fn test_nodes_to_backend_server_includes_control_plane_and_workers() {
    // 3 control plane + 2 workers = 5 backends, each present by address.
    let topology = create_test_topology();
    let backend_servers = OKDLoadBalancerScore::nodes_to_backend_server(&topology, 80);
    assert_eq!(backend_servers.len(), 5);
    for expected in [
        "192.168.1.10",
        "192.168.1.11",
        "192.168.1.12",
        "192.168.1.20",
        "192.168.1.21",
    ] {
        assert!(backend_servers.iter().any(|s| s.address == expected));
    }
}
#[test]
fn test_control_plane_to_backend_server_only_includes_control_plane() {
    // Only the three control plane addresses; worker addresses must be absent.
    let topology = create_test_topology();
    let backend_servers = OKDLoadBalancerScore::control_plane_to_backend_server(&topology, 80);
    assert_eq!(backend_servers.len(), 3);
    let has = |addr: &str| backend_servers.iter().any(|s| s.address == addr);
    assert!(has("192.168.1.10"));
    assert!(has("192.168.1.11"));
    assert!(has("192.168.1.12"));
    assert!(!has("192.168.1.20"));
    assert!(!has("192.168.1.21"));
}
#[test]
fn test_public_services_include_all_nodes_on_port_80_and_443() {
    // Ingress ports (80/443) must load balance across all five nodes.
    let topology = create_test_topology();
    let score = OKDLoadBalancerScore::new(&topology);
    let find_public = |port: u16| {
        score
            .load_balancer_score
            .public_services
            .iter()
            .find(move |s| s.listening_port.port() == port)
    };
    let on_80 = find_public(80).expect("Public service on port 80 not found");
    let on_443 = find_public(443).expect("Public service on port 443 not found");
    assert_eq!(on_80.backend_servers.len(), 5);
    assert_eq!(on_443.backend_servers.len(), 5);
}
#[test]
fn test_private_service_port_22623_only_control_plane() {
    // Ignition API is control plane specific: exactly 3 backends expected.
    let topology = create_test_topology();
    let score = OKDLoadBalancerScore::new(&topology);
    let service = score
        .load_balancer_score
        .private_services
        .iter()
        .find(|s| s.listening_port.port() == 22623)
        .expect("Private service on port 22623 not found");
    assert_eq!(service.backend_servers.len(), 3);
}
#[test]
fn test_private_service_port_6443_only_control_plane() {
    // Kubernetes API: control plane backends only, health-checked over HTTP.
    let topology = create_test_topology();
    let score = OKDLoadBalancerScore::new(&topology);
    let service = score
        .load_balancer_score
        .private_services
        .iter()
        .find(|s| s.listening_port.port() == 6443)
        .expect("Private service on port 6443 not found");
    assert_eq!(service.backend_servers.len(), 3);
    let is_http = matches!(service.health_check, Some(HealthCheck::HTTP(_, _, _, _)));
    assert!(is_http, "Expected HTTP health check for port 6443");
}
#[test]
fn test_all_backend_servers_have_correct_port() {
    // Every backend entry must carry the requested port (443 here).
    let topology = create_test_topology();
    let backend_servers = OKDLoadBalancerScore::nodes_to_backend_server(&topology, 443);
    assert!(backend_servers.iter().all(|s| s.port == 443));
}
} }
impl<T: Topology + LoadBalancer> Score<T> for OKDLoadBalancerScore { impl<T: Topology + LoadBalancer> Score<T> for OKDLoadBalancerScore {

View File

@@ -1540,7 +1540,7 @@ pub struct Dyndns {
pub struct Vlans { pub struct Vlans {
#[yaserde(attribute = true)] #[yaserde(attribute = true)]
pub version: String, pub version: String,
pub vlan: MaybeString, pub vlan: RawXml,
} }
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)] #[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]