|
|
|
|
@@ -8,7 +8,7 @@ use crate::{
|
|
|
|
|
score::Score,
|
|
|
|
|
topology::{
|
|
|
|
|
BackendServer, HAClusterTopology, HealthCheck, HttpMethod, HttpStatusCode, LoadBalancer,
|
|
|
|
|
LoadBalancerService, SSL, Topology,
|
|
|
|
|
LoadBalancerService, LogicalHost, Router, SSL, Topology,
|
|
|
|
|
},
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
@@ -23,17 +23,45 @@ pub struct OKDLoadBalancerScore {
|
|
|
|
|
load_balancer_score: LoadBalancerScore,
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// OKD Load Balancer Score configuration
|
|
|
|
|
///
|
|
|
|
|
/// This score configures the load balancer for OKD (OpenShift Kubernetes Distribution)
|
|
|
|
|
/// bare metal installations.
|
|
|
|
|
///
|
|
|
|
|
/// # Backend Server Configuration
|
|
|
|
|
///
|
|
|
|
|
/// For ports 80 and 443 (ingress traffic), the load balancer includes both control plane
|
|
|
|
|
/// and worker nodes in the backend pool. This is consistent with OKD's requirement that
|
|
|
|
|
/// ingress traffic should be load balanced across all nodes that may run ingress router pods.
|
|
|
|
|
///
|
|
|
|
|
/// For ports 22623 (Ignition API) and 6443 (Kubernetes API), only control plane nodes
|
|
|
|
|
/// are included as backends, as these services are control plane specific.
|
|
|
|
|
///
|
|
|
|
|
/// # References
|
|
|
|
|
///
|
|
|
|
|
/// - [OKD Bare Metal Installation - External Load Balancer Configuration]
|
|
|
|
|
/// (<https://docs.okd.io/latest/installing/installing_bare_metal/ipi/ipi-install-installation-workflow.html#nw-osp-configuring-external-load-balancer_ipi-install-installation-workflow>)
|
|
|
|
|
///
|
|
|
|
|
/// # Example
|
|
|
|
|
///
|
|
|
|
|
/// ```ignore
|
|
|
|
|
/// use harmony::topology::HAClusterTopology;
|
|
|
|
|
/// use harmony::modules::okd::OKDLoadBalancerScore;
|
|
|
|
|
///
|
|
|
|
|
/// let topology: HAClusterTopology = /* get topology from your infrastructure */;
|
|
|
|
|
/// let score = OKDLoadBalancerScore::new(&topology);
|
|
|
|
|
/// ```
|
|
|
|
|
impl OKDLoadBalancerScore {
|
|
|
|
|
pub fn new(topology: &HAClusterTopology) -> Self {
|
|
|
|
|
let public_ip = topology.router.get_gateway();
|
|
|
|
|
let public_services = vec![
|
|
|
|
|
LoadBalancerService {
|
|
|
|
|
backend_servers: Self::control_plane_to_backend_server(topology, 80),
|
|
|
|
|
backend_servers: Self::nodes_to_backend_server(topology, 80),
|
|
|
|
|
listening_port: SocketAddr::new(public_ip, 80),
|
|
|
|
|
health_check: Some(HealthCheck::TCP(None)),
|
|
|
|
|
},
|
|
|
|
|
LoadBalancerService {
|
|
|
|
|
backend_servers: Self::control_plane_to_backend_server(topology, 443),
|
|
|
|
|
backend_servers: Self::nodes_to_backend_server(topology, 443),
|
|
|
|
|
listening_port: SocketAddr::new(public_ip, 443),
|
|
|
|
|
health_check: Some(HealthCheck::TCP(None)),
|
|
|
|
|
},
|
|
|
|
|
@@ -41,12 +69,12 @@ impl OKDLoadBalancerScore {
|
|
|
|
|
|
|
|
|
|
let private_services = vec![
|
|
|
|
|
LoadBalancerService {
|
|
|
|
|
backend_servers: Self::control_plane_to_backend_server(topology, 80),
|
|
|
|
|
backend_servers: Self::nodes_to_backend_server(topology, 80),
|
|
|
|
|
listening_port: SocketAddr::new(public_ip, 80),
|
|
|
|
|
health_check: Some(HealthCheck::TCP(None)),
|
|
|
|
|
},
|
|
|
|
|
LoadBalancerService {
|
|
|
|
|
backend_servers: Self::control_plane_to_backend_server(topology, 443),
|
|
|
|
|
backend_servers: Self::nodes_to_backend_server(topology, 443),
|
|
|
|
|
listening_port: SocketAddr::new(public_ip, 443),
|
|
|
|
|
health_check: Some(HealthCheck::TCP(None)),
|
|
|
|
|
},
|
|
|
|
|
@@ -74,6 +102,11 @@ impl OKDLoadBalancerScore {
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Creates backend servers list for control plane nodes only
|
|
|
|
|
///
|
|
|
|
|
/// Use this for control plane-specific services like:
|
|
|
|
|
/// - Port 22623: Ignition API (machine configuration during bootstrap)
|
|
|
|
|
/// - Port 6443: Kubernetes API server
|
|
|
|
|
fn control_plane_to_backend_server(
|
|
|
|
|
topology: &HAClusterTopology,
|
|
|
|
|
port: u16,
|
|
|
|
|
@@ -87,6 +120,216 @@ impl OKDLoadBalancerScore {
|
|
|
|
|
})
|
|
|
|
|
.collect()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Creates backend servers list for all nodes (control plane + workers)
|
|
|
|
|
///
|
|
|
|
|
/// Use this for ingress traffic that should be distributed across all nodes:
|
|
|
|
|
/// - Port 80: HTTP ingress traffic
|
|
|
|
|
/// - Port 443: HTTPS ingress traffic
|
|
|
|
|
///
|
|
|
|
|
/// In OKD, ingress router pods can run on any node, so both control plane
|
|
|
|
|
/// and worker nodes should be included in the load balancer backend pool.
|
|
|
|
|
fn nodes_to_backend_server(topology: &HAClusterTopology, port: u16) -> Vec<BackendServer> {
|
|
|
|
|
let mut nodes = Vec::new();
|
|
|
|
|
for cp in &topology.control_plane {
|
|
|
|
|
nodes.push(BackendServer {
|
|
|
|
|
address: cp.ip.to_string(),
|
|
|
|
|
port,
|
|
|
|
|
});
|
|
|
|
|
}
|
|
|
|
|
for worker in &topology.workers {
|
|
|
|
|
nodes.push(BackendServer {
|
|
|
|
|
address: worker.ip.to_string(),
|
|
|
|
|
port,
|
|
|
|
|
});
|
|
|
|
|
}
|
|
|
|
|
nodes
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[cfg(test)]
mod tests {
    use std::sync::{Arc, OnceLock};

    use super::*;
    use crate::topology::DummyInfra;
    use harmony_macros::ip;
    use harmony_types::net::IpAddress;

    /// Builds a fixed 3-control-plane / 2-worker topology used by all tests.
    /// Infrastructure services that the score never touches are stubbed with
    /// `DummyInfra`; the router is a local `DummyRouter` with a known gateway.
    fn create_test_topology() -> HAClusterTopology {
        let router = Arc::new(DummyRouter {
            gateway: ip!("192.168.1.1"),
        });

        HAClusterTopology {
            domain_name: "test.example.com".to_string(),
            router,
            load_balancer: Arc::new(DummyInfra),
            firewall: Arc::new(DummyInfra),
            dhcp_server: Arc::new(DummyInfra),
            tftp_server: Arc::new(DummyInfra),
            http_server: Arc::new(DummyInfra),
            dns_server: Arc::new(DummyInfra),
            node_exporter: Arc::new(DummyInfra),
            switch_client: Arc::new(DummyInfra),
            bootstrap_host: LogicalHost {
                ip: ip!("192.168.1.100"),
                name: "bootstrap".to_string(),
            },
            control_plane: vec![
                LogicalHost {
                    ip: ip!("192.168.1.10"),
                    name: "control-plane-0".to_string(),
                },
                LogicalHost {
                    ip: ip!("192.168.1.11"),
                    name: "control-plane-1".to_string(),
                },
                LogicalHost {
                    ip: ip!("192.168.1.12"),
                    name: "control-plane-2".to_string(),
                },
            ],
            workers: vec![
                LogicalHost {
                    ip: ip!("192.168.1.20"),
                    name: "worker-0".to_string(),
                },
                LogicalHost {
                    ip: ip!("192.168.1.21"),
                    name: "worker-1".to_string(),
                },
            ],
            kubeconfig: None,
            network_manager: OnceLock::new(),
        }
    }

    /// Minimal `Router` stub returning a fixed gateway address.
    struct DummyRouter {
        gateway: IpAddress,
    }

    impl Router for DummyRouter {
        fn get_gateway(&self) -> IpAddress {
            self.gateway
        }
        fn get_cidr(&self) -> cidr::Ipv4Cidr {
            // Tests only use IPv4 gateways; a /24 around the gateway is enough.
            let ipv4 = match self.gateway {
                IpAddress::V4(ip) => ip,
                IpAddress::V6(_) => panic!("IPv6 not supported"),
            };
            cidr::Ipv4Cidr::new(ipv4, 24).unwrap()
        }
        fn get_host(&self) -> LogicalHost {
            LogicalHost {
                ip: self.gateway,
                name: "router".to_string(),
            }
        }
    }

    // Ingress backends must cover every node: 3 control plane + 2 workers.
    #[test]
    fn test_nodes_to_backend_server_includes_control_plane_and_workers() {
        let topology = create_test_topology();

        let backend_servers = OKDLoadBalancerScore::nodes_to_backend_server(&topology, 80);

        assert_eq!(backend_servers.len(), 5);

        let addresses: Vec<&str> = backend_servers.iter().map(|s| s.address.as_str()).collect();
        assert!(addresses.contains(&"192.168.1.10"));
        assert!(addresses.contains(&"192.168.1.11"));
        assert!(addresses.contains(&"192.168.1.12"));
        assert!(addresses.contains(&"192.168.1.20"));
        assert!(addresses.contains(&"192.168.1.21"));
    }

    // Control-plane-only backends must exclude the worker nodes.
    #[test]
    fn test_control_plane_to_backend_server_only_includes_control_plane() {
        let topology = create_test_topology();

        let backend_servers = OKDLoadBalancerScore::control_plane_to_backend_server(&topology, 80);

        assert_eq!(backend_servers.len(), 3);

        let addresses: Vec<&str> = backend_servers.iter().map(|s| s.address.as_str()).collect();
        assert!(addresses.contains(&"192.168.1.10"));
        assert!(addresses.contains(&"192.168.1.11"));
        assert!(addresses.contains(&"192.168.1.12"));
        assert!(!addresses.contains(&"192.168.1.20"));
        assert!(!addresses.contains(&"192.168.1.21"));
    }

    // Public ingress services (80/443) must load balance across all 5 nodes.
    #[test]
    fn test_public_services_include_all_nodes_on_port_80_and_443() {
        let topology = create_test_topology();
        let score = OKDLoadBalancerScore::new(&topology);

        let public_service_80 = score
            .load_balancer_score
            .public_services
            .iter()
            .find(|s| s.listening_port.port() == 80)
            .expect("Public service on port 80 not found");

        let public_service_443 = score
            .load_balancer_score
            .public_services
            .iter()
            .find(|s| s.listening_port.port() == 443)
            .expect("Public service on port 443 not found");

        assert_eq!(public_service_80.backend_servers.len(), 5);
        assert_eq!(public_service_443.backend_servers.len(), 5);
    }

    // Ignition API (22623) is control-plane-specific: exactly 3 backends.
    #[test]
    fn test_private_service_port_22623_only_control_plane() {
        let topology = create_test_topology();
        let score = OKDLoadBalancerScore::new(&topology);

        let private_service_22623 = score
            .load_balancer_score
            .private_services
            .iter()
            .find(|s| s.listening_port.port() == 22623)
            .expect("Private service on port 22623 not found");

        assert_eq!(private_service_22623.backend_servers.len(), 3);
    }

    // Kubernetes API (6443) is control-plane-specific and uses an HTTP health check.
    #[test]
    fn test_private_service_port_6443_only_control_plane() {
        let topology = create_test_topology();
        let score = OKDLoadBalancerScore::new(&topology);

        let private_service_6443 = score
            .load_balancer_score
            .private_services
            .iter()
            .find(|s| s.listening_port.port() == 6443)
            .expect("Private service on port 6443 not found");

        assert_eq!(private_service_6443.backend_servers.len(), 3);
        assert!(
            matches!(
                private_service_6443.health_check,
                Some(HealthCheck::HTTP(_, _, _, _))
            ),
            "Expected HTTP health check for port 6443"
        );
    }

    // Every backend produced for a given service port carries that same port.
    #[test]
    fn test_all_backend_servers_have_correct_port() {
        let topology = create_test_topology();

        let backend_servers = OKDLoadBalancerScore::nodes_to_backend_server(&topology, 443);

        for server in backend_servers {
            assert_eq!(server.port, 443);
        }
    }
}
|
|
|
|
|
|
|
|
|
|
impl<T: Topology + LoadBalancer> Score<T> for OKDLoadBalancerScore {
|
|
|
|
|
|