From e9a1aa4831c81e9c98210f42781411183bb23db7 Mon Sep 17 00:00:00 2001
From: Ian Letourneau
Date: Mon, 1 Sep 2025 07:39:53 -0400
Subject: [PATCH 1/6] fix: merge existing services in load balancer config

---
 harmony/src/domain/topology/load_balancer.rs |  8 +-
 harmony/src/infra/opnsense/load_balancer.rs  |  6 +-
 opnsense-config-xml/src/data/haproxy.rs      |  2 +-
 opnsense-config/src/config/manager/ssh.rs    |  3 +-
 opnsense-config/src/modules/load_balancer.rs | 82 ++++++++++++++++++--
 5 files changed, 79 insertions(+), 22 deletions(-)

diff --git a/harmony/src/domain/topology/load_balancer.rs b/harmony/src/domain/topology/load_balancer.rs
index 3a38103..bf855d8 100644
--- a/harmony/src/domain/topology/load_balancer.rs
+++ b/harmony/src/domain/topology/load_balancer.rs
@@ -28,13 +28,7 @@ pub trait LoadBalancer: Send + Sync {
         &self,
         service: &LoadBalancerService,
     ) -> Result<(), ExecutorError> {
-        debug!(
-            "Listing LoadBalancer services {:?}",
-            self.list_services().await
-        );
-        if !self.list_services().await.contains(service) {
-            self.add_service(service).await?;
-        }
+        self.add_service(service).await?;
         Ok(())
     }
 }

diff --git a/harmony/src/infra/opnsense/load_balancer.rs b/harmony/src/infra/opnsense/load_balancer.rs
index 9414faf..9b8ec33 100644
--- a/harmony/src/infra/opnsense/load_balancer.rs
+++ b/harmony/src/infra/opnsense/load_balancer.rs
@@ -24,13 +24,11 @@ impl LoadBalancer for OPNSenseFirewall {
     }
 
     async fn add_service(&self, service: &LoadBalancerService) -> Result<(), ExecutorError> {
-        warn!(
-            "TODO : the current implementation does not check / cleanup / merge with existing haproxy services properly. Make sure to manually verify that the configuration is correct after executing any operation here"
-        );
         let mut config = self.opnsense_config.write().await;
+        let mut load_balancer = config.load_balancer();
+
         let (frontend, backend, servers, healthcheck) =
             harmony_load_balancer_service_to_haproxy_xml(service);
-        let mut load_balancer = config.load_balancer();
         load_balancer.add_backend(backend);
         load_balancer.add_frontend(frontend);
         load_balancer.add_servers(servers);

diff --git a/opnsense-config-xml/src/data/haproxy.rs b/opnsense-config-xml/src/data/haproxy.rs
index ef631f3..7aa407c 100644
--- a/opnsense-config-xml/src/data/haproxy.rs
+++ b/opnsense-config-xml/src/data/haproxy.rs
@@ -77,7 +77,7 @@ impl YaSerializeTrait for HAProxyId {
     }
 }
 
-#[derive(PartialEq, Debug)]
+#[derive(PartialEq, Debug, Clone)]
 pub struct HAProxyId(String);
 
 impl Default for HAProxyId {

diff --git a/opnsense-config/src/config/manager/ssh.rs b/opnsense-config/src/config/manager/ssh.rs
index fb525ea..30719e1 100644
--- a/opnsense-config/src/config/manager/ssh.rs
+++ b/opnsense-config/src/config/manager/ssh.rs
@@ -29,8 +29,7 @@ impl SshConfigManager {
 
         self.opnsense_shell
             .exec(&format!(
-                "cp /conf/config.xml /conf/backup/{}",
-                backup_filename
+                "cp /conf/config.xml /conf/backup/{backup_filename}"
             ))
             .await
     }

diff --git a/opnsense-config/src/modules/load_balancer.rs b/opnsense-config/src/modules/load_balancer.rs
index 6c71ed4..8769a5b 100644
--- a/opnsense-config/src/modules/load_balancer.rs
+++ b/opnsense-config/src/modules/load_balancer.rs
@@ -40,21 +40,87 @@ impl<'a> LoadBalancerConfig<'a> {
         self.with_haproxy(|haproxy| haproxy.general.enabled = enabled as i32);
     }
 
-    pub fn add_backend(&mut self, backend: HAProxyBackend) {
+    /// Adds or updates a backend pool.
+    /// If a backend with the same name exists, it is updated. Otherwise, it is added.
+    pub fn add_backend(&mut self, mut backend: HAProxyBackend) {
         warn!("TODO make sure this new backend does not refer non-existing entities like servers or health checks");
-        self.with_haproxy(|haproxy| haproxy.backends.backends.push(backend));
+        self.with_haproxy(|haproxy| {
+            let existing_backend = haproxy
+                .backends
+                .backends
+                .iter_mut()
+                .find(|b| b.name == backend.name);
+
+            if let Some(existing_backend) = existing_backend {
+                backend.uuid = existing_backend.uuid.clone(); // This breaks the `frontend` config
+                // as it is now relying on a stale uuid
+                backend.id = existing_backend.id.clone();
+                *existing_backend = backend;
+            } else {
+                haproxy.backends.backends.push(backend);
+            }
+        });
     }
 
-    pub fn add_frontend(&mut self, frontend: Frontend) {
-        self.with_haproxy(|haproxy| haproxy.frontends.frontend.push(frontend));
+    /// Adds or updates a frontend.
+    /// If a frontend with the same name exists, it is updated. Otherwise, it is added.
+    pub fn add_frontend(&mut self, mut frontend: Frontend) {
+        self.with_haproxy(|haproxy| {
+            let existing_frontend = haproxy
+                .frontends
+                .frontend
+                .iter_mut()
+                .find(|f| f.name == frontend.name);
+
+            if let Some(existing_frontend) = existing_frontend {
+                frontend.uuid = existing_frontend.uuid.clone();
+                frontend.id = existing_frontend.id.clone();
+                *existing_frontend = frontend;
+            } else {
+                haproxy.frontends.frontend.push(frontend);
+            }
+        });
     }
 
-    pub fn add_healthcheck(&mut self, healthcheck: HAProxyHealthCheck) {
-        self.with_haproxy(|haproxy| haproxy.healthchecks.healthchecks.push(healthcheck));
+    /// Adds or updates a health check.
+    /// If a health check with the same name exists, it is updated. Otherwise, it is added.
+    pub fn add_healthcheck(&mut self, mut healthcheck: HAProxyHealthCheck) {
+        self.with_haproxy(|haproxy| {
+            let existing_healthcheck = haproxy
+                .healthchecks
+                .healthchecks
+                .iter_mut()
+                .find(|h| h.name == healthcheck.name);
+
+            if let Some(existing_check) = existing_healthcheck {
+                healthcheck.uuid = existing_check.uuid.clone();
+                *existing_check = healthcheck;
+            } else {
+                haproxy.healthchecks.healthchecks.push(healthcheck);
+            }
+        });
     }
 
-    pub fn add_servers(&mut self, mut servers: Vec<HAProxyServer>) {
-        self.with_haproxy(|haproxy| haproxy.servers.servers.append(&mut servers));
+    /// Adds or updates a list of servers to the HAProxy configuration.
+    /// If a server with the same name already exists, it is updated. Otherwise, it is added.
+    pub fn add_servers(&mut self, servers: Vec<HAProxyServer>) {
+        self.with_haproxy(|haproxy| {
+            for server in servers {
+                let existing_server = haproxy
+                    .servers
+                    .servers
+                    .iter_mut()
+                    .find(|s| s.name == server.name);
+
+                if let Some(existing_server) = existing_server {
+                    existing_server.address = server.address;
+                    existing_server.port = server.port;
+                    existing_server.enabled = server.enabled;
+                } else {
+                    haproxy.servers.servers.push(server);
+                }
+            }
+        });
     }
 
     pub async fn reload_restart(&self) -> Result<(), Error> {
-- 
2.39.5


From fc4c18cceaa50a3bc721362540c935d638803f30 Mon Sep 17 00:00:00 2001
From: Ian Letourneau
Date: Wed, 3 Sep 2025 15:58:28 -0400
Subject: [PATCH 2/6] remove old service components (frontend, backend,
 servers, healthcheck) with same bind address before adding new service

---
 harmony/src/infra/opnsense/load_balancer.rs  |   8 +-
 opnsense-config/src/modules/load_balancer.rs | 161 ++++++++++---------
 2 files changed, 88 insertions(+), 81 deletions(-)

diff --git a/harmony/src/infra/opnsense/load_balancer.rs b/harmony/src/infra/opnsense/load_balancer.rs
index 9b8ec33..7715944 100644
--- a/harmony/src/infra/opnsense/load_balancer.rs
+++ b/harmony/src/infra/opnsense/load_balancer.rs
@@ -29,12 +29,8 @@ impl LoadBalancer for OPNSenseFirewall {
         let (frontend, backend, servers, healthcheck) =
             harmony_load_balancer_service_to_haproxy_xml(service);
 
-        load_balancer.add_backend(backend);
-        load_balancer.add_frontend(frontend);
-        load_balancer.add_servers(servers);
-        if let Some(healthcheck) = healthcheck {
-            load_balancer.add_healthcheck(healthcheck);
-        }
+
+        load_balancer.configure_service(frontend, backend, servers, healthcheck);
 
         Ok(())
     }

diff --git a/opnsense-config/src/modules/load_balancer.rs b/opnsense-config/src/modules/load_balancer.rs
index 8769a5b..d3393cc 100644
--- a/opnsense-config/src/modules/load_balancer.rs
+++ b/opnsense-config/src/modules/load_balancer.rs
@@ -1,4 +1,4 @@
-use std::sync::Arc;
+use std::{collections::HashSet, sync::Arc};
 
 use log::warn;
 use opnsense_config_xml::{
@@ -40,86 +40,51 @@ impl<'a> LoadBalancerConfig<'a> {
         self.with_haproxy(|haproxy| haproxy.general.enabled = enabled as i32);
     }
 
-    /// Adds or updates a backend pool.
-    /// If a backend with the same name exists, it is updated. Otherwise, it is added.
-    pub fn add_backend(&mut self, mut backend: HAProxyBackend) {
-        warn!("TODO make sure this new backend does not refer non-existing entities like servers or health checks");
-        self.with_haproxy(|haproxy| {
-            let existing_backend = haproxy
-                .backends
-                .backends
-                .iter_mut()
-                .find(|b| b.name == backend.name);
+    /// Configures a service by removing any existing service on the same port
+    /// and then adding the new definition. This ensures idempotency.
+    pub fn configure_service(
+        &mut self,
+        frontend: Frontend,
+        backend: HAProxyBackend,
+        servers: Vec<HAProxyServer>,
+        healthcheck: Option<HAProxyHealthCheck>,
+    ) {
+        self.remove_service_by_bind_address(&frontend.bind);
+        self.add_new_service(frontend, backend, servers, healthcheck);
+    }
 
-            if let Some(existing_backend) = existing_backend {
-                backend.uuid = existing_backend.uuid.clone(); // This breaks the `frontend` config
-                // as it is now relying on a stale uuid
-                backend.id = existing_backend.id.clone();
-                *existing_backend = backend;
-            } else {
-                haproxy.backends.backends.push(backend);
-            }
+    /// Removes a service and its dependent components based on the frontend's bind address.
+    /// This performs a cascading delete of the frontend, backend, servers, and health check.
+    fn remove_service_by_bind_address(&mut self, bind_address: &str) {
+        self.with_haproxy(|haproxy| {
+            let Some(old_frontend) = remove_frontend_by_bind_address(haproxy, bind_address) else {
+                return;
+            };
+
+            let Some(old_backend) = remove_backend(haproxy, old_frontend) else {
+                return;
+            };
+
+            remove_healthcheck(haproxy, &old_backend);
+            remove_servers(haproxy, &old_backend);
         });
     }
 
-    /// Adds or updates a frontend.
-    /// If a frontend with the same name exists, it is updated. Otherwise, it is added.
-    pub fn add_frontend(&mut self, mut frontend: Frontend) {
+    /// Adds the components of a new service to the HAProxy configuration.
+    fn add_new_service(
+        &mut self,
+        frontend: Frontend,
+        backend: HAProxyBackend,
+        servers: Vec<HAProxyServer>,
+        healthcheck: Option<HAProxyHealthCheck>,
+    ) {
         self.with_haproxy(|haproxy| {
-            let existing_frontend = haproxy
-                .frontends
-                .frontend
-                .iter_mut()
-                .find(|f| f.name == frontend.name);
-
-            if let Some(existing_frontend) = existing_frontend {
-                frontend.uuid = existing_frontend.uuid.clone();
-                frontend.id = existing_frontend.id.clone();
-                *existing_frontend = frontend;
-            } else {
-                haproxy.frontends.frontend.push(frontend);
-            }
-        });
-    }
-
-    /// Adds or updates a health check.
-    /// If a health check with the same name exists, it is updated. Otherwise, it is added.
-    pub fn add_healthcheck(&mut self, mut healthcheck: HAProxyHealthCheck) {
-        self.with_haproxy(|haproxy| {
-            let existing_healthcheck = haproxy
-                .healthchecks
-                .healthchecks
-                .iter_mut()
-                .find(|h| h.name == healthcheck.name);
-
-            if let Some(existing_check) = existing_healthcheck {
-                healthcheck.uuid = existing_check.uuid.clone();
-                *existing_check = healthcheck;
-            } else {
-                haproxy.healthchecks.healthchecks.push(healthcheck);
-            }
-        });
-    }
-
-    /// Adds or updates a list of servers to the HAProxy configuration.
-    /// If a server with the same name already exists, it is updated. Otherwise, it is added.
-    pub fn add_servers(&mut self, servers: Vec<HAProxyServer>) {
-        self.with_haproxy(|haproxy| {
-            for server in servers {
-                let existing_server = haproxy
-                    .servers
-                    .servers
-                    .iter_mut()
-                    .find(|s| s.name == server.name);
-
-                if let Some(existing_server) = existing_server {
-                    existing_server.address = server.address;
-                    existing_server.port = server.port;
-                    existing_server.enabled = server.enabled;
-                } else {
-                    haproxy.servers.servers.push(server);
-                }
+            if let Some(check) = healthcheck {
+                haproxy.healthchecks.healthchecks.push(check);
             }
+            haproxy.servers.servers.extend(servers);
+            haproxy.backends.backends.push(backend);
+            haproxy.frontends.frontend.push(frontend);
         });
     }
 
@@ -148,3 +113,49 @@ impl<'a> LoadBalancerConfig<'a> {
         Ok(())
     }
 }
+
+fn remove_frontend_by_bind_address(haproxy: &mut HAProxy, bind_address: &str) -> Option<Frontend> {
+    let pos = haproxy
+        .frontends
+        .frontend
+        .iter()
+        .position(|f| f.bind == bind_address);
+
+    match pos {
+        Some(pos) => Some(haproxy.frontends.frontend.remove(pos)),
+        None => None,
+    }
+}
+
+fn remove_backend(haproxy: &mut HAProxy, old_frontend: Frontend) -> Option<HAProxyBackend> {
+    let pos = haproxy
+        .backends
+        .backends
+        .iter()
+        .position(|b| b.uuid == old_frontend.default_backend);
+
+    match pos {
+        Some(pos) => Some(haproxy.backends.backends.remove(pos)),
+        None => None, // orphaned frontend, shouldn't happen
+    }
+}
+
+fn remove_healthcheck(haproxy: &mut HAProxy, backend: &HAProxyBackend) {
+    if let Some(uuid) = &backend.health_check.content {
+        haproxy
+            .healthchecks
+            .healthchecks
+            .retain(|h| h.uuid != *uuid);
+    }
+}
+
+/// Remove the backend's servers. This assumes servers are not shared between services.
+fn remove_servers(haproxy: &mut HAProxy, backend: &HAProxyBackend) {
+    if let Some(server_uuids_str) = &backend.linked_servers.content {
+        let server_uuids_to_remove: HashSet<_> = server_uuids_str.split(',').collect();
+        haproxy
+            .servers
+            .servers
+            .retain(|s| !server_uuids_to_remove.contains(s.uuid.as_str()));
+    }
+}
-- 
2.39.5


From 01206f5db1442c6c13dbe4da104455a0948622a9 Mon Sep 17 00:00:00 2001
From: Ian Letourneau
Date: Wed, 3 Sep 2025 17:18:26 -0400
Subject: [PATCH 3/6] de-duplicate stuff

---
 opnsense-config-xml/src/data/dnsmasq.rs      | 21 ++++++++++++++++++
 opnsense-config-xml/src/data/opnsense.rs     |  7 ++++--
 opnsense-config/src/modules/load_balancer.rs | 23 +++++++++++++++-----
 3 files changed, 43 insertions(+), 8 deletions(-)

diff --git a/opnsense-config-xml/src/data/dnsmasq.rs b/opnsense-config-xml/src/data/dnsmasq.rs
index db2b8c1..fe76b66 100644
--- a/opnsense-config-xml/src/data/dnsmasq.rs
+++ b/opnsense-config-xml/src/data/dnsmasq.rs
@@ -36,6 +36,27 @@ pub struct DnsMasq {
     pub dhcp_options: Vec,
     pub dhcp_boot: Vec,
     pub dhcp_tags: Vec,
+    pub hosts: Vec<DnsmasqHost>,
+}
+
+#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize, Clone)]
+#[yaserde(rename = "hosts")]
+pub struct DnsmasqHost {
+    #[yaserde(attribute = true)]
+    pub uuid: String,
+    pub host: String,
+    pub domain: MaybeString,
+    pub local: MaybeString,
+    pub ip: MaybeString,
+    pub cnames: MaybeString,
+    pub client_id: MaybeString,
+    pub hwaddr: MaybeString,
+    pub lease_time: MaybeString,
+    pub ignore: Option,
+    pub set_tag: MaybeString,
+    pub descr: MaybeString,
+    pub comments: MaybeString,
+    pub aliases: MaybeString,
 }
 
 // Represents the <dnsmasq> element and its nested fields.

diff --git a/opnsense-config-xml/src/data/opnsense.rs b/opnsense-config-xml/src/data/opnsense.rs
index c39f1c5..2efbaf3 100644
--- a/opnsense-config-xml/src/data/opnsense.rs
+++ b/opnsense-config-xml/src/data/opnsense.rs
@@ -189,7 +189,7 @@ pub struct System {
     pub timeservers: String,
     pub webgui: WebGui,
     pub usevirtualterminal: u8,
-    pub disablenatreflection: String,
+    pub disablenatreflection: Option<String>,
     pub disableconsolemenu: u8,
     pub disablevlanhwfilter: u8,
     pub disablechecksumoffloading: u8,
@@ -256,7 +256,7 @@ pub struct Firmware {
     #[yaserde(rename = "type")]
     pub firmware_type: MaybeString,
     pub subscription: MaybeString,
-    pub reboot: MaybeString,
+    pub reboot: Option<MaybeString>,
 }
 
 #[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
@@ -1449,6 +1449,9 @@ pub struct Vip {
     pub advbase: Option,
     pub advskew: Option,
     pub descr: Option,
+    pub peer: Option,
+    pub peer6: Option,
+    pub nosync: Option,
 }
 
 #[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]

diff --git a/opnsense-config/src/modules/load_balancer.rs b/opnsense-config/src/modules/load_balancer.rs
index d3393cc..1b03f13 100644
--- a/opnsense-config/src/modules/load_balancer.rs
+++ b/opnsense-config/src/modules/load_balancer.rs
@@ -1,11 +1,8 @@
-use std::{collections::HashSet, sync::Arc};
-
-use log::warn;
+use crate::{config::OPNsenseShell, Error};
 use opnsense_config_xml::{
     Frontend, HAProxy, HAProxyBackend, HAProxyHealthCheck, HAProxyServer, OPNsense,
 };
-
-use crate::{config::OPNsenseShell, Error};
+use std::{collections::HashSet, sync::Arc};
 
 pub struct LoadBalancerConfig<'a> {
     opnsense: &'a mut OPNsense,
@@ -71,6 +68,7 @@ impl<'a> LoadBalancerConfig<'a> {
     }
 
     /// Adds the components of a new service to the HAProxy configuration.
+    /// This function de-duplicates servers by name to prevent configuration errors.
     fn add_new_service(
         &mut self,
         frontend: Frontend,
         backend: HAProxyBackend,
         servers: Vec<HAProxyServer>,
         healthcheck: Option<HAProxyHealthCheck>,
     ) {
         self.with_haproxy(|haproxy| {
             if let Some(check) = healthcheck {
                 haproxy.healthchecks.healthchecks.push(check);
             }
-            haproxy.servers.servers.extend(servers);
+
+            let mut existing_server_names: HashSet<_> = haproxy
+                .servers
+                .servers
+                .iter()
+                .map(|s| s.name.clone())
+                .collect();
+
+            for server in servers {
+                if existing_server_names.insert(server.name.clone()) {
+                    haproxy.servers.servers.push(server);
+                }
+            }
+
             haproxy.backends.backends.push(backend);
             haproxy.frontends.frontend.push(frontend);
         });
-- 
2.39.5


From 3d8dd4d8e6a4e2b205a30a3734aa73563f8c3da7 Mon Sep 17 00:00:00 2001
From: Ian Letourneau
Date: Wed, 3 Sep 2025 20:39:42 -0400
Subject: [PATCH 4/6] support optional server fields

---
 harmony/src/infra/opnsense/load_balancer.rs  | 36 ++++++++++----------
 opnsense-config-xml/src/data/haproxy.rs      |  6 ++--
 opnsense-config/src/modules/load_balancer.rs |  3 +-
 3 files changed, 23 insertions(+), 22 deletions(-)

diff --git a/harmony/src/infra/opnsense/load_balancer.rs b/harmony/src/infra/opnsense/load_balancer.rs
index 7715944..3d981ad 100644
--- a/harmony/src/infra/opnsense/load_balancer.rs
+++ b/harmony/src/infra/opnsense/load_balancer.rs
@@ -98,7 +98,7 @@ pub(crate) fn haproxy_xml_config_to_harmony_loadbalancer(
         .backends
         .backends
         .iter()
-        .find(|b| b.uuid == frontend.default_backend);
+        .find(|b| Some(b.uuid.clone()) == frontend.default_backend);
 
     let mut health_check = None;
     match matching_backend {
@@ -144,11 +144,11 @@ pub(crate) fn get_servers_for_backend(
         .servers
         .iter()
         .filter_map(|server| {
+            let address = server.address.clone()?;
+            let port = server.port?;
+
             if backend_servers.contains(&server.uuid.as_str()) {
-                return Some(BackendServer {
-                    address: server.address.clone(),
-                    port: server.port,
-                });
+                return Some(BackendServer { address, port });
             }
             None
         })
@@ -316,7 +316,7 @@ pub(crate) fn harmony_load_balancer_service_to_haproxy_xml(
         name: format!("frontend_{}", service.listening_port),
         bind: service.listening_port.to_string(),
         mode: "tcp".to_string(), // TODO do not depend on health check here
-        default_backend: backend.uuid.clone(),
+        default_backend: Some(backend.uuid.clone()),
         ..Default::default()
     };
     info!("HAPRoxy frontend and backend mode currently hardcoded to tcp");
@@ -330,8 +330,8 @@ fn server_to_haproxy_server(server: &BackendServer) -> HAProxyServer {
         uuid: Uuid::new_v4().to_string(),
         name: format!("{}_{}", &server.address, &server.port),
         enabled: 1,
-        address: server.address.clone(),
-        port: server.port,
+        address: Some(server.address.clone()),
+        port: Some(server.port),
         mode: "active".to_string(),
         server_type: "static".to_string(),
         ..Default::default()
     }
@@ -354,8 +354,8 @@ mod tests {
         let mut haproxy = HAProxy::default();
         let server = HAProxyServer {
             uuid: "server1".to_string(),
-            address: "192.168.1.1".to_string(),
-            port: 80,
+            address: Some("192.168.1.1".to_string()),
+            port: Some(80),
             ..Default::default()
         };
         haproxy.servers.servers.push(server);
@@ -380,8 +380,8 @@ mod tests {
         let mut haproxy = HAProxy::default();
         let server = HAProxyServer {
             uuid: "server1".to_string(),
-            address: "192.168.1.1".to_string(),
-            port: 80,
+            address: Some("192.168.1.1".to_string()),
+            port: Some(80),
             ..Default::default()
         };
         haproxy.servers.servers.push(server);
@@ -400,8 +400,8 @@ mod tests {
         let mut haproxy = HAProxy::default();
         let server = HAProxyServer {
             uuid: "server1".to_string(),
-            address: "192.168.1.1".to_string(),
-            port: 80,
+            address: Some("192.168.1.1".to_string()),
+            port: Some(80),
             ..Default::default()
         };
         haproxy.servers.servers.push(server);
@@ -422,16 +422,16 @@ mod tests {
         let mut haproxy = HAProxy::default();
         let server = HAProxyServer {
             uuid: "server1".to_string(),
-            address: "some-hostname.test.mcd".to_string(),
-            port: 80,
+            address: Some("some-hostname.test.mcd".to_string()),
+            port: Some(80),
             ..Default::default()
         };
         haproxy.servers.servers.push(server);
 
         let server = HAProxyServer {
             uuid: "server2".to_string(),
-            address: "192.168.1.2".to_string(),
-            port: 8080,
+            address: Some("192.168.1.2".to_string()),
+            port: Some(8080),
             ..Default::default()
         };
         haproxy.servers.servers.push(server);

diff --git a/opnsense-config-xml/src/data/haproxy.rs b/opnsense-config-xml/src/data/haproxy.rs
index 7aa407c..6db1b8f 100644
--- a/opnsense-config-xml/src/data/haproxy.rs
+++ b/opnsense-config-xml/src/data/haproxy.rs
@@ -310,7 +310,7 @@ pub struct Frontend {
     pub bind_options: MaybeString,
     pub mode: String,
     #[yaserde(rename = "defaultBackend")]
-    pub default_backend: String,
+    pub default_backend: Option<String>,
     pub ssl_enabled: i32,
     pub ssl_certificates: MaybeString,
     pub ssl_default_certificate: MaybeString,
@@ -543,8 +543,8 @@ pub struct HAProxyServer {
     pub enabled: u8,
     pub name: String,
     pub description: MaybeString,
-    pub address: String,
-    pub port: u16,
+    pub address: Option<String>,
+    pub port: Option<u16>,
     pub checkport: MaybeString,
     pub mode: String,
     pub multiplexer_protocol: MaybeString,

diff --git a/opnsense-config/src/modules/load_balancer.rs b/opnsense-config/src/modules/load_balancer.rs
index 1b03f13..1a35388 100644
--- a/opnsense-config/src/modules/load_balancer.rs
+++ b/opnsense-config/src/modules/load_balancer.rs
@@ -139,11 +139,12 @@ fn remove_frontend_by_bind_address(haproxy: &mut HAProxy, bind_address: &str) -> Option<Frontend> {
 }
 
 fn remove_backend(haproxy: &mut HAProxy, old_frontend: Frontend) -> Option<HAProxyBackend> {
+    let default_backend = old_frontend.default_backend?;
     let pos = haproxy
         .backends
         .backends
         .iter()
-        .position(|b| b.uuid == old_frontend.default_backend);
+        .position(|b| b.uuid == default_backend);
 
     match pos {
         Some(pos) => Some(haproxy.backends.backends.remove(pos)),
-- 
2.39.5


From a31b459f337dd3de883e6c10908c2acd71aae3bb Mon Sep 17 00:00:00 2001
From: Ian Letourneau
Date: Wed, 3 Sep 2025 21:55:23 -0400
Subject: [PATCH 5/6] fix: de-duplicate backend servers list mapped from
 topology

---
 harmony/src/infra/opnsense/load_balancer.rs   |  3 +-
 .../modules/okd/bootstrap_load_balancer.rs    |  3 ++
 opnsense-config/src/modules/load_balancer.rs  | 31 ++++++++++---------
 3 files changed, 20 insertions(+), 17 deletions(-)

diff --git a/harmony/src/infra/opnsense/load_balancer.rs b/harmony/src/infra/opnsense/load_balancer.rs
index 3d981ad..9d8154a 100644
--- a/harmony/src/infra/opnsense/load_balancer.rs
+++ b/harmony/src/infra/opnsense/load_balancer.rs
@@ -108,8 +108,7 @@ pub(crate) fn haproxy_xml_config_to_harmony_loadbalancer(
         }
         None => {
             warn!(
-                "HAProxy config could not find a matching backend for frontend {:?}",
-                frontend
+                "HAProxy config could not find a matching backend for frontend {frontend:?}"
             );
         }
     }

diff --git a/harmony/src/modules/okd/bootstrap_load_balancer.rs b/harmony/src/modules/okd/bootstrap_load_balancer.rs
index d6cd2f3..63725f0 100644
--- a/harmony/src/modules/okd/bootstrap_load_balancer.rs
+++ b/harmony/src/modules/okd/bootstrap_load_balancer.rs
@@ -54,6 +54,7 @@ impl OKDBootstrapLoadBalancerScore {
             },
         }
     }
+
     fn topology_to_backend_server(topology: &HAClusterTopology, port: u16) -> Vec<BackendServer> {
         let mut backend: Vec<_> = topology
             .control_plane
@@ -67,6 +68,8 @@ impl OKDBootstrapLoadBalancerScore {
             address: topology.bootstrap_host.ip.to_string(),
             port,
         });
+
+        backend.dedup();
         backend
     }
 }

diff --git a/opnsense-config/src/modules/load_balancer.rs b/opnsense-config/src/modules/load_balancer.rs
index 1a35388..6ef6b9b 100644
--- a/opnsense-config/src/modules/load_balancer.rs
+++ b/opnsense-config/src/modules/load_balancer.rs
@@ -47,9 +47,22 @@ impl<'a> LoadBalancerConfig<'a> {
         healthcheck: Option<HAProxyHealthCheck>,
     ) {
         self.remove_service_by_bind_address(&frontend.bind);
+        self.remove_servers(&servers);
+
         self.add_new_service(frontend, backend, servers, healthcheck);
     }
 
+    // Remove the corresponding real servers based on their name if they already exist.
+    fn remove_servers(&mut self, servers: &[HAProxyServer]) {
+        let server_names: HashSet<_> = servers.iter().map(|s| s.name.clone()).collect();
+        self.with_haproxy(|haproxy| {
+            haproxy
+                .servers
+                .servers
+                .retain(|s| !server_names.contains(&s.name));
+        });
+    }
+
     /// Removes a service and its dependent components based on the frontend's bind address.
     /// This performs a cascading delete of the frontend, backend, servers, and health check.
     fn remove_service_by_bind_address(&mut self, bind_address: &str) {
@@ -63,7 +76,7 @@ impl<'a> LoadBalancerConfig<'a> {
             };
 
             remove_healthcheck(haproxy, &old_backend);
-            remove_servers(haproxy, &old_backend);
+            remove_linked_servers(haproxy, &old_backend);
         });
     }
 
@@ -81,19 +94,7 @@ impl<'a> LoadBalancerConfig<'a> {
                 haproxy.healthchecks.healthchecks.push(check);
             }
 
-            let mut existing_server_names: HashSet<_> = haproxy
-                .servers
-                .servers
-                .iter()
-                .map(|s| s.name.clone())
-                .collect();
-
-            for server in servers {
-                if existing_server_names.insert(server.name.clone()) {
-                    haproxy.servers.servers.push(server);
-                }
-            }
-
+            haproxy.servers.servers.extend(servers);
             haproxy.backends.backends.push(backend);
             haproxy.frontends.frontend.push(frontend);
         });
@@ -162,7 +163,7 @@ fn remove_healthcheck(haproxy: &mut HAProxy, backend: &HAProxyBackend) {
 }
 
 /// Remove the backend's servers. This assumes servers are not shared between services.
-fn remove_servers(haproxy: &mut HAProxy, backend: &HAProxyBackend) {
+fn remove_linked_servers(haproxy: &mut HAProxy, backend: &HAProxyBackend) {
     if let Some(server_uuids_str) = &backend.linked_servers.content {
         let server_uuids_to_remove: HashSet<_> = server_uuids_str.split(',').collect();
         haproxy
-- 
2.39.5


From 9b892dc882c96527af5d82cf6e9916505407d251 Mon Sep 17 00:00:00 2001
From: Ian Letourneau
Date: Thu, 4 Sep 2025 13:05:21 -0400
Subject: [PATCH 6/6] test: add load_balancer::configure_service tests

---
 Cargo.lock                                   |  12 ++
 Cargo.toml                                   |  14 +-
 opnsense-config-xml/src/data/haproxy.rs      |   8 +-
 opnsense-config/Cargo.toml                   |   1 +
 opnsense-config/src/config/shell/mod.rs      |   6 +-
 opnsense-config/src/modules/load_balancer.rs | 214 ++++++++++++++++++-
 6 files changed, 243 insertions(+), 12 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 62d8aee..85f3578 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -429,6 +429,15 @@ dependencies = [
  "wait-timeout",
 ]
 
+[[package]]
+name = "assertor"
+version = "0.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4ff24d87260733dc86d38a11c60d9400ce4a74a05d0dafa2a6f5ab249cd857cb"
+dependencies = [
+ "num-traits",
+]
+
 [[package]]
 name = "async-broadcast"
 version = "0.7.2"
@@ -1846,6 +1855,8 @@ dependencies = [
  "env_logger",
  "harmony",
  "harmony_macros",
+ "harmony_secret",
+ "harmony_secret_derive",
  "harmony_tui",
  "harmony_types",
  "log",
@@ -3830,6 +3841,7 @@ checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e"
 name = "opnsense-config"
 version = "0.1.0"
 dependencies = [
+ "assertor",
 "async-trait",
 "chrono",
 "env_logger",

diff --git a/Cargo.toml b/Cargo.toml
index d92c0e7..32231d7 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -14,7 +14,8 @@ members = [
     "harmony_composer",
     "harmony_inventory_agent",
     "harmony_secret_derive",
-    "harmony_secret", "adr/agent_discovery/mdns",
+    "harmony_secret",
+    "adr/agent_discovery/mdns",
 ]
 
 [workspace.package]
@@ -66,5 +67,12 @@ thiserror = "2.0.14"
 serde = { version = "1.0.209", features = ["derive", "rc"] }
 serde_json = "1.0.127"
 askama = "0.14"
-sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite" ] }
-reqwest = { version = "0.12", features = ["blocking", "stream", "rustls-tls", "http2", "json"], default-features = false }
+sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite"] }
+reqwest = { version = "0.12", features = [
+    "blocking",
+    "stream",
+    "rustls-tls",
+    "http2",
+    "json",
+], default-features = false }
+assertor = "0.0.4"

diff --git a/opnsense-config-xml/src/data/haproxy.rs b/opnsense-config-xml/src/data/haproxy.rs
index 6db1b8f..b0aedc2 100644
--- a/opnsense-config-xml/src/data/haproxy.rs
+++ b/opnsense-config-xml/src/data/haproxy.rs
@@ -297,7 +297,7 @@ pub struct HAProxyFrontends {
     pub frontend: Vec<Frontend>,
 }
 
-#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
+#[derive(Clone, Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
 pub struct Frontend {
     #[yaserde(attribute = true)]
     pub uuid: String,
@@ -416,7 +416,7 @@ pub struct HAProxyBackends {
     pub backends: Vec<HAProxyBackend>,
 }
 
-#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
+#[derive(Clone, Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
 pub struct HAProxyBackend {
     #[yaserde(attribute = true, rename = "uuid")]
     pub uuid: String,
@@ -535,7 +535,7 @@ pub struct HAProxyServers {
     pub servers: Vec<HAProxyServer>,
 }
 
-#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
+#[derive(Clone, Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
 pub struct HAProxyServer {
#[yaserde(attribute = true, rename = "uuid")] pub uuid: String, @@ -589,7 +589,7 @@ pub struct HAProxyHealthChecks { pub healthchecks: Vec, } -#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)] +#[derive(Clone, Default, PartialEq, Debug, YaSerialize, YaDeserialize)] pub struct HAProxyHealthCheck { #[yaserde(attribute = true)] pub uuid: String, diff --git a/opnsense-config/Cargo.toml b/opnsense-config/Cargo.toml index e70bc12..8e7541c 100644 --- a/opnsense-config/Cargo.toml +++ b/opnsense-config/Cargo.toml @@ -24,6 +24,7 @@ uuid.workspace = true [dev-dependencies] pretty_assertions.workspace = true +assertor.workspace = true [lints.rust] unexpected_cfgs = { level = "warn", check-cfg = ['cfg(e2e_test)'] } diff --git a/opnsense-config/src/config/shell/mod.rs b/opnsense-config/src/config/shell/mod.rs index aa03837..aa94ff1 100644 --- a/opnsense-config/src/config/shell/mod.rs +++ b/opnsense-config/src/config/shell/mod.rs @@ -1,9 +1,7 @@ mod ssh; -pub use ssh::*; - -use async_trait::async_trait; - use crate::Error; +use async_trait::async_trait; +pub use ssh::*; #[async_trait] pub trait OPNsenseShell: std::fmt::Debug + Send + Sync { diff --git a/opnsense-config/src/modules/load_balancer.rs b/opnsense-config/src/modules/load_balancer.rs index 6ef6b9b..00cb364 100644 --- a/opnsense-config/src/modules/load_balancer.rs +++ b/opnsense-config/src/modules/load_balancer.rs @@ -28,7 +28,7 @@ impl<'a> LoadBalancerConfig<'a> { match &mut self.opnsense.opnsense.haproxy.as_mut() { Some(haproxy) => f(haproxy), None => unimplemented!( - "Adding a backend is not supported when haproxy config does not exist yet" + "Cannot configure load balancer when haproxy config does not exist yet" ), } } @@ -172,3 +172,215 @@ fn remove_linked_servers(haproxy: &mut HAProxy, backend: &HAProxyBackend) { .retain(|s| !server_uuids_to_remove.contains(s.uuid.as_str())); } } + +#[cfg(test)] +mod tests { + use crate::config::DummyOPNSenseShell; + use assertor::*; + use opnsense_config_xml::{ + Frontend, HAProxy, HAProxyBackend, HAProxyBackends, HAProxyFrontends, HAProxyHealthCheck, + HAProxyHealthChecks, HAProxyId, HAProxyServer, HAProxyServers, MaybeString, OPNsense, + }; + use std::sync::Arc; + + use super::LoadBalancerConfig; + + static SERVICE_BIND_ADDRESS: &str = "192.168.1.1:80"; + static OTHER_SERVICE_BIND_ADDRESS: &str = "192.168.1.1:443"; + + static SERVER_ADDRESS: &str = "1.1.1.1:80"; + static OTHER_SERVER_ADDRESS: &str = "1.1.1.1:443"; + + #[test] + fn configure_service_should_add_all_service_components_to_haproxy() { + let mut opnsense = given_opnsense(); + let mut load_balancer = given_load_balancer(&mut opnsense); + let (healthcheck, servers, backend, frontend) = + given_service(SERVICE_BIND_ADDRESS, SERVER_ADDRESS); + + load_balancer.configure_service( + frontend.clone(), + backend.clone(), + servers.clone(), + Some(healthcheck.clone()), + ); + + assert_haproxy_configured_with( + opnsense, + vec![frontend], + vec![backend], + servers, + vec![healthcheck], + ); + } + + #[test] + fn configure_service_should_replace_service_on_same_bind_address() { + let (healthcheck, servers, backend, frontend) = + given_service(SERVICE_BIND_ADDRESS, SERVER_ADDRESS); + let mut opnsense = given_opnsense_with(given_haproxy( + vec![frontend.clone()], + vec![backend.clone()], + servers.clone(), + vec![healthcheck.clone()], + )); + let mut load_balancer = given_load_balancer(&mut opnsense); + + let (updated_healthcheck, updated_servers, updated_backend, updated_frontend) = + given_service(SERVICE_BIND_ADDRESS, 
+
+        load_balancer.configure_service(
+            updated_frontend.clone(),
+            updated_backend.clone(),
+            updated_servers.clone(),
+            Some(updated_healthcheck.clone()),
+        );
+
+        assert_haproxy_configured_with(
+            opnsense,
+            vec![updated_frontend],
+            vec![updated_backend],
+            updated_servers,
+            vec![updated_healthcheck],
+        );
+    }
+
+    #[test]
+    fn configure_service_should_keep_existing_service_on_different_bind_addresses() {
+        let (healthcheck, servers, backend, frontend) =
+            given_service(SERVICE_BIND_ADDRESS, SERVER_ADDRESS);
+        let (other_healthcheck, other_servers, other_backend, other_frontend) =
+            given_service(OTHER_SERVICE_BIND_ADDRESS, OTHER_SERVER_ADDRESS);
+        let mut opnsense = given_opnsense_with(given_haproxy(
+            vec![frontend.clone()],
+            vec![backend.clone()],
+            servers.clone(),
+            vec![healthcheck.clone()],
+        ));
+        let mut load_balancer = given_load_balancer(&mut opnsense);
+
+        load_balancer.configure_service(
+            other_frontend.clone(),
+            other_backend.clone(),
+            other_servers.clone(),
+            Some(other_healthcheck.clone()),
+        );
+
+        assert_haproxy_configured_with(
+            opnsense,
+            vec![frontend, other_frontend],
+            vec![backend, other_backend],
+            [servers, other_servers].concat(),
+            vec![healthcheck, other_healthcheck],
+        );
+    }
+
+    fn assert_haproxy_configured_with(
+        opnsense: OPNsense,
+        frontends: Vec<Frontend>,
+        backends: Vec<HAProxyBackend>,
+        servers: Vec<HAProxyServer>,
+        healthchecks: Vec<HAProxyHealthCheck>,
+    ) {
+        let haproxy = opnsense.opnsense.haproxy.as_ref().unwrap();
+        assert_that!(haproxy.frontends.frontend).contains_exactly(frontends);
+        assert_that!(haproxy.backends.backends).contains_exactly(backends);
+        assert_that!(haproxy.servers.servers).is_equal_to(servers);
+        assert_that!(haproxy.healthchecks.healthchecks).contains_exactly(healthchecks);
+    }
+
+    fn given_opnsense() -> OPNsense {
+        OPNsense::default()
+    }
+
+    fn given_opnsense_with(haproxy: HAProxy) -> OPNsense {
+        let mut opnsense = OPNsense::default();
+        opnsense.opnsense.haproxy = Some(haproxy);
+
+        opnsense
+    }
+
+    fn given_load_balancer<'a>(opnsense: &'a mut OPNsense) -> LoadBalancerConfig<'a> {
+        let opnsense_shell = Arc::new(DummyOPNSenseShell {});
+        if opnsense.opnsense.haproxy.is_none() {
+            opnsense.opnsense.haproxy = Some(HAProxy::default());
+        }
+        LoadBalancerConfig::new(opnsense, opnsense_shell)
+    }
+
+    fn given_service(
+        bind_address: &str,
+        server_address: &str,
+    ) -> (
+        HAProxyHealthCheck,
+        Vec<HAProxyServer>,
+        HAProxyBackend,
+        Frontend,
+    ) {
+        let healthcheck = given_healthcheck();
+        let servers = vec![given_server(server_address)];
+        let backend = given_backend();
+        let frontend = given_frontend(bind_address);
+        (healthcheck, servers, backend, frontend)
+    }
+
+    fn given_haproxy(
+        frontends: Vec<Frontend>,
+        backends: Vec<HAProxyBackend>,
+        servers: Vec<HAProxyServer>,
+        healthchecks: Vec<HAProxyHealthCheck>,
+    ) -> HAProxy {
+        HAProxy {
+            frontends: HAProxyFrontends {
+                frontend: frontends,
+            },
+            backends: HAProxyBackends { backends },
+            servers: HAProxyServers { servers },
+            healthchecks: HAProxyHealthChecks { healthchecks },
+            ..Default::default()
+        }
+    }
+
+    fn given_frontend(bind_address: &str) -> Frontend {
+        Frontend {
+            uuid: "uuid".into(),
+            id: HAProxyId::default(),
+            enabled: 1,
+            name: format!("frontend_{bind_address}"),
+            bind: bind_address.into(),
+            default_backend: Some("backend-uuid".into()),
+            ..Default::default()
+        }
+    }
+
+    fn given_backend() -> HAProxyBackend {
+        HAProxyBackend {
+            uuid: "backend-uuid".into(),
+            id: HAProxyId::default(),
+            enabled: 1,
+            name: "backend_192.168.1.1:80".into(),
+            linked_servers: MaybeString::from("server-uuid"),
+            health_check_enabled: 1,
+            health_check: MaybeString::from("healthcheck-uuid"),
MaybeString::from("healthcheck-uuid"), + ..Default::default() + } + } + + fn given_server(address: &str) -> HAProxyServer { + HAProxyServer { + uuid: "server-uuid".into(), + id: HAProxyId::default(), + name: address.into(), + address: Some(address.into()), + ..Default::default() + } + } + + fn given_healthcheck() -> HAProxyHealthCheck { + HAProxyHealthCheck { + uuid: "healthcheck-uuid".into(), + name: "healthcheck".into(), + ..Default::default() + } + } +} -- 2.39.5