Compare commits

...

5 Commits

SHA1        Message                                                       Date
a31b459f33  fix: de-duplicate backend servers list mapped from topology   2025-09-03 22:00:28 -04:00  (Run Check Script / check: successful in 1m28s)
3d8dd4d8e6  support optional server fields                                2025-09-03 20:39:42 -04:00
01206f5db1  de-duplicate stuff                                            2025-09-03 17:18:26 -04:00  (Run Check Script / check: successful in 1m11s)
fc4c18ccea  remove old service components (frontend, backend, servers, healthcheck) with same bind address before adding new service   2025-09-03 15:58:28 -04:00  (Run Check Script / check: successful in 1m10s)
e9a1aa4831  fix: merge existing services in load balancer config          2025-09-01 07:39:53 -04:00  (Run Check Script / check: successful in 1m13s)
8 changed files with 162 additions and 59 deletions

View File

@@ -28,13 +28,7 @@ pub trait LoadBalancer: Send + Sync {
         &self,
         service: &LoadBalancerService,
     ) -> Result<(), ExecutorError> {
-        debug!(
-            "Listing LoadBalancer services {:?}",
-            self.list_services().await
-        );
-        if !self.list_services().await.contains(service) {
-            self.add_service(service).await?;
-        }
+        self.add_service(service).await?;
         Ok(())
     }
 }
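Dropping the list_services guard means this default method now leans on add_service being idempotent; the OPNsense implementation below provides that via configure_service. A minimal, self-contained sketch of that contract with stand-in types (the real trait is async, and ensure_service below is a hypothetical name, not the crate's actual method):

#[derive(Clone, PartialEq, Debug)]
struct Service { listening_port: u16 }

trait LoadBalancer {
    fn add_service(&mut self, service: &Service);
    fn list_services(&self) -> Vec<Service>;

    // The former guard (`if !self.list_services().contains(service)`) is no longer needed.
    fn ensure_service(&mut self, service: &Service) {
        self.add_service(service);
    }
}

struct Fake { services: Vec<Service> }

impl LoadBalancer for Fake {
    fn add_service(&mut self, service: &Service) {
        // Idempotent add: replace any service already listening on the same port.
        self.services.retain(|s| s.listening_port != service.listening_port);
        self.services.push(service.clone());
    }
    fn list_services(&self) -> Vec<Service> { self.services.clone() }
}

fn main() {
    let mut lb = Fake { services: vec![] };
    let svc = Service { listening_port: 6443 };
    lb.ensure_service(&svc);
    lb.ensure_service(&svc);
    assert_eq!(lb.list_services().len(), 1);
}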

View File

@@ -24,19 +24,13 @@ impl LoadBalancer for OPNSenseFirewall {
     }
 
     async fn add_service(&self, service: &LoadBalancerService) -> Result<(), ExecutorError> {
-        warn!(
-            "TODO : the current implementation does not check / cleanup / merge with existing haproxy services properly. Make sure to manually verify that the configuration is correct after executing any operation here"
-        );
         let mut config = self.opnsense_config.write().await;
-        let mut load_balancer = config.load_balancer();
         let (frontend, backend, servers, healthcheck) =
             harmony_load_balancer_service_to_haproxy_xml(service);
-        load_balancer.add_backend(backend);
-        load_balancer.add_frontend(frontend);
-        load_balancer.add_servers(servers);
-        if let Some(healthcheck) = healthcheck {
-            load_balancer.add_healthcheck(healthcheck);
-        }
+        let mut load_balancer = config.load_balancer();
+        load_balancer.configure_service(frontend, backend, servers, healthcheck);
 
         Ok(())
     }
@@ -104,7 +98,7 @@ pub(crate) fn haproxy_xml_config_to_harmony_loadbalancer(
             .backends
             .backends
             .iter()
-            .find(|b| b.uuid == frontend.default_backend);
+            .find(|b| Some(b.uuid.clone()) == frontend.default_backend);
 
         let mut health_check = None;
         match matching_backend {
@@ -114,8 +108,7 @@ pub(crate) fn haproxy_xml_config_to_harmony_loadbalancer(
             }
             None => {
                 warn!(
-                    "HAProxy config could not find a matching backend for frontend {:?}",
-                    frontend
+                    "HAProxy config could not find a matching backend for frontend {frontend:?}"
                 );
             }
         }
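The lookup now compares Option values, so the backend uuid has to be wrapped in Some. A tiny self-contained sketch of the behavior this gives: a frontend whose default_backend is None simply finds no matching backend.

fn main() {
    let backends = vec!["b-1".to_string(), "b-2".to_string()];

    let with_default: Option<String> = Some("b-2".into());
    let without_default: Option<String> = None;

    // Mirrors `.find(|b| Some(b.uuid.clone()) == frontend.default_backend)`.
    let hit = backends.iter().find(|uuid| Some((*uuid).clone()) == with_default);
    let miss = backends.iter().find(|uuid| Some((*uuid).clone()) == without_default);

    assert_eq!(hit, Some(&"b-2".to_string()));
    assert_eq!(miss, None);
}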
@@ -150,11 +143,11 @@ pub(crate) fn get_servers_for_backend(
         .servers
         .iter()
         .filter_map(|server| {
+            let address = server.address.clone()?;
+            let port = server.port?;
             if backend_servers.contains(&server.uuid.as_str()) {
-                return Some(BackendServer {
-                    address: server.address.clone(),
-                    port: server.port,
-                });
+                return Some(BackendServer { address, port });
             }
             None
         })
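With the fields now optional, the mapping skips HAProxy servers that have no address or port. A small self-contained sketch of the `?`-inside-filter_map pattern used above, with simplified stand-in types:

struct Server {
    address: Option<String>,
    port: Option<u16>,
}

#[derive(Debug, PartialEq)]
struct BackendServer {
    address: String,
    port: u16,
}

fn main() {
    let servers = vec![
        Server { address: Some("192.168.1.1".into()), port: Some(80) },
        Server { address: None, port: Some(80) }, // no address: skipped rather than failing
    ];
    let backends: Vec<BackendServer> = servers
        .iter()
        .filter_map(|s| {
            // `?` returns None from the closure, which filter_map silently drops.
            let address = s.address.clone()?;
            let port = s.port?;
            Some(BackendServer { address, port })
        })
        .collect();
    assert_eq!(backends, vec![BackendServer { address: "192.168.1.1".into(), port: 80 }]);
}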
@@ -322,7 +315,7 @@ pub(crate) fn harmony_load_balancer_service_to_haproxy_xml(
         name: format!("frontend_{}", service.listening_port),
         bind: service.listening_port.to_string(),
         mode: "tcp".to_string(), // TODO do not depend on health check here
-        default_backend: backend.uuid.clone(),
+        default_backend: Some(backend.uuid.clone()),
         ..Default::default()
     };
     info!("HAPRoxy frontend and backend mode currently hardcoded to tcp");
@@ -336,8 +329,8 @@ fn server_to_haproxy_server(server: &BackendServer) -> HAProxyServer {
         uuid: Uuid::new_v4().to_string(),
         name: format!("{}_{}", &server.address, &server.port),
         enabled: 1,
-        address: server.address.clone(),
-        port: server.port,
+        address: Some(server.address.clone()),
+        port: Some(server.port),
         mode: "active".to_string(),
         server_type: "static".to_string(),
         ..Default::default()
@@ -360,8 +353,8 @@ mod tests {
         let mut haproxy = HAProxy::default();
         let server = HAProxyServer {
             uuid: "server1".to_string(),
-            address: "192.168.1.1".to_string(),
-            port: 80,
+            address: Some("192.168.1.1".to_string()),
+            port: Some(80),
             ..Default::default()
         };
         haproxy.servers.servers.push(server);
@@ -386,8 +379,8 @@ mod tests {
         let mut haproxy = HAProxy::default();
         let server = HAProxyServer {
             uuid: "server1".to_string(),
-            address: "192.168.1.1".to_string(),
-            port: 80,
+            address: Some("192.168.1.1".to_string()),
+            port: Some(80),
             ..Default::default()
         };
         haproxy.servers.servers.push(server);
@@ -406,8 +399,8 @@ mod tests {
         let mut haproxy = HAProxy::default();
         let server = HAProxyServer {
             uuid: "server1".to_string(),
-            address: "192.168.1.1".to_string(),
-            port: 80,
+            address: Some("192.168.1.1".to_string()),
+            port: Some(80),
             ..Default::default()
         };
         haproxy.servers.servers.push(server);
@@ -428,16 +421,16 @@ mod tests {
         let mut haproxy = HAProxy::default();
         let server = HAProxyServer {
             uuid: "server1".to_string(),
-            address: "some-hostname.test.mcd".to_string(),
-            port: 80,
+            address: Some("some-hostname.test.mcd".to_string()),
+            port: Some(80),
             ..Default::default()
         };
         haproxy.servers.servers.push(server);
         let server = HAProxyServer {
             uuid: "server2".to_string(),
-            address: "192.168.1.2".to_string(),
-            port: 8080,
+            address: Some("192.168.1.2".to_string()),
+            port: Some(8080),
             ..Default::default()
         };
         haproxy.servers.servers.push(server);

View File

@@ -54,6 +54,7 @@ impl OKDBootstrapLoadBalancerScore {
             },
         }
     }
+
     fn topology_to_backend_server(topology: &HAClusterTopology, port: u16) -> Vec<BackendServer> {
         let mut backend: Vec<_> = topology
             .control_plane
@@ -67,6 +68,8 @@ impl OKDBootstrapLoadBalancerScore {
             address: topology.bootstrap_host.ip.to_string(),
             port,
         });
+
+        backend.dedup();
         backend
     }
 }
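Note that Vec::dedup only removes consecutive equal elements, so this de-duplication relies on the duplicate entries ending up adjacent in the list (and on BackendServer implementing PartialEq). A short self-contained sketch with a simplified stand-in type:

#[derive(Debug, PartialEq)]
struct BackendServer { address: String, port: u16 }

fn main() {
    let b = |address: &str, port| BackendServer { address: address.into(), port };

    let mut adjacent = vec![b("10.0.0.1", 6443), b("10.0.0.2", 6443), b("10.0.0.2", 6443)];
    adjacent.dedup();
    assert_eq!(adjacent.len(), 2); // adjacent duplicate removed

    let mut scattered = vec![b("10.0.0.2", 6443), b("10.0.0.1", 6443), b("10.0.0.2", 6443)];
    scattered.dedup();
    assert_eq!(scattered.len(), 3); // non-adjacent duplicate kept
}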

View File

@@ -36,6 +36,27 @@ pub struct DnsMasq {
     pub dhcp_options: Vec<DhcpOptions>,
     pub dhcp_boot: Vec<DhcpBoot>,
     pub dhcp_tags: Vec<RawXml>,
+    pub hosts: Vec<DnsmasqHost>,
+}
+
+#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize, Clone)]
+#[yaserde(rename = "hosts")]
+pub struct DnsmasqHost {
+    #[yaserde(attribute = true)]
+    pub uuid: String,
+    pub host: String,
+    pub domain: MaybeString,
+    pub local: MaybeString,
+    pub ip: MaybeString,
+    pub cnames: MaybeString,
+    pub client_id: MaybeString,
+    pub hwaddr: MaybeString,
+    pub lease_time: MaybeString,
+    pub ignore: Option<u8>,
+    pub set_tag: MaybeString,
+    pub descr: MaybeString,
+    pub comments: MaybeString,
+    pub aliases: MaybeString,
 }
 
 // Represents the <dhcp> element and its nested fields.
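The new DnsmasqHost struct maps an OPNsense <hosts> entry via yaserde, with uuid carried as an attribute and the rest as child elements. A rough, self-contained sketch of the derive behavior it relies on, using a trimmed-down struct (plain Strings instead of MaybeString) and assuming the yaserde and yaserde_derive crates; the element layout here is illustrative, not taken from this PR:

use yaserde_derive::{YaDeserialize, YaSerialize};

#[derive(Default, Debug, PartialEq, YaSerialize, YaDeserialize)]
#[yaserde(rename = "hosts")]
struct Host {
    #[yaserde(attribute = true)]
    uuid: String,
    host: String,
    ip: String,
}

fn main() {
    // uuid comes from the attribute, host/ip from child elements.
    let xml = r#"<hosts uuid="abc-123"><host>bootstrap</host><ip>192.168.1.10</ip></hosts>"#;
    let parsed: Host = yaserde::de::from_str(xml).unwrap();
    assert_eq!(parsed.host, "bootstrap");
    assert_eq!(parsed.ip, "192.168.1.10");
    assert!(yaserde::ser::to_string(&parsed).unwrap().contains("abc-123"));
}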

View File

@@ -77,7 +77,7 @@ impl YaSerializeTrait for HAProxyId {
     }
 }
 
-#[derive(PartialEq, Debug)]
+#[derive(PartialEq, Debug, Clone)]
 pub struct HAProxyId(String);
 
 impl Default for HAProxyId {
@@ -310,7 +310,7 @@ pub struct Frontend {
     pub bind_options: MaybeString,
     pub mode: String,
     #[yaserde(rename = "defaultBackend")]
-    pub default_backend: String,
+    pub default_backend: Option<String>,
     pub ssl_enabled: i32,
     pub ssl_certificates: MaybeString,
     pub ssl_default_certificate: MaybeString,
@@ -543,8 +543,8 @@ pub struct HAProxyServer {
     pub enabled: u8,
     pub name: String,
     pub description: MaybeString,
-    pub address: String,
-    pub port: u16,
+    pub address: Option<String>,
+    pub port: Option<u16>,
     pub checkport: MaybeString,
     pub mode: String,
     pub multiplexer_protocol: MaybeString,

View File

@@ -189,7 +189,7 @@ pub struct System {
     pub timeservers: String,
     pub webgui: WebGui,
     pub usevirtualterminal: u8,
-    pub disablenatreflection: String,
+    pub disablenatreflection: Option<String>,
     pub disableconsolemenu: u8,
     pub disablevlanhwfilter: u8,
     pub disablechecksumoffloading: u8,
@@ -256,7 +256,7 @@ pub struct Firmware {
     #[yaserde(rename = "type")]
     pub firmware_type: MaybeString,
     pub subscription: MaybeString,
-    pub reboot: MaybeString,
+    pub reboot: Option<MaybeString>,
 }
 
 #[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
@@ -1449,6 +1449,9 @@ pub struct Vip {
     pub advbase: Option<MaybeString>,
     pub advskew: Option<MaybeString>,
     pub descr: Option<MaybeString>,
+    pub peer: Option<MaybeString>,
+    pub peer6: Option<MaybeString>,
+    pub nosync: Option<MaybeString>,
 }
 
 #[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]

View File

@@ -29,8 +29,7 @@ impl SshConfigManager {
         self.opnsense_shell
             .exec(&format!(
-                "cp /conf/config.xml /conf/backup/{}",
-                backup_filename
+                "cp /conf/config.xml /conf/backup/{backup_filename}"
             ))
             .await
     }

View File

@@ -1,11 +1,8 @@
-use std::sync::Arc;
-
-use log::warn;
+use crate::{config::OPNsenseShell, Error};
 use opnsense_config_xml::{
     Frontend, HAProxy, HAProxyBackend, HAProxyHealthCheck, HAProxyServer, OPNsense,
 };
-
-use crate::{config::OPNsenseShell, Error};
+use std::{collections::HashSet, sync::Arc};
 
 pub struct LoadBalancerConfig<'a> {
     opnsense: &'a mut OPNsense,
@@ -40,21 +37,67 @@ impl<'a> LoadBalancerConfig<'a> {
         self.with_haproxy(|haproxy| haproxy.general.enabled = enabled as i32);
     }
 
-    pub fn add_backend(&mut self, backend: HAProxyBackend) {
-        warn!("TODO make sure this new backend does not refer non-existing entities like servers or health checks");
-        self.with_haproxy(|haproxy| haproxy.backends.backends.push(backend));
-    }
-
-    pub fn add_frontend(&mut self, frontend: Frontend) {
-        self.with_haproxy(|haproxy| haproxy.frontends.frontend.push(frontend));
-    }
-
-    pub fn add_healthcheck(&mut self, healthcheck: HAProxyHealthCheck) {
-        self.with_haproxy(|haproxy| haproxy.healthchecks.healthchecks.push(healthcheck));
-    }
-
-    pub fn add_servers(&mut self, mut servers: Vec<HAProxyServer>) {
-        self.with_haproxy(|haproxy| haproxy.servers.servers.append(&mut servers));
-    }
+    /// Configures a service by removing any existing service on the same port
+    /// and then adding the new definition. This ensures idempotency.
+    pub fn configure_service(
+        &mut self,
+        frontend: Frontend,
+        backend: HAProxyBackend,
+        servers: Vec<HAProxyServer>,
+        healthcheck: Option<HAProxyHealthCheck>,
+    ) {
+        self.remove_service_by_bind_address(&frontend.bind);
+        self.remove_servers(&servers);
+        self.add_new_service(frontend, backend, servers, healthcheck);
+    }
+
+    // Remove the corresponding real servers based on their name if they already exist.
+    fn remove_servers(&mut self, servers: &[HAProxyServer]) {
+        let server_names: HashSet<_> = servers.iter().map(|s| s.name.clone()).collect();
+        self.with_haproxy(|haproxy| {
+            haproxy
+                .servers
+                .servers
+                .retain(|s| !server_names.contains(&s.name));
+        });
+    }
+
+    /// Removes a service and its dependent components based on the frontend's bind address.
+    /// This performs a cascading delete of the frontend, backend, servers, and health check.
+    fn remove_service_by_bind_address(&mut self, bind_address: &str) {
+        self.with_haproxy(|haproxy| {
+            let Some(old_frontend) = remove_frontend_by_bind_address(haproxy, bind_address) else {
+                return;
+            };
+            let Some(old_backend) = remove_backend(haproxy, old_frontend) else {
+                return;
+            };
+            remove_healthcheck(haproxy, &old_backend);
+            remove_linked_servers(haproxy, &old_backend);
+        });
+    }
+
+    /// Adds the components of a new service to the HAProxy configuration.
+    /// This function de-duplicates servers by name to prevent configuration errors.
+    fn add_new_service(
+        &mut self,
+        frontend: Frontend,
+        backend: HAProxyBackend,
+        servers: Vec<HAProxyServer>,
+        healthcheck: Option<HAProxyHealthCheck>,
+    ) {
+        self.with_haproxy(|haproxy| {
+            if let Some(check) = healthcheck {
+                haproxy.healthchecks.healthchecks.push(check);
+            }
+            haproxy.servers.servers.extend(servers);
+            haproxy.backends.backends.push(backend);
+            haproxy.frontends.frontend.push(frontend);
+        });
+    }
 
     pub async fn reload_restart(&self) -> Result<(), Error> {
@@ -82,3 +125,50 @@ impl<'a> LoadBalancerConfig<'a> {
         Ok(())
     }
 }
+
+fn remove_frontend_by_bind_address(haproxy: &mut HAProxy, bind_address: &str) -> Option<Frontend> {
+    let pos = haproxy
+        .frontends
+        .frontend
+        .iter()
+        .position(|f| f.bind == bind_address);
+    match pos {
+        Some(pos) => Some(haproxy.frontends.frontend.remove(pos)),
+        None => None,
+    }
+}
+
+fn remove_backend(haproxy: &mut HAProxy, old_frontend: Frontend) -> Option<HAProxyBackend> {
+    let default_backend = old_frontend.default_backend?;
+    let pos = haproxy
+        .backends
+        .backends
+        .iter()
+        .position(|b| b.uuid == default_backend);
+    match pos {
+        Some(pos) => Some(haproxy.backends.backends.remove(pos)),
+        None => None, // orphaned frontend, shouldn't happen
+    }
+}
+
+fn remove_healthcheck(haproxy: &mut HAProxy, backend: &HAProxyBackend) {
+    if let Some(uuid) = &backend.health_check.content {
+        haproxy
+            .healthchecks
+            .healthchecks
+            .retain(|h| h.uuid != *uuid);
+    }
+}
+
+/// Remove the backend's servers. This assumes servers are not shared between services.
+fn remove_linked_servers(haproxy: &mut HAProxy, backend: &HAProxyBackend) {
+    if let Some(server_uuids_str) = &backend.linked_servers.content {
+        let server_uuids_to_remove: HashSet<_> = server_uuids_str.split(',').collect();
+        haproxy
+            .servers
+            .servers
+            .retain(|s| !server_uuids_to_remove.contains(s.uuid.as_str()));
+    }
+}
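Taken together, configure_service gives a remove-then-add reconciliation keyed on the frontend's bind address. A self-contained sketch of that flow with pared-down stand-in types (not the generated HAProxy XML structs), showing that re-applying a service on the same bind address replaces the old frontend and backend instead of accumulating duplicates:

#[derive(Clone, Debug, PartialEq)]
struct Frontend { bind: String, default_backend: Option<String> }

#[derive(Clone, Debug, PartialEq)]
struct Backend { uuid: String }

#[derive(Default)]
struct HaProxyState { frontends: Vec<Frontend>, backends: Vec<Backend> }

impl HaProxyState {
    fn configure_service(&mut self, frontend: Frontend, backend: Backend) {
        // Cascading delete: drop any frontend on the same bind address, then its backend.
        if let Some(pos) = self.frontends.iter().position(|f| f.bind == frontend.bind) {
            let old = self.frontends.remove(pos);
            if let Some(uuid) = old.default_backend {
                self.backends.retain(|b| b.uuid != uuid);
            }
        }
        self.backends.push(backend);
        self.frontends.push(frontend);
    }
}

fn main() {
    let mut state = HaProxyState::default();
    let svc = |uuid: &str| (
        Frontend { bind: "0.0.0.0:6443".into(), default_backend: Some(uuid.into()) },
        Backend { uuid: uuid.into() },
    );

    let (f1, b1) = svc("b-1");
    state.configure_service(f1, b1);
    let (f2, b2) = svc("b-2");
    state.configure_service(f2, b2);

    // Re-applying a service on the same bind address replaces it rather than duplicating it.
    assert_eq!(state.frontends.len(), 1);
    assert_eq!(state.backends.len(), 1);
    assert_eq!(state.backends[0].uuid, "b-2");
}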