The previous implementation blindly added HAProxy components without checking
for existing configurations on the same port, which caused duplicate entries
and errors when a service was updated. This commit refactors the logic to a
robust "remove-then-add" strategy. The configure_service method now finds and
removes any existing frontend and its dependent components (backend, servers,
health check) before adding the new, complete service definition. This change
makes the process fully idempotent, preventing configuration drift and
ensuring a predictable state.

Co-authored-by: Ian Letourneau <letourneau.ian@gmail.com>
Reviewed-on: https://git.nationtech.io/NationTech/harmony/pulls/129

use crate::{config::OPNsenseShell, Error};
use opnsense_config_xml::{
    Frontend, HAProxy, HAProxyBackend, HAProxyHealthCheck, HAProxyServer, OPNsense,
};
use std::{collections::HashSet, sync::Arc};

pub struct LoadBalancerConfig<'a> {
    opnsense: &'a mut OPNsense,
    opnsense_shell: Arc<dyn OPNsenseShell>,
}

impl<'a> LoadBalancerConfig<'a> {
    pub fn new(opnsense: &'a mut OPNsense, opnsense_shell: Arc<dyn OPNsenseShell>) -> Self {
        Self {
            opnsense,
            opnsense_shell,
        }
    }

    pub fn get_full_config(&self) -> &Option<HAProxy> {
        &self.opnsense.opnsense.haproxy
    }

    /// Runs the given closure against the HAProxy section of the configuration.
    /// Panics via `unimplemented!` if no HAProxy config exists yet.
    fn with_haproxy<F, R>(&mut self, f: F) -> R
    where
        F: FnOnce(&mut HAProxy) -> R,
    {
        match self.opnsense.opnsense.haproxy.as_mut() {
            Some(haproxy) => f(haproxy),
            None => unimplemented!(
                "Cannot configure load balancer when haproxy config does not exist yet"
            ),
        }
    }

    /// Enables or disables HAProxy in the configuration.
    pub fn enable(&mut self, enabled: bool) {
        self.with_haproxy(|haproxy| haproxy.general.enabled = enabled as i32);
    }

    /// Configures a service by removing any existing service on the same port
    /// and then adding the new definition. This ensures idempotency.
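    ///
    /// A minimal usage sketch (the variables are illustrative, not taken from
    /// a real deployment):
    ///
    /// ```ignore
    /// let mut lb = LoadBalancerConfig::new(&mut opnsense, shell);
    /// lb.configure_service(frontend, backend, servers, Some(healthcheck));
    /// // Calling again with a frontend on the same bind address replaces the
    /// // existing service instead of duplicating it.
    /// lb.configure_service(frontend_v2, backend_v2, servers_v2, None);
    /// ```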
    pub fn configure_service(
        &mut self,
        frontend: Frontend,
        backend: HAProxyBackend,
        servers: Vec<HAProxyServer>,
        healthcheck: Option<HAProxyHealthCheck>,
    ) {
        self.remove_service_by_bind_address(&frontend.bind);
        self.remove_servers(&servers);

        self.add_new_service(frontend, backend, servers, healthcheck);
    }

    /// Removes any existing real servers that share a name with the incoming
    /// ones, so re-adding them does not create duplicates.
    fn remove_servers(&mut self, servers: &[HAProxyServer]) {
        let server_names: HashSet<_> = servers.iter().map(|s| s.name.clone()).collect();
        self.with_haproxy(|haproxy| {
            haproxy
                .servers
                .servers
                .retain(|s| !server_names.contains(&s.name));
        });
    }

    /// Removes a service and its dependent components based on the frontend's bind address.
    /// This performs a cascading delete of the frontend, backend, servers, and health check.
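    ///
    /// Illustrative sketch (the bind address is hypothetical):
    ///
    /// ```ignore
    /// // Removes the frontend bound to 0.0.0.0:443 together with its backend,
    /// // the backend's linked servers, and its health check, if present.
    /// self.remove_service_by_bind_address("0.0.0.0:443");
    /// ```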
    fn remove_service_by_bind_address(&mut self, bind_address: &str) {
        self.with_haproxy(|haproxy| {
            let Some(old_frontend) = remove_frontend_by_bind_address(haproxy, bind_address) else {
                return;
            };

            let Some(old_backend) = remove_backend(haproxy, old_frontend) else {
                return;
            };

            remove_healthcheck(haproxy, &old_backend);
            remove_linked_servers(haproxy, &old_backend);
        });
    }

    /// Adds the components of a new service to the HAProxy configuration.
    /// Callers are expected to have already removed any conflicting entries
    /// (see `configure_service`, which de-duplicates servers by name via
    /// `remove_servers`), so this method only appends.
    fn add_new_service(
        &mut self,
        frontend: Frontend,
        backend: HAProxyBackend,
        servers: Vec<HAProxyServer>,
        healthcheck: Option<HAProxyHealthCheck>,
    ) {
        self.with_haproxy(|haproxy| {
            if let Some(check) = healthcheck {
                haproxy.healthchecks.healthchecks.push(check);
            }

            haproxy.servers.servers.extend(servers);
            haproxy.backends.backends.push(backend);
            haproxy.frontends.frontend.push(frontend);
        });
    }

    /// Validates and deploys the HAProxy configuration, then restarts the
    /// service. Any failing step aborts the sequence and returns the error.
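    ///
    /// Illustrative usage (assumes an async context and a `LoadBalancerConfig`
    /// named `lb`):
    ///
    /// ```ignore
    /// lb.configure_service(frontend, backend, servers, None);
    /// lb.reload_restart().await?;
    /// ```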
    pub async fn reload_restart(&self) -> Result<(), Error> {
        // Stop HAProxy before regenerating its configuration.
        self.opnsense_shell.exec("configctl haproxy stop").await?;
        // Regenerate the HAProxy and Syslog config files from templates.
        self.opnsense_shell
            .exec("configctl template reload OPNsense/HAProxy")
            .await?;
        self.opnsense_shell
            .exec("configctl template reload OPNsense/Syslog")
            .await?;
        // Validate the staged configuration before deploying it.
        self.opnsense_shell
            .exec("/usr/local/sbin/haproxy -c -f /usr/local/etc/haproxy.conf.staging")
            .await?;

        // This script copies the staging config to the production config. I am
        // not 100% sure it is required in this context.
        self.opnsense_shell
            .exec("/usr/local/opnsense/scripts/OPNsense/HAProxy/setup.sh deploy")
            .await?;

        self.opnsense_shell
            .exec("configctl haproxy configtest")
            .await?;
        self.opnsense_shell.exec("configctl haproxy start").await?;
        Ok(())
    }
}

fn remove_frontend_by_bind_address(haproxy: &mut HAProxy, bind_address: &str) -> Option<Frontend> {
    let pos = haproxy
        .frontends
        .frontend
        .iter()
        .position(|f| f.bind == bind_address)?;

    Some(haproxy.frontends.frontend.remove(pos))
}

fn remove_backend(haproxy: &mut HAProxy, old_frontend: Frontend) -> Option<HAProxyBackend> {
    let default_backend = old_frontend.default_backend?;
    // A missing backend here would mean an orphaned frontend, which shouldn't
    // happen; `?` makes us return None gracefully if it does.
    let pos = haproxy
        .backends
        .backends
        .iter()
        .position(|b| b.uuid == default_backend)?;

    Some(haproxy.backends.backends.remove(pos))
}

fn remove_healthcheck(haproxy: &mut HAProxy, backend: &HAProxyBackend) {
    if let Some(uuid) = &backend.health_check.content {
        haproxy
            .healthchecks
            .healthchecks
            .retain(|h| h.uuid != *uuid);
    }
}

/// Remove the backend's servers. This assumes servers are not shared between services.
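/// The `linked_servers` field holds a comma-separated list of server UUIDs
/// (e.g. `"uuid-a,uuid-b"`), so each UUID is matched against each server's UUID.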
fn remove_linked_servers(haproxy: &mut HAProxy, backend: &HAProxyBackend) {
    if let Some(server_uuids_str) = &backend.linked_servers.content {
        let server_uuids_to_remove: HashSet<_> = server_uuids_str.split(',').collect();
        haproxy
            .servers
            .servers
            .retain(|s| !server_uuids_to_remove.contains(s.uuid.as_str()));
    }
}

#[cfg(test)]
mod tests {
    use crate::config::DummyOPNSenseShell;
    use assertor::*;
    use opnsense_config_xml::{
        Frontend, HAProxy, HAProxyBackend, HAProxyBackends, HAProxyFrontends, HAProxyHealthCheck,
        HAProxyHealthChecks, HAProxyId, HAProxyServer, HAProxyServers, MaybeString, OPNsense,
    };
    use std::sync::Arc;

    use super::LoadBalancerConfig;

    static SERVICE_BIND_ADDRESS: &str = "192.168.1.1:80";
    static OTHER_SERVICE_BIND_ADDRESS: &str = "192.168.1.1:443";

    static SERVER_ADDRESS: &str = "1.1.1.1:80";
    static OTHER_SERVER_ADDRESS: &str = "1.1.1.1:443";

    #[test]
    fn configure_service_should_add_all_service_components_to_haproxy() {
        let mut opnsense = given_opnsense();
        let mut load_balancer = given_load_balancer(&mut opnsense);
        let (healthcheck, servers, backend, frontend) =
            given_service(SERVICE_BIND_ADDRESS, SERVER_ADDRESS);

        load_balancer.configure_service(
            frontend.clone(),
            backend.clone(),
            servers.clone(),
            Some(healthcheck.clone()),
        );

        assert_haproxy_configured_with(
            opnsense,
            vec![frontend],
            vec![backend],
            servers,
            vec![healthcheck],
        );
    }

    #[test]
    fn configure_service_should_replace_service_on_same_bind_address() {
        let (healthcheck, servers, backend, frontend) =
            given_service(SERVICE_BIND_ADDRESS, SERVER_ADDRESS);
        let mut opnsense = given_opnsense_with(given_haproxy(
            vec![frontend.clone()],
            vec![backend.clone()],
            servers.clone(),
            vec![healthcheck.clone()],
        ));
        let mut load_balancer = given_load_balancer(&mut opnsense);

        let (updated_healthcheck, updated_servers, updated_backend, updated_frontend) =
            given_service(SERVICE_BIND_ADDRESS, OTHER_SERVER_ADDRESS);

        load_balancer.configure_service(
            updated_frontend.clone(),
            updated_backend.clone(),
            updated_servers.clone(),
            Some(updated_healthcheck.clone()),
        );

        assert_haproxy_configured_with(
            opnsense,
            vec![updated_frontend],
            vec![updated_backend],
            updated_servers,
            vec![updated_healthcheck],
        );
    }

    #[test]
    fn configure_service_should_keep_existing_service_on_different_bind_addresses() {
        let (healthcheck, servers, backend, frontend) =
            given_service(SERVICE_BIND_ADDRESS, SERVER_ADDRESS);
        let (other_healthcheck, other_servers, other_backend, other_frontend) =
            given_service(OTHER_SERVICE_BIND_ADDRESS, OTHER_SERVER_ADDRESS);
        let mut opnsense = given_opnsense_with(given_haproxy(
            vec![frontend.clone()],
            vec![backend.clone()],
            servers.clone(),
            vec![healthcheck.clone()],
        ));
        let mut load_balancer = given_load_balancer(&mut opnsense);

        load_balancer.configure_service(
            other_frontend.clone(),
            other_backend.clone(),
            other_servers.clone(),
            Some(other_healthcheck.clone()),
        );

        assert_haproxy_configured_with(
            opnsense,
            vec![frontend, other_frontend],
            vec![backend, other_backend],
            [servers, other_servers].concat(),
            vec![healthcheck, other_healthcheck],
        );
    }

    fn assert_haproxy_configured_with(
        opnsense: OPNsense,
        frontends: Vec<Frontend>,
        backends: Vec<HAProxyBackend>,
        servers: Vec<HAProxyServer>,
        healthchecks: Vec<HAProxyHealthCheck>,
    ) {
        let haproxy = opnsense.opnsense.haproxy.as_ref().unwrap();
        assert_that!(haproxy.frontends.frontend).contains_exactly(frontends);
        assert_that!(haproxy.backends.backends).contains_exactly(backends);
        assert_that!(haproxy.servers.servers).is_equal_to(servers);
        assert_that!(haproxy.healthchecks.healthchecks).contains_exactly(healthchecks);
    }

    fn given_opnsense() -> OPNsense {
        OPNsense::default()
    }

    fn given_opnsense_with(haproxy: HAProxy) -> OPNsense {
        let mut opnsense = OPNsense::default();
        opnsense.opnsense.haproxy = Some(haproxy);

        opnsense
    }

    fn given_load_balancer(opnsense: &mut OPNsense) -> LoadBalancerConfig<'_> {
        let opnsense_shell = Arc::new(DummyOPNSenseShell {});
        if opnsense.opnsense.haproxy.is_none() {
            opnsense.opnsense.haproxy = Some(HAProxy::default());
        }
        LoadBalancerConfig::new(opnsense, opnsense_shell)
    }

    fn given_service(
        bind_address: &str,
        server_address: &str,
    ) -> (
        HAProxyHealthCheck,
        Vec<HAProxyServer>,
        HAProxyBackend,
        Frontend,
    ) {
        let healthcheck = given_healthcheck();
        let servers = vec![given_server(server_address)];
        let backend = given_backend();
        let frontend = given_frontend(bind_address);
        (healthcheck, servers, backend, frontend)
    }

    fn given_haproxy(
        frontends: Vec<Frontend>,
        backends: Vec<HAProxyBackend>,
        servers: Vec<HAProxyServer>,
        healthchecks: Vec<HAProxyHealthCheck>,
    ) -> HAProxy {
        HAProxy {
            frontends: HAProxyFrontends {
                frontend: frontends,
            },
            backends: HAProxyBackends { backends },
            servers: HAProxyServers { servers },
            healthchecks: HAProxyHealthChecks { healthchecks },
            ..Default::default()
        }
    }

    fn given_frontend(bind_address: &str) -> Frontend {
        Frontend {
            uuid: "uuid".into(),
            id: HAProxyId::default(),
            enabled: 1,
            name: format!("frontend_{bind_address}"),
            bind: bind_address.into(),
            default_backend: Some("backend-uuid".into()),
            ..Default::default()
        }
    }

    fn given_backend() -> HAProxyBackend {
        HAProxyBackend {
            uuid: "backend-uuid".into(),
            id: HAProxyId::default(),
            enabled: 1,
            name: "backend_192.168.1.1:80".into(),
            linked_servers: MaybeString::from("server-uuid"),
            health_check_enabled: 1,
            health_check: MaybeString::from("healthcheck-uuid"),
            ..Default::default()
        }
    }

    fn given_server(address: &str) -> HAProxyServer {
        HAProxyServer {
            uuid: "server-uuid".into(),
            id: HAProxyId::default(),
            name: address.into(),
            address: Some(address.into()),
            ..Default::default()
        }
    }

    fn given_healthcheck() -> HAProxyHealthCheck {
        HAProxyHealthCheck {
            uuid: "healthcheck-uuid".into(),
            name: "healthcheck".into(),
            ..Default::default()
        }
    }
}