fix(opnsense-config): ensure load balancer service configuration is idempotent #129

Merged
letian merged 8 commits from idempotent-load-balancer into master 2025-10-20 19:18:50 +00:00
3 changed files with 20 additions and 17 deletions
Showing only changes of commit a31b459f33 - Show all commits

View File

@@ -108,8 +108,7 @@ pub(crate) fn haproxy_xml_config_to_harmony_loadbalancer(
}
None => {
warn!(
"HAProxy config could not find a matching backend for frontend {:?}",
frontend
"HAProxy config could not find a matching backend for frontend {frontend:?}"
);
}
}

View File

@@ -54,6 +54,7 @@ impl OKDBootstrapLoadBalancerScore {
},
}
}
fn topology_to_backend_server(topology: &HAClusterTopology, port: u16) -> Vec<BackendServer> {
let mut backend: Vec<_> = topology
.control_plane
@@ -67,6 +68,8 @@ impl OKDBootstrapLoadBalancerScore {
address: topology.bootstrap_host.ip.to_string(),
port,
});
backend.dedup();
backend
}
}

View File

@@ -47,9 +47,22 @@ impl<'a> LoadBalancerConfig<'a> {
healthcheck: Option<HAProxyHealthCheck>,
) {
self.remove_service_by_bind_address(&frontend.bind);
self.remove_servers(&servers);
self.add_new_service(frontend, backend, servers, healthcheck);
}
// Remove the corresponding real servers based on their name if they already exist.
fn remove_servers(&mut self, servers: &[HAProxyServer]) {
letian marked this conversation as resolved · Outdated

As of now, the first time you add a load balancer service, everything will work as expected. Though if you run it again, as we update in place the existing backend and we reuse its uuid, the frontend will be referring to a non-existent uuid for the `default_backend` (see `harmony/src/infra/opnsense/load_balancer.rs` on line 323). So we need to fix this before merging.
let server_names: HashSet<_> = servers.iter().map(|s| s.name.clone()).collect();
self.with_haproxy(|haproxy| {
haproxy
.servers
.servers
.retain(|s| !server_names.contains(&s.name));
});
}
/// Removes a service and its dependent components based on the frontend's bind address.
/// This performs a cascading delete of the frontend, backend, servers, and health check.
fn remove_service_by_bind_address(&mut self, bind_address: &str) {
@@ -63,7 +76,7 @@ impl<'a> LoadBalancerConfig<'a> {
};
remove_healthcheck(haproxy, &old_backend);
remove_servers(haproxy, &old_backend);
remove_linked_servers(haproxy, &old_backend);
});
}
@@ -81,19 +94,7 @@ impl<'a> LoadBalancerConfig<'a> {
haproxy.healthchecks.healthchecks.push(check);
}
let mut existing_server_names: HashSet<_> = haproxy
.servers
.servers
.iter()
.map(|s| s.name.clone())
.collect();
for server in servers {
if existing_server_names.insert(server.name.clone()) {
haproxy.servers.servers.push(server);
}
}
haproxy.servers.servers.extend(servers);
haproxy.backends.backends.push(backend);
haproxy.frontends.frontend.push(frontend);
});
@@ -162,7 +163,7 @@ fn remove_healthcheck(haproxy: &mut HAProxy, backend: &HAProxyBackend) {
}
/// Remove the backend's servers. This assumes servers are not shared between services.
fn remove_servers(haproxy: &mut HAProxy, backend: &HAProxyBackend) {
fn remove_linked_servers(haproxy: &mut HAProxy, backend: &HAProxyBackend) {
if let Some(server_uuids_str) = &backend.linked_servers.content {
let server_uuids_to_remove: HashSet<_> = server_uuids_str.split(',').collect();
haproxy