Compare commits


1 Commit

17 changed files with 206 additions and 423 deletions

Cargo.lock (generated)
View File

@@ -429,15 +429,6 @@ dependencies = [
"wait-timeout",
]
[[package]]
name = "assertor"
version = "0.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4ff24d87260733dc86d38a11c60d9400ce4a74a05d0dafa2a6f5ab249cd857cb"
dependencies = [
"num-traits",
]
[[package]]
name = "async-broadcast"
version = "0.7.2"
@@ -1890,8 +1881,6 @@ dependencies = [
"env_logger",
"harmony",
"harmony_macros",
"harmony_secret",
"harmony_secret_derive",
"harmony_tui",
"harmony_types",
"log",
@@ -3889,7 +3878,6 @@ checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e"
name = "opnsense-config"
version = "0.1.0"
dependencies = [
"assertor",
"async-trait",
"chrono",
"env_logger",

View File

@@ -14,8 +14,7 @@ members = [
"harmony_composer",
"harmony_inventory_agent",
"harmony_secret_derive",
"harmony_secret",
"adr/agent_discovery/mdns",
"harmony_secret", "adr/agent_discovery/mdns",
]
[workspace.package]
@@ -67,12 +66,5 @@ thiserror = "2.0.14"
serde = { version = "1.0.209", features = ["derive", "rc"] }
serde_json = "1.0.127"
askama = "0.14"
sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite"] }
reqwest = { version = "0.12", features = [
"blocking",
"stream",
"rustls-tls",
"http2",
"json",
], default-features = false }
assertor = "0.0.4"
sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite" ] }
reqwest = { version = "0.12", features = ["blocking", "stream", "rustls-tls", "http2", "json"], default-features = false }

View File

@@ -28,7 +28,13 @@ pub trait LoadBalancer: Send + Sync {
&self,
service: &LoadBalancerService,
) -> Result<(), ExecutorError> {
self.add_service(service).await?;
debug!(
"Listing LoadBalancer services {:?}",
self.list_services().await
);
if !self.list_services().await.contains(service) {
self.add_service(service).await?;
}
Ok(())
}
}
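
The guard above makes the default implementation idempotent: add_service only runs when list_services() does not already contain the service. A minimal caller-side sketch of the behaviour this buys, assuming some lb implementing LoadBalancer; ensure_service is a hypothetical name for the method whose signature is cut off by the hunk header:

async fn ensure_twice(lb: &dyn LoadBalancer, svc: &LoadBalancerService) -> Result<(), ExecutorError> {
    lb.ensure_service(svc).await?; // first call: service absent, so add_service runs
    lb.ensure_service(svc).await   // second call: contains(service) is true, nothing is added
}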

View File

@@ -26,13 +26,19 @@ impl LoadBalancer for OPNSenseFirewall {
}
async fn add_service(&self, service: &LoadBalancerService) -> Result<(), ExecutorError> {
warn!(
"TODO : the current implementation does not check / cleanup / merge with existing haproxy services properly. Make sure to manually verify that the configuration is correct after executing any operation here"
);
let mut config = self.opnsense_config.write().await;
let mut load_balancer = config.load_balancer();
let (frontend, backend, servers, healthcheck) =
harmony_load_balancer_service_to_haproxy_xml(service);
load_balancer.configure_service(frontend, backend, servers, healthcheck);
let mut load_balancer = config.load_balancer();
load_balancer.add_backend(backend);
load_balancer.add_frontend(frontend);
load_balancer.add_servers(servers);
if let Some(healthcheck) = healthcheck {
load_balancer.add_healthcheck(healthcheck);
}
Ok(())
}
@@ -100,7 +106,7 @@ pub(crate) fn haproxy_xml_config_to_harmony_loadbalancer(
.backends
.backends
.iter()
.find(|b| Some(b.uuid.clone()) == frontend.default_backend);
.find(|b| b.uuid == frontend.default_backend);
let mut health_check = None;
match matching_backend {
@@ -110,7 +116,8 @@ pub(crate) fn haproxy_xml_config_to_harmony_loadbalancer(
}
None => {
warn!(
"HAProxy config could not find a matching backend for frontend {frontend:?}"
"HAProxy config could not find a matching backend for frontend {:?}",
frontend
);
}
}
@@ -145,11 +152,11 @@ pub(crate) fn get_servers_for_backend(
.servers
.iter()
.filter_map(|server| {
let address = server.address.clone()?;
let port = server.port?;
if backend_servers.contains(&server.uuid.as_str()) {
return Some(BackendServer { address, port });
return Some(BackendServer {
address: server.address.clone(),
port: server.port,
});
}
None
})
@@ -340,7 +347,7 @@ pub(crate) fn harmony_load_balancer_service_to_haproxy_xml(
name: format!("frontend_{}", service.listening_port),
bind: service.listening_port.to_string(),
mode: "tcp".to_string(), // TODO do not depend on health check here
default_backend: Some(backend.uuid.clone()),
default_backend: backend.uuid.clone(),
..Default::default()
};
info!("HAPRoxy frontend and backend mode currently hardcoded to tcp");
@@ -354,8 +361,8 @@ fn server_to_haproxy_server(server: &BackendServer) -> HAProxyServer {
uuid: Uuid::new_v4().to_string(),
name: format!("{}_{}", &server.address, &server.port),
enabled: 1,
address: Some(server.address.clone()),
port: Some(server.port),
address: server.address.clone(),
port: server.port,
mode: "active".to_string(),
server_type: "static".to_string(),
..Default::default()
@@ -378,8 +385,8 @@ mod tests {
let mut haproxy = HAProxy::default();
let server = HAProxyServer {
uuid: "server1".to_string(),
address: Some("192.168.1.1".to_string()),
port: Some(80),
address: "192.168.1.1".to_string(),
port: 80,
..Default::default()
};
haproxy.servers.servers.push(server);
@@ -404,8 +411,8 @@ mod tests {
let mut haproxy = HAProxy::default();
let server = HAProxyServer {
uuid: "server1".to_string(),
address: Some("192.168.1.1".to_string()),
port: Some(80),
address: "192.168.1.1".to_string(),
port: 80,
..Default::default()
};
haproxy.servers.servers.push(server);
@@ -424,8 +431,8 @@ mod tests {
let mut haproxy = HAProxy::default();
let server = HAProxyServer {
uuid: "server1".to_string(),
address: Some("192.168.1.1".to_string()),
port: Some(80),
address: "192.168.1.1".to_string(),
port: 80,
..Default::default()
};
haproxy.servers.servers.push(server);
@@ -446,16 +453,16 @@ mod tests {
let mut haproxy = HAProxy::default();
let server = HAProxyServer {
uuid: "server1".to_string(),
address: Some("some-hostname.test.mcd".to_string()),
port: Some(80),
address: "some-hostname.test.mcd".to_string(),
port: 80,
..Default::default()
};
haproxy.servers.servers.push(server);
let server = HAProxyServer {
uuid: "server2".to_string(),
address: Some("192.168.1.2".to_string()),
port: Some(8080),
address: "192.168.1.2".to_string(),
port: 8080,
..Default::default()
};
haproxy.servers.servers.push(server);
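
Taken together, add_service now composes the four add_* primitives of LoadBalancerConfig instead of the removed configure_service. A minimal sketch of that flow in isolation, assuming an already-acquired LoadBalancerConfig handle named lb (see the opnsense-config diff further down):

let (frontend, backend, servers, healthcheck) =
    harmony_load_balancer_service_to_haproxy_xml(&service);
lb.add_backend(backend);
lb.add_frontend(frontend);
lb.add_servers(servers);
if let Some(healthcheck) = healthcheck {
    lb.add_healthcheck(healthcheck);
}

Note that nothing in this flow removes a pre-existing service bound to the same port; that is exactly the gap the warn! at the top of add_service calls out.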

View File

@@ -50,6 +50,55 @@ pub struct ContinuousDelivery<A: OCICompliant + HelmPackage> {
}
impl<A: OCICompliant + HelmPackage> ContinuousDelivery<A> {
pub async fn deploy<T>(&self, topology: &T, helm_chart: String, image: String) -> Result<(), String>
where
T: Topology + HelmCommand + MultiTargetTopology + K8sclient + 'static,
{
// TODO: this is a temporary hack for demo purposes, the deployment target should be driven
// by the topology only and we should not have to know how to perform tasks like this for
// which the topology should be responsible.
//
// That said, this will require some careful architectural decisions, since the concept of
// deployment targets / profiles is probably a layer of complexity that we won't be
// completely able to avoid
//
// I'll try something for now that must be thought through afterwards: add a deployment_profile
// function to the topology trait that returns a profile, then anybody who needs it can
// access it. This forces every Topology to understand the concept of targets though... So
// instead I'll create a new Capability which is MultiTargetTopology and we'll see how it
// goes. It still does not feel right though.
//
// https://git.nationtech.io/NationTech/harmony/issues/106
match topology.current_target() {
DeploymentTarget::LocalDev => {
info!("Deploying {} locally...", self.application.name());
self.deploy_to_local_k3d(self.application.name(), helm_chart, image)
.await?;
}
target => {
info!("Deploying {} to target {target:?}", self.application.name());
let score = ArgoHelmScore {
namespace: format!("{}", self.application.name()),
openshift: true,
argo_apps: vec![ArgoApplication::from(CDApplicationConfig {
// helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0
version: Version::from("0.1.0").unwrap(),
helm_chart_repo_url: "hub.nationtech.io/harmony".to_string(),
helm_chart_name: format!("{}-chart", self.application.name()),
values_overrides: None,
name: format!("{}", self.application.name()),
namespace: format!("{}", self.application.name()),
})],
};
score
.interpret(&Inventory::empty(), topology)
.await
.unwrap();
}
};
Ok(())
}
async fn deploy_to_local_k3d(
&self,
app_name: String,
@@ -153,50 +202,7 @@ impl<
// https://git.nationtech.io/NationTech/harmony/issues/104
let image = self.application.build_push_oci_image().await?;
// TODO: this is a temporary hack for demo purposes, the deployment target should be driven
// by the topology only and we should not have to know how to perform tasks like this for
// which the topology should be responsible.
//
// That said, this will require some careful architectural decisions, since the concept of
// deployment targets / profiles is probably a layer of complexity that we won't be
// completely able to avoid
//
// I'll try something for now that must be thought through afterwards: add a deployment_profile
// function to the topology trait that returns a profile, then anybody who needs it can
// access it. This forces every Topology to understand the concept of targets though... So
// instead I'll create a new Capability which is MultiTargetTopology and we'll see how it
// goes. It still does not feel right though.
//
// https://git.nationtech.io/NationTech/harmony/issues/106
match topology.current_target() {
DeploymentTarget::LocalDev => {
info!("Deploying {} locally...", self.application.name());
self.deploy_to_local_k3d(self.application.name(), helm_chart, image)
.await?;
}
target => {
info!("Deploying {} to target {target:?}", self.application.name());
let score = ArgoHelmScore {
namespace: format!("{}", self.application.name()),
openshift: true,
argo_apps: vec![ArgoApplication::from(CDApplicationConfig {
// helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0
version: Version::from("0.1.0").unwrap(),
helm_chart_repo_url: "hub.nationtech.io/harmony".to_string(),
helm_chart_name: format!("{}-chart", self.application.name()),
values_overrides: None,
name: format!("{}", self.application.name()),
namespace: format!("{}", self.application.name()),
})],
};
score
.interpret(&Inventory::empty(), topology)
.await
.unwrap();
}
};
Ok(())
self.deploy(topology, helm_chart, image).await
}
fn name(&self) -> String {
"ContinuousDelivery".to_string()

View File

@@ -1,5 +1,6 @@
mod endpoint;
pub mod rhob_monitoring;
mod multisite;
pub use endpoint::*;
mod monitoring;

View File

@@ -0,0 +1,49 @@
use std::sync::Arc;
use crate::modules::application::{Application, ApplicationFeature, StatelessApplication};
use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus;
use crate::topology::{K8sAnywhereTopology, MultiTargetTopology};
use crate::{
inventory::Inventory,
modules::monitoring::{
alert_channel::webhook_receiver::WebhookReceiver, ntfy::ntfy::NtfyScore,
},
score::Score,
topology::{HelmCommand, K8sclient, Topology, tenant::TenantManager},
};
use crate::{
modules::prometheus::prometheus::PrometheusApplicationMonitoring,
topology::oberservability::monitoring::AlertReceiver,
};
use async_trait::async_trait;
use base64::{Engine as _, engine::general_purpose};
use harmony_types::net::Url;
use log::{debug, info};
trait DebugTopology: Topology + std::fmt::Debug {}
#[derive(Debug, Clone)]
pub struct Multisite {
app: Arc<dyn StatelessApplication>,
secondary_site: Arc<K8sAnywhereTopology>,
}
#[async_trait]
impl<T: Topology> ApplicationFeature<T> for Multisite {
async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
todo!(
"
- Find a way to get pvs for this application
- find the pv csi volumes uuid
- run rbd mirror image enable --pool mirrored-pool csi-vol-<UUID_PV> snapshot
- enjoy
"
)
}
fn name(&self) -> String {
"Multisite".to_string()
}
}
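
The one concrete step in the todo! is an rbd CLI invocation. A minimal sketch of how ensure_installed might eventually shell out for it, keeping the pool name quoted in the todo! and assuming a hypothetical uuid_pv already resolved from the PV's CSI volume handle:

use std::process::Command;

fn enable_rbd_mirroring(uuid_pv: &str) -> std::io::Result<std::process::ExitStatus> {
    // mirrors the command quoted in the todo! above
    let image = format!("csi-vol-{uuid_pv}");
    Command::new("rbd")
        .args(["mirror", "image", "enable", "--pool", "mirrored-pool", image.as_str(), "snapshot"])
        .status()
}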

View File

@@ -2,6 +2,10 @@ mod feature;
pub mod features;
pub mod oci;
mod rust;
mod stateless;
mod stateful;
pub use stateless::*;
pub use stateful::*;
use std::sync::Arc;
pub use feature::*;

View File

@@ -206,7 +206,7 @@ impl RustWebapp {
}
}
///normalizes timestamp and ignores files that will bust the docker cach
///normalizes timestamp and ignores files that will bust the docker cache
async fn create_deterministic_tar(
&self,
project_root: &std::path::Path,
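
The doc comment above describes a reproducibility trick: identical inputs must produce byte-identical archives, otherwise Docker's cache key changes on every build. A minimal sketch of the timestamp-normalization half, assuming the tar crate; the helper name is illustrative, not the method in this diff:

use tar::{Builder, Header};

fn append_deterministic(builder: &mut Builder<Vec<u8>>, path: &str, data: &[u8]) -> std::io::Result<()> {
    let mut header = Header::new_gnu();
    header.set_size(data.len() as u64);
    header.set_mtime(0); // pin mtime so identical sources yield identical tar bytes
    header.set_cksum();
    builder.append_data(&mut header, path, data)
}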

View File

@@ -0,0 +1,6 @@
use crate::modules::application::Application;
/// A StatefulApplication is an application bundle that writes persistent data.
///
/// This will enable backup features, stateful multisite replication, etc.
pub trait StatefulApplication: Application {}

View File

@@ -0,0 +1,26 @@
use crate::modules::application::{Application, features::ContinuousDeliveryApplication};
/// Marker trait for stateless applications that can be deployed anywhere without worrying about
/// data.
///
/// This includes Applications fitting these categories:
///
/// - Application with all files built into the docker image and never written to, can be mounted
/// read-only
/// - Application writing to hard drive on ephemeral volume that can be lost at anytime and does
/// not require any replication/backup logic to operate
/// - Not supported : an application that writes state to a volume that must be shared or kept
/// to maintain a quorum across various instances
/// - Application connecting to a database/datastore accessible from anywhere such as
/// - Public bucket endpoint
/// - Publicly accessible
/// - Application connecting to a private database external to this application, accessible from the
/// deployment target
/// - Ensuring the private database is reachable is out of scope of this trait (for now)
///
/// The entire application definition **must not** require any persistent volume or include a
/// deployment component depending on persistent data such as a transitive PostgreSQL helm chart.
///
/// Typically, applications that can be autoscaled without additional complexity fit the
/// StatelessApplication requirements.
pub trait StatelessApplication: Application + ContinuousDeliveryApplication {}
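
A marker trait like this pays off as a bound: features that are only safe without persistent state can require it instead of plain Application. A minimal sketch, where deploy_anywhere is a hypothetical helper and not part of this diff:

fn deploy_anywhere<A: StatelessApplication>(app: &A) {
    // The bound guarantees the bundle carries no persistent volume or stateful
    // transitive component, so it can be rescheduled and scaled freely.
    let _ = app;
}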

View File

@@ -77,8 +77,6 @@ impl OKDBootstrapLoadBalancerScore {
address: topology.bootstrap_host.ip.to_string(),
port,
});
backend.dedup();
backend
}
}

View File

@@ -77,7 +77,7 @@ impl YaSerializeTrait for HAProxyId {
}
}
#[derive(PartialEq, Debug, Clone)]
#[derive(PartialEq, Debug)]
pub struct HAProxyId(String);
impl Default for HAProxyId {
@@ -297,7 +297,7 @@ pub struct HAProxyFrontends {
pub frontend: Vec<Frontend>,
}
#[derive(Clone, Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
pub struct Frontend {
#[yaserde(attribute = true)]
pub uuid: String,
@@ -310,7 +310,7 @@ pub struct Frontend {
pub bind_options: MaybeString,
pub mode: String,
#[yaserde(rename = "defaultBackend")]
pub default_backend: Option<String>,
pub default_backend: String,
pub ssl_enabled: i32,
pub ssl_certificates: MaybeString,
pub ssl_default_certificate: MaybeString,
@@ -416,7 +416,7 @@ pub struct HAProxyBackends {
pub backends: Vec<HAProxyBackend>,
}
#[derive(Clone, Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
pub struct HAProxyBackend {
#[yaserde(attribute = true, rename = "uuid")]
pub uuid: String,
@@ -535,7 +535,7 @@ pub struct HAProxyServers {
pub servers: Vec<HAProxyServer>,
}
#[derive(Clone, Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
pub struct HAProxyServer {
#[yaserde(attribute = true, rename = "uuid")]
pub uuid: String,
@@ -543,8 +543,8 @@ pub struct HAProxyServer {
pub enabled: u8,
pub name: String,
pub description: MaybeString,
pub address: Option<String>,
pub port: Option<u16>,
pub address: String,
pub port: u16,
pub checkport: MaybeString,
pub mode: String,
pub multiplexer_protocol: MaybeString,
@@ -589,7 +589,7 @@ pub struct HAProxyHealthChecks {
pub healthchecks: Vec<HAProxyHealthCheck>,
}
#[derive(Clone, Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
pub struct HAProxyHealthCheck {
#[yaserde(attribute = true)]
pub uuid: String,

View File

@@ -25,7 +25,6 @@ sha2 = "0.10.9"
[dev-dependencies]
pretty_assertions.workspace = true
assertor.workspace = true
[lints.rust]
unexpected_cfgs = { level = "warn", check-cfg = ['cfg(e2e_test)'] }

View File

@@ -30,7 +30,8 @@ impl SshConfigManager {
self.opnsense_shell
.exec(&format!(
"cp /conf/config.xml /conf/backup/{backup_filename}"
"cp /conf/config.xml /conf/backup/{}",
backup_filename
))
.await
}

View File

@@ -1,8 +1,10 @@
mod ssh;
use crate::Error;
use async_trait::async_trait;
pub use ssh::*;
use async_trait::async_trait;
use crate::Error;
#[async_trait]
pub trait OPNsenseShell: std::fmt::Debug + Send + Sync {
async fn exec(&self, command: &str) -> Result<String, Error>;

View File

@@ -1,8 +1,11 @@
use crate::{config::OPNsenseShell, Error};
use std::sync::Arc;
use log::warn;
use opnsense_config_xml::{
Frontend, HAProxy, HAProxyBackend, HAProxyHealthCheck, HAProxyServer, OPNsense,
};
use std::{collections::HashSet, sync::Arc};
use crate::{config::OPNsenseShell, Error};
pub struct LoadBalancerConfig<'a> {
opnsense: &'a mut OPNsense,
@@ -28,7 +31,7 @@ impl<'a> LoadBalancerConfig<'a> {
match &mut self.opnsense.opnsense.haproxy.as_mut() {
Some(haproxy) => f(haproxy),
None => unimplemented!(
"Cannot configure load balancer when haproxy config does not exist yet"
"Adding a backend is not supported when haproxy config does not exist yet"
),
}
}
@@ -37,67 +40,21 @@ impl<'a> LoadBalancerConfig<'a> {
self.with_haproxy(|haproxy| haproxy.general.enabled = enabled as i32);
}
/// Configures a service by removing any existing service on the same port
/// and then adding the new definition. This ensures idempotency.
pub fn configure_service(
&mut self,
frontend: Frontend,
backend: HAProxyBackend,
servers: Vec<HAProxyServer>,
healthcheck: Option<HAProxyHealthCheck>,
) {
self.remove_service_by_bind_address(&frontend.bind);
self.remove_servers(&servers);
self.add_new_service(frontend, backend, servers, healthcheck);
pub fn add_backend(&mut self, backend: HAProxyBackend) {
warn!("TODO make sure this new backend does not refer non-existing entities like servers or health checks");
self.with_haproxy(|haproxy| haproxy.backends.backends.push(backend));
}
// Remove the corresponding real servers based on their name if they already exist.
fn remove_servers(&mut self, servers: &[HAProxyServer]) {
let server_names: HashSet<_> = servers.iter().map(|s| s.name.clone()).collect();
self.with_haproxy(|haproxy| {
haproxy
.servers
.servers
.retain(|s| !server_names.contains(&s.name));
});
pub fn add_frontend(&mut self, frontend: Frontend) {
self.with_haproxy(|haproxy| haproxy.frontends.frontend.push(frontend));
}
/// Removes a service and its dependent components based on the frontend's bind address.
/// This performs a cascading delete of the frontend, backend, servers, and health check.
fn remove_service_by_bind_address(&mut self, bind_address: &str) {
self.with_haproxy(|haproxy| {
let Some(old_frontend) = remove_frontend_by_bind_address(haproxy, bind_address) else {
return;
};
let Some(old_backend) = remove_backend(haproxy, old_frontend) else {
return;
};
remove_healthcheck(haproxy, &old_backend);
remove_linked_servers(haproxy, &old_backend);
});
pub fn add_healthcheck(&mut self, healthcheck: HAProxyHealthCheck) {
self.with_haproxy(|haproxy| haproxy.healthchecks.healthchecks.push(healthcheck));
}
/// Adds the components of a new service to the HAProxy configuration.
/// This function de-duplicates servers by name to prevent configuration errors.
fn add_new_service(
&mut self,
frontend: Frontend,
backend: HAProxyBackend,
servers: Vec<HAProxyServer>,
healthcheck: Option<HAProxyHealthCheck>,
) {
self.with_haproxy(|haproxy| {
if let Some(check) = healthcheck {
haproxy.healthchecks.healthchecks.push(check);
}
haproxy.servers.servers.extend(servers);
haproxy.backends.backends.push(backend);
haproxy.frontends.frontend.push(frontend);
});
pub fn add_servers(&mut self, mut servers: Vec<HAProxyServer>) {
self.with_haproxy(|haproxy| haproxy.servers.servers.append(&mut servers));
}
pub async fn reload_restart(&self) -> Result<(), Error> {
@@ -125,262 +82,3 @@ impl<'a> LoadBalancerConfig<'a> {
Ok(())
}
}
fn remove_frontend_by_bind_address(haproxy: &mut HAProxy, bind_address: &str) -> Option<Frontend> {
let pos = haproxy
.frontends
.frontend
.iter()
.position(|f| f.bind == bind_address);
match pos {
Some(pos) => Some(haproxy.frontends.frontend.remove(pos)),
None => None,
}
}
fn remove_backend(haproxy: &mut HAProxy, old_frontend: Frontend) -> Option<HAProxyBackend> {
let default_backend = old_frontend.default_backend?;
let pos = haproxy
.backends
.backends
.iter()
.position(|b| b.uuid == default_backend);
match pos {
Some(pos) => Some(haproxy.backends.backends.remove(pos)),
None => None, // orphaned frontend, shouldn't happen
}
}
fn remove_healthcheck(haproxy: &mut HAProxy, backend: &HAProxyBackend) {
if let Some(uuid) = &backend.health_check.content {
haproxy
.healthchecks
.healthchecks
.retain(|h| h.uuid != *uuid);
}
}
/// Remove the backend's servers. This assumes servers are not shared between services.
fn remove_linked_servers(haproxy: &mut HAProxy, backend: &HAProxyBackend) {
if let Some(server_uuids_str) = &backend.linked_servers.content {
let server_uuids_to_remove: HashSet<_> = server_uuids_str.split(',').collect();
haproxy
.servers
.servers
.retain(|s| !server_uuids_to_remove.contains(s.uuid.as_str()));
}
}
#[cfg(test)]
mod tests {
use crate::config::DummyOPNSenseShell;
use assertor::*;
use opnsense_config_xml::{
Frontend, HAProxy, HAProxyBackend, HAProxyBackends, HAProxyFrontends, HAProxyHealthCheck,
HAProxyHealthChecks, HAProxyId, HAProxyServer, HAProxyServers, MaybeString, OPNsense,
};
use std::sync::Arc;
use super::LoadBalancerConfig;
static SERVICE_BIND_ADDRESS: &str = "192.168.1.1:80";
static OTHER_SERVICE_BIND_ADDRESS: &str = "192.168.1.1:443";
static SERVER_ADDRESS: &str = "1.1.1.1:80";
static OTHER_SERVER_ADDRESS: &str = "1.1.1.1:443";
#[test]
fn configure_service_should_add_all_service_components_to_haproxy() {
let mut opnsense = given_opnsense();
let mut load_balancer = given_load_balancer(&mut opnsense);
let (healthcheck, servers, backend, frontend) =
given_service(SERVICE_BIND_ADDRESS, SERVER_ADDRESS);
load_balancer.configure_service(
frontend.clone(),
backend.clone(),
servers.clone(),
Some(healthcheck.clone()),
);
assert_haproxy_configured_with(
opnsense,
vec![frontend],
vec![backend],
servers,
vec![healthcheck],
);
}
#[test]
fn configure_service_should_replace_service_on_same_bind_address() {
let (healthcheck, servers, backend, frontend) =
given_service(SERVICE_BIND_ADDRESS, SERVER_ADDRESS);
let mut opnsense = given_opnsense_with(given_haproxy(
vec![frontend.clone()],
vec![backend.clone()],
servers.clone(),
vec![healthcheck.clone()],
));
let mut load_balancer = given_load_balancer(&mut opnsense);
let (updated_healthcheck, updated_servers, updated_backend, updated_frontend) =
given_service(SERVICE_BIND_ADDRESS, OTHER_SERVER_ADDRESS);
load_balancer.configure_service(
updated_frontend.clone(),
updated_backend.clone(),
updated_servers.clone(),
Some(updated_healthcheck.clone()),
);
assert_haproxy_configured_with(
opnsense,
vec![updated_frontend],
vec![updated_backend],
updated_servers,
vec![updated_healthcheck],
);
}
#[test]
fn configure_service_should_keep_existing_service_on_different_bind_addresses() {
let (healthcheck, servers, backend, frontend) =
given_service(SERVICE_BIND_ADDRESS, SERVER_ADDRESS);
let (other_healthcheck, other_servers, other_backend, other_frontend) =
given_service(OTHER_SERVICE_BIND_ADDRESS, OTHER_SERVER_ADDRESS);
let mut opnsense = given_opnsense_with(given_haproxy(
vec![frontend.clone()],
vec![backend.clone()],
servers.clone(),
vec![healthcheck.clone()],
));
let mut load_balancer = given_load_balancer(&mut opnsense);
load_balancer.configure_service(
other_frontend.clone(),
other_backend.clone(),
other_servers.clone(),
Some(other_healthcheck.clone()),
);
assert_haproxy_configured_with(
opnsense,
vec![frontend, other_frontend],
vec![backend, other_backend],
[servers, other_servers].concat(),
vec![healthcheck, other_healthcheck],
);
}
fn assert_haproxy_configured_with(
opnsense: OPNsense,
frontends: Vec<Frontend>,
backends: Vec<HAProxyBackend>,
servers: Vec<HAProxyServer>,
healthchecks: Vec<HAProxyHealthCheck>,
) {
let haproxy = opnsense.opnsense.haproxy.as_ref().unwrap();
assert_that!(haproxy.frontends.frontend).contains_exactly(frontends);
assert_that!(haproxy.backends.backends).contains_exactly(backends);
assert_that!(haproxy.servers.servers).is_equal_to(servers);
assert_that!(haproxy.healthchecks.healthchecks).contains_exactly(healthchecks);
}
fn given_opnsense() -> OPNsense {
OPNsense::default()
}
fn given_opnsense_with(haproxy: HAProxy) -> OPNsense {
let mut opnsense = OPNsense::default();
opnsense.opnsense.haproxy = Some(haproxy);
opnsense
}
fn given_load_balancer<'a>(opnsense: &'a mut OPNsense) -> LoadBalancerConfig<'a> {
let opnsense_shell = Arc::new(DummyOPNSenseShell {});
if opnsense.opnsense.haproxy.is_none() {
opnsense.opnsense.haproxy = Some(HAProxy::default());
}
LoadBalancerConfig::new(opnsense, opnsense_shell)
}
fn given_service(
bind_address: &str,
server_address: &str,
) -> (
HAProxyHealthCheck,
Vec<HAProxyServer>,
HAProxyBackend,
Frontend,
) {
let healthcheck = given_healthcheck();
let servers = vec![given_server(server_address)];
let backend = given_backend();
let frontend = given_frontend(bind_address);
(healthcheck, servers, backend, frontend)
}
fn given_haproxy(
frontends: Vec<Frontend>,
backends: Vec<HAProxyBackend>,
servers: Vec<HAProxyServer>,
healthchecks: Vec<HAProxyHealthCheck>,
) -> HAProxy {
HAProxy {
frontends: HAProxyFrontends {
frontend: frontends,
},
backends: HAProxyBackends { backends },
servers: HAProxyServers { servers },
healthchecks: HAProxyHealthChecks { healthchecks },
..Default::default()
}
}
fn given_frontend(bind_address: &str) -> Frontend {
Frontend {
uuid: "uuid".into(),
id: HAProxyId::default(),
enabled: 1,
name: format!("frontend_{bind_address}"),
bind: bind_address.into(),
default_backend: Some("backend-uuid".into()),
..Default::default()
}
}
fn given_backend() -> HAProxyBackend {
HAProxyBackend {
uuid: "backend-uuid".into(),
id: HAProxyId::default(),
enabled: 1,
name: "backend_192.168.1.1:80".into(),
linked_servers: MaybeString::from("server-uuid"),
health_check_enabled: 1,
health_check: MaybeString::from("healthcheck-uuid"),
..Default::default()
}
}
fn given_server(address: &str) -> HAProxyServer {
HAProxyServer {
uuid: "server-uuid".into(),
id: HAProxyId::default(),
name: address.into(),
address: Some(address.into()),
..Default::default()
}
}
fn given_healthcheck() -> HAProxyHealthCheck {
HAProxyHealthCheck {
uuid: "healthcheck-uuid".into(),
name: "healthcheck".into(),
..Default::default()
}
}
}