//! End-to-end tests against a real OPNsense instance.
//!
//! These tests are `#[ignore]`d by default and require:
//!
//! - `OPNSENSE_TEST_URL`: API base URL (e.g. `https://10.99.99.1/api`)
//! - `OPNSENSE_TEST_KEY`: API key
//! - `OPNSENSE_TEST_SECRET`: API secret
//!
//! Run with:
//! ```text
//! cargo test -p opnsense-api --test e2e_test -- --ignored
//! ```
//!
//! WARNING: These tests create/delete entities on the target OPNsense
//! instance. Do NOT run against a production firewall.
|
|
|
|
use opnsense_api::client::OpnsenseClient;
|
|
use opnsense_api::response::UuidResponse;
|
|
use std::env;
|
|
|
|
fn test_client() -> OpnsenseClient {
|
|
let url = env::var("OPNSENSE_TEST_URL")
|
|
.expect("OPNSENSE_TEST_URL must be set (e.g. https://10.99.99.1/api)");
|
|
let key = env::var("OPNSENSE_TEST_KEY").expect("OPNSENSE_TEST_KEY must be set");
|
|
let secret = env::var("OPNSENSE_TEST_SECRET").expect("OPNSENSE_TEST_SECRET must be set");
|
|
|
|
OpnsenseClient::builder()
|
|
.base_url(&url)
|
|
.auth_from_key_secret(&key, &secret)
|
|
.skip_tls_verify()
|
|
.timeout_secs(60)
|
|
.build()
|
|
.expect("failed to build test client")
|
|
}
|
|
|
|
// ── Firmware / core ─────────────────────────────────────────────────────
|
|
|
|
#[tokio::test]
|
|
#[ignore]
|
|
async fn e2e_firmware_info() {
|
|
let client = test_client();
|
|
let info: serde_json::Value = client
|
|
.get_typed("core", "firmware", "status")
|
|
.await
|
|
.expect("firmware status call failed");
|
|
|
|
assert!(
|
|
info.get("product").is_some(),
|
|
"firmware status must contain 'product' key"
|
|
);
|
|
}
|
|
|
|
#[tokio::test]
|
|
#[ignore]
|
|
async fn e2e_firmware_package_list() {
|
|
let client = test_client();
|
|
let info: serde_json::Value = client
|
|
.get_typed("core", "firmware", "info")
|
|
.await
|
|
.expect("firmware info call failed");
|
|
|
|
let packages = info["package"].as_array();
|
|
assert!(
|
|
packages.is_some() && !packages.unwrap().is_empty(),
|
|
"firmware info must contain non-empty 'package' array"
|
|
);
|
|
}
|
|
|
|
// ── Dnsmasq ─────────────────────────────────────────────────────────────
|
|
|
|
#[tokio::test]
|
|
#[ignore]
|
|
async fn e2e_dnsmasq_settings_get() {
|
|
let client = test_client();
|
|
let resp: serde_json::Value = client
|
|
.get_typed("dnsmasq", "settings", "get")
|
|
.await
|
|
.expect("dnsmasq settings/get failed");
|
|
|
|
assert!(
|
|
resp.get("dnsmasq").is_some(),
|
|
"response must contain 'dnsmasq' key"
|
|
);
|
|
}
|
|
|
|
#[tokio::test]
|
|
#[ignore]
|
|
async fn e2e_dnsmasq_crud_host() {
|
|
let client = test_client();
|
|
|
|
// Create
|
|
let body = serde_json::json!({
|
|
"host": {
|
|
"host": "e2e-test-host",
|
|
"ip": "10.255.255.250",
|
|
"hwaddr": "E2:E2:E2:E2:E2:E2",
|
|
"domain": "test.local",
|
|
"local": "1"
|
|
}
|
|
});
|
|
let add_resp: UuidResponse = client
|
|
.add_item("dnsmasq", "settings", "Host", &body)
|
|
.await
|
|
.expect("addHost failed");
|
|
let uuid = &add_resp.uuid;
|
|
assert!(!uuid.is_empty(), "addHost must return a UUID");
|
|
|
|
// Read back
|
|
let get_resp: serde_json::Value = client
|
|
.get_item("dnsmasq", "settings", "Host", uuid)
|
|
.await
|
|
.expect("getHost failed");
|
|
let host_data = &get_resp["host"];
|
|
assert_eq!(host_data["host"].as_str(), Some("e2e-test-host"));
|
|
|
|
// Delete
|
|
client
|
|
.del_item("dnsmasq", "settings", "Host", uuid)
|
|
.await
|
|
.expect("delHost failed");
|
|
|
|
// Verify deleted
|
|
let settings: serde_json::Value = client
|
|
.get_typed("dnsmasq", "settings", "get")
|
|
.await
|
|
.expect("settings/get failed");
|
|
let hosts = &settings["dnsmasq"]["hosts"];
|
|
assert!(
|
|
!hosts.as_object().unwrap().contains_key(uuid),
|
|
"host should be deleted"
|
|
);
|
|
|
|
// Reconfigure to clean up
|
|
client
|
|
.reconfigure("dnsmasq")
|
|
.await
|
|
.expect("reconfigure failed");
|
|
}
|
|
|
|
// ── Dnsmasq via opnsense-config ─────────────────────────────────────────
|
|
|
|
/// Exercises the higher-level `opnsense-config` wrapper: adds a DHCP static
/// mapping via `DhcpConfigDnsMasq`, verifies it through the raw API, then
/// deletes it and reconfigures the service.
#[tokio::test]
#[ignore]
async fn e2e_dnsmasq_add_static_mapping_via_config() {
    use opnsense_config::modules::dnsmasq::DhcpConfigDnsMasq;
    use std::net::Ipv4Addr;
    use std::sync::Arc;

    let client = test_client();

    // Create a DummyShell that won't be used (no SSH ops in this test)
    struct NoopShell;
    impl std::fmt::Debug for NoopShell {
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            f.write_str("NoopShell")
        }
    }
    // Minimal `OPNsenseShell` stub: every operation "succeeds" with empty
    // output. Sufficient here on the assumption that add_static_mapping only
    // talks to the HTTP API — NOTE(review): confirm no shell call is made on
    // this path, otherwise failures would be silently masked.
    #[async_trait::async_trait]
    impl opnsense_config::config::OPNsenseShell for NoopShell {
        async fn exec(&self, _: &str) -> Result<String, opnsense_config::Error> {
            Ok(String::new())
        }
        async fn write_content_to_temp_file(
            &self,
            _: &str,
        ) -> Result<String, opnsense_config::Error> {
            Ok(String::new())
        }
        async fn write_content_to_file(
            &self,
            _: &str,
            _: &str,
        ) -> Result<String, opnsense_config::Error> {
            Ok(String::new())
        }
        async fn upload_folder(&self, _: &str, _: &str) -> Result<String, opnsense_config::Error> {
            Ok(String::new())
        }
    }

    let dhcp = DhcpConfigDnsMasq::new(client.clone(), Arc::new(NoopShell));

    // Add a static mapping
    dhcp.add_static_mapping(
        &["E2:E2:E2:E2:E2:01".to_string()],
        &Ipv4Addr::new(10, 255, 255, 251),
        "e2e-config-test",
    )
    .await
    .expect("add_static_mapping failed");

    // Verify it exists via raw API
    // (unwrap on `as_object` is acceptable here: the mapping was just added,
    // so the hosts map must exist.)
    let settings: serde_json::Value = client
        .get_typed("dnsmasq", "settings", "get")
        .await
        .unwrap();
    let hosts = settings["dnsmasq"]["hosts"].as_object().unwrap();
    let found = hosts
        .values()
        .any(|h| h["host"].as_str() == Some("e2e-config-test"));
    assert!(found, "host should exist after add_static_mapping");

    // Clean up: find and delete the host
    for (uuid, h) in hosts {
        if h["host"].as_str() == Some("e2e-config-test") {
            client
                .del_item("dnsmasq", "settings", "Host", uuid)
                .await
                .unwrap();
        }
    }
    // Apply the cleaned-up configuration to the running service.
    client.reconfigure("dnsmasq").await.unwrap();
}
|
|
|
|
// ── HAProxy ─────────────────────────────────────────────────────────────
|
|
|
|
#[tokio::test]
|
|
#[ignore]
|
|
async fn e2e_haproxy_settings_get() {
|
|
let client = test_client();
|
|
let resp: serde_json::Value = client
|
|
.get_typed("haproxy", "settings", "get")
|
|
.await
|
|
.expect("haproxy settings/get failed — is os-haproxy installed?");
|
|
|
|
assert!(
|
|
resp.get("haproxy").is_some(),
|
|
"response must contain 'haproxy' key"
|
|
);
|
|
}
|
|
|
|
#[tokio::test]
|
|
#[ignore]
|
|
async fn e2e_haproxy_crud_server() {
|
|
let client = test_client();
|
|
|
|
// Create a server
|
|
let body = serde_json::json!({
|
|
"server": {
|
|
"name": "e2e-test-server",
|
|
"address": "10.255.255.252",
|
|
"port": "8080",
|
|
"enabled": "1",
|
|
"mode": "active",
|
|
"type": "static"
|
|
}
|
|
});
|
|
let add_resp: UuidResponse = client
|
|
.add_item("haproxy", "settings", "Server", &body)
|
|
.await
|
|
.expect("addServer failed");
|
|
let uuid = &add_resp.uuid;
|
|
assert!(!uuid.is_empty());
|
|
|
|
// Delete
|
|
client
|
|
.del_item("haproxy", "settings", "Server", uuid)
|
|
.await
|
|
.expect("delServer failed");
|
|
|
|
// Reconfigure
|
|
client
|
|
.reconfigure("haproxy")
|
|
.await
|
|
.expect("reconfigure failed");
|
|
}
|
|
|
|
#[tokio::test]
|
|
#[ignore]
|
|
async fn e2e_haproxy_configure_service_via_config() {
|
|
use opnsense_config::modules::load_balancer::*;
|
|
|
|
let client = test_client();
|
|
let lb = LoadBalancerConfig::new(client.clone());
|
|
|
|
// Configure a test service
|
|
let frontend = LbFrontend {
|
|
name: "e2e_frontend_test".to_string(),
|
|
bind: "10.255.255.253:19999".to_string(),
|
|
mode: "tcp".to_string(),
|
|
enabled: true,
|
|
default_backend: None,
|
|
stickiness_expire: None,
|
|
stickiness_size: None,
|
|
stickiness_conn_rate_period: None,
|
|
stickiness_sess_rate_period: None,
|
|
stickiness_http_req_rate_period: None,
|
|
stickiness_http_err_rate_period: None,
|
|
stickiness_bytes_in_rate_period: None,
|
|
stickiness_bytes_out_rate_period: None,
|
|
ssl_hsts_max_age: None,
|
|
};
|
|
let backend = LbBackend {
|
|
name: "e2e_backend_test".to_string(),
|
|
mode: "tcp".to_string(),
|
|
algorithm: "roundrobin".to_string(),
|
|
enabled: true,
|
|
health_check_enabled: false,
|
|
random_draws: Some(2),
|
|
stickiness_expire: None,
|
|
stickiness_size: None,
|
|
stickiness_conn_rate_period: None,
|
|
stickiness_sess_rate_period: None,
|
|
stickiness_http_req_rate_period: None,
|
|
stickiness_http_err_rate_period: None,
|
|
stickiness_bytes_in_rate_period: None,
|
|
stickiness_bytes_out_rate_period: None,
|
|
};
|
|
let servers = vec![LbServer {
|
|
name: "e2e_server_test".to_string(),
|
|
address: "10.255.255.254".to_string(),
|
|
port: 8080,
|
|
enabled: true,
|
|
mode: "active".to_string(),
|
|
server_type: "static".to_string(),
|
|
}];
|
|
|
|
lb.configure_service(frontend, backend, servers, None)
|
|
.await
|
|
.expect("configure_service failed");
|
|
|
|
// Verify via list_services
|
|
let services = lb.list_services().await.expect("list_services failed");
|
|
let found = services.iter().any(|s| s.bind == "10.255.255.253:19999");
|
|
assert!(found, "configured service should appear in list_services");
|
|
|
|
// Idempotent: configure again with same bind
|
|
let frontend2 = LbFrontend {
|
|
name: "e2e_frontend_test_v2".to_string(),
|
|
bind: "10.255.255.253:19999".to_string(),
|
|
mode: "tcp".to_string(),
|
|
enabled: true,
|
|
default_backend: None,
|
|
stickiness_expire: None,
|
|
stickiness_size: None,
|
|
stickiness_conn_rate_period: None,
|
|
stickiness_sess_rate_period: None,
|
|
stickiness_http_req_rate_period: None,
|
|
stickiness_http_err_rate_period: None,
|
|
stickiness_bytes_in_rate_period: None,
|
|
stickiness_bytes_out_rate_period: None,
|
|
ssl_hsts_max_age: None,
|
|
};
|
|
let backend2 = LbBackend {
|
|
name: "e2e_backend_test_v2".to_string(),
|
|
mode: "tcp".to_string(),
|
|
algorithm: "roundrobin".to_string(),
|
|
enabled: true,
|
|
health_check_enabled: false,
|
|
random_draws: Some(2),
|
|
stickiness_expire: None,
|
|
stickiness_size: None,
|
|
stickiness_conn_rate_period: None,
|
|
stickiness_sess_rate_period: None,
|
|
stickiness_http_req_rate_period: None,
|
|
stickiness_http_err_rate_period: None,
|
|
stickiness_bytes_in_rate_period: None,
|
|
stickiness_bytes_out_rate_period: None,
|
|
};
|
|
let servers2 = vec![LbServer {
|
|
name: "e2e_server_test_v2".to_string(),
|
|
address: "10.255.255.253".to_string(),
|
|
port: 9090,
|
|
enabled: true,
|
|
mode: "active".to_string(),
|
|
server_type: "static".to_string(),
|
|
}];
|
|
|
|
lb.configure_service(frontend2, backend2, servers2, None)
|
|
.await
|
|
.expect("idempotent configure_service failed");
|
|
|
|
// Verify only one service on that bind
|
|
let services2 = lb.list_services().await.expect("list_services failed");
|
|
let count = services2
|
|
.iter()
|
|
.filter(|s| s.bind == "10.255.255.253:19999")
|
|
.count();
|
|
assert_eq!(count, 1, "should have exactly one service on the bind");
|
|
|
|
// Clean up: configure with same bind removes the old service, then delete the new one
|
|
// We re-use configure_service with the same bind to trigger cascade delete,
|
|
// then manually delete the newly created entities.
|
|
let config: serde_json::Value = client
|
|
.get_typed("haproxy", "settings", "get")
|
|
.await
|
|
.unwrap();
|
|
if let Some(frontends) = config["haproxy"]["frontends"]["frontend"].as_object() {
|
|
for (uuid, fe) in frontends {
|
|
if fe["bind"].as_str() == Some("10.255.255.253:19999") {
|
|
if let Some(be_uuid) = fe["defaultBackend"].as_str() {
|
|
if let Some(be) = config["haproxy"]["backends"]["backend"].get(be_uuid) {
|
|
if let Some(srv_csv) = be["linkedServers"].as_str() {
|
|
for srv_uuid in srv_csv.split(',').filter(|s: &&str| !s.is_empty()) {
|
|
let _ = client
|
|
.del_item("haproxy", "settings", "Server", srv_uuid)
|
|
.await;
|
|
}
|
|
}
|
|
}
|
|
let _ = client
|
|
.del_item("haproxy", "settings", "Backend", be_uuid)
|
|
.await;
|
|
}
|
|
let _ = client
|
|
.del_item("haproxy", "settings", "Frontend", uuid)
|
|
.await;
|
|
}
|
|
}
|
|
}
|
|
client.reconfigure("haproxy").await.unwrap();
|
|
}
|
|
|
|
// ── VLAN ────────────────────────────────────────────────────────────────
|
|
|
|
#[tokio::test]
|
|
#[ignore]
|
|
async fn e2e_vlan_settings_get() {
|
|
let client = test_client();
|
|
let resp: serde_json::Value = client
|
|
.get_typed("interfaces", "vlan_settings", "get")
|
|
.await
|
|
.expect("vlan settings/get failed");
|
|
|
|
assert!(
|
|
resp.get("vlan").is_some(),
|
|
"response must contain 'vlan' key"
|
|
);
|
|
}
|
|
|
|
// ── WireGuard ───────────────────────────────────────────────────────────
|
|
|
|
#[tokio::test]
|
|
#[ignore]
|
|
async fn e2e_wireguard_settings_get() {
|
|
let client = test_client();
|
|
let resp: serde_json::Value = client
|
|
.get_typed("wireguard", "general", "get")
|
|
.await
|
|
.expect("wireguard general/get failed");
|
|
|
|
assert!(
|
|
resp.get("general").is_some(),
|
|
"response must contain 'general' key"
|
|
);
|
|
}
|
|
|
|
// ── Firewall ────────────────────────────────────────────────────────────
|
|
|
|
#[tokio::test]
|
|
#[ignore]
|
|
async fn e2e_firewall_filter_get() {
|
|
let client = test_client();
|
|
let resp: serde_json::Value = client
|
|
.get_typed("firewall", "filter", "get")
|
|
.await
|
|
.expect("firewall filter/get failed");
|
|
|
|
assert!(
|
|
resp.get("filter").is_some(),
|
|
"response must contain 'filter' key"
|
|
);
|
|
}
|