6.9 KiB
Phase 5: E2E Tests for PostgreSQL & RustFS
Goal
Establish an automated E2E test pipeline that proves working examples actually work. Start with the two simplest k8s-based examples: PostgreSQL and RustFS.
Prerequisites
- Phase 1 complete (config crate works, bootstrap is clean)
- `feat/rustfs` branch merged
Architecture
Test harness: tests/e2e/
A dedicated workspace member crate at tests/e2e/ that contains:
- Shared k3d utilities — create/destroy clusters, wait for readiness
- Per-example test modules — each example gets a `#[tokio::test]` function
- Assertion helpers — wait for pods, check CRDs exist, verify services
tests/
e2e/
Cargo.toml
src/
lib.rs # Shared test utilities
k3d.rs # k3d cluster lifecycle
k8s_assert.rs # K8s assertion helpers
tests/
postgresql.rs # PostgreSQL E2E test
rustfs.rs # RustFS E2E test
k3d cluster lifecycle
// tests/e2e/src/k3d.rs
use k3d_rs::K3d;
pub struct TestCluster {
pub name: String,
pub k3d: K3d,
pub client: kube::Client,
reuse: bool,
}
impl TestCluster {
/// Creates a k3d cluster for testing.
/// If HARMONY_E2E_REUSE_CLUSTER=1, reuses existing cluster.
pub async fn ensure(name: &str) -> Result<Self, String> {
let reuse = std::env::var("HARMONY_E2E_REUSE_CLUSTER")
.map(|v| v == "1")
.unwrap_or(false);
let base_dir = PathBuf::from("/tmp/harmony-e2e");
let k3d = K3d::new(base_dir, Some(name.to_string()));
let client = k3d.ensure_installed().await?;
Ok(Self { name: name.to_string(), k3d, client, reuse })
}
/// Returns the kubeconfig path for this cluster.
pub fn kubeconfig_path(&self) -> String { ... }
}
impl Drop for TestCluster {
    /// Best-effort teardown: deletes the k3d cluster unless reuse was requested.
    fn drop(&mut self) {
        if self.reuse {
            // HARMONY_E2E_REUSE_CLUSTER=1 — keep the cluster for the next run.
            return;
        }
        // Ignore the result: a failed cleanup must never panic inside Drop.
        let _ = self.k3d.run_k3d_command(["cluster", "delete", &self.name]);
    }
}
K8s assertion helpers
// tests/e2e/src/k8s_assert.rs
// NOTE(review): these are signature sketches only — bodies are to be written
// as part of task 5.1. `Duration` needs `use std::time::Duration;` once
// implemented — confirm when the module is created.
/// Wait until a pod matching the label selector is Running in the namespace.
/// Times out after `timeout` duration.
pub async fn wait_for_pod_running(
client: &kube::Client,
namespace: &str,
label_selector: &str,
timeout: Duration,
) -> Result<(), String>
/// Assert a CRD instance exists.
// NOTE(review): `namespace: None` presumably means cluster-scoped — confirm.
pub async fn assert_resource_exists<K: kube::Resource>(
client: &kube::Client,
name: &str,
namespace: Option<&str>,
) -> Result<(), String>
/// Install a Helm chart. Returns when all pods in the release are running.
// NOTE(review): `repo_url: None` presumably means the repo is already added
// locally — confirm intended semantics.
pub async fn helm_install(
release_name: &str,
chart: &str,
namespace: &str,
repo_url: Option<&str>,
timeout: Duration,
) -> Result<(), String>
Tasks
5.1 Create the tests/e2e/ crate
Add to workspace Cargo.toml:
# Root workspace Cargo.toml — register the new E2E crate as a workspace member.
[workspace]
members = [
# ... existing members
"tests/e2e",
]
tests/e2e/Cargo.toml:
# tests/e2e/Cargo.toml — manifest for the E2E test crate.
[package]
name = "harmony-e2e-tests"
edition = "2024"
# Internal test crate — must never be published to a registry.
publish = false
[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
# k3d cluster lifecycle helper (directory is `k3d`, package name is `k3d_rs`).
k3d_rs = { path = "../../k3d", package = "k3d_rs" }
kube = { workspace = true }
k8s-openapi = { workspace = true }
tokio = { workspace = true }
log = { workspace = true }
env_logger = { workspace = true }
[dev-dependencies]
pretty_assertions = { workspace = true }
5.2 PostgreSQL E2E test
// tests/e2e/tests/postgresql.rs
use harmony::modules::postgresql::{PostgreSQLScore, capability::PostgreSQLConfig};
use harmony::topology::K8sAnywhereTopology;
use harmony::inventory::Inventory;
use harmony::maestro::Maestro;
#[tokio::test]
async fn test_postgresql_deploys_on_k3d() {
let cluster = TestCluster::ensure("harmony-e2e-pg").await.unwrap();
// Install CNPG operator via Helm
// (K8sAnywhereTopology::ensure_ready() now handles this since
// commit e1183ef "K8s postgresql score now ensures cnpg is installed")
// But we may need the Helm chart for non-OKD:
helm_install(
"cnpg",
"cloudnative-pg",
"cnpg-system",
Some("https://cloudnative-pg.github.io/charts"),
Duration::from_secs(120),
).await.unwrap();
// Configure topology pointing to test cluster
let config = K8sAnywhereConfig {
kubeconfig: Some(cluster.kubeconfig_path()),
use_local_k3d: false,
autoinstall: false,
use_system_kubeconfig: false,
harmony_profile: "dev".to_string(),
k8s_context: None,
};
let topology = K8sAnywhereTopology::with_config(config);
// Create and run the score
let score = PostgreSQLScore {
config: PostgreSQLConfig {
cluster_name: "e2e-test-pg".to_string(),
namespace: "e2e-pg-test".to_string(),
..Default::default()
},
};
let mut maestro = Maestro::initialize(Inventory::autoload(), topology).await.unwrap();
maestro.register_all(vec![Box::new(score)]);
let scores = maestro.scores().read().unwrap().first().unwrap().clone_box();
let result = maestro.interpret(scores).await;
assert!(result.is_ok(), "PostgreSQL score failed: {:?}", result.err());
// Assert: CNPG Cluster resource exists
// (the Cluster CRD is applied — pod readiness may take longer)
let client = cluster.client.clone();
// ... assert Cluster CRD exists in e2e-pg-test namespace
}
5.3 RustFS E2E test
Similar structure. Details depend on what the RustFS score deploys (likely a Helm chart or k8s resources for MinIO/RustFS).
/// E2E: the RustFS score deploys cleanly onto a fresh k3d cluster.
// NOTE(review): skeleton only — to be fleshed out after feat/rustfs merges,
// mirroring the PostgreSQL test (topology, score, interpret, assert).
#[tokio::test]
async fn test_rustfs_deploys_on_k3d() {
let cluster = TestCluster::ensure("harmony-e2e-rustfs").await.unwrap();
// ... similar pattern: configure topology, create score, interpret, assert
}
5.4 CI job for E2E tests
New workflow file (Gitea or GitHub Actions):
# .gitea/workflows/e2e.yml (or .github/workflows/e2e.yml)
# Indentation restored: `push`/`workflow_dispatch` nest under `on:`, and
# steps nest under the `e2e` job — the flattened form is not valid YAML.
name: E2E Tests

on:
  push:
    branches: [master, main]
  # Don't run on every PR — too slow. Run on label or manual trigger.
  workflow_dispatch:

jobs:
  e2e:
    runs-on: self-hosted # Must have Docker available for k3d
    timeout-minutes: 15
    steps:
      - uses: actions/checkout@v4
      - name: Install k3d
        run: curl -s https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | bash
      - name: Run E2E tests
        run: cargo test -p harmony-e2e-tests -- --test-threads=1
        env:
          RUST_LOG: info
Note --test-threads=1: E2E tests create k3d clusters and should not run in parallel (port conflicts, resource contention).
Deliverables
- `tests/e2e/` crate added to workspace
- Shared test utilities: `TestCluster`, `wait_for_pod_running`, `helm_install`
- PostgreSQL E2E test passing
- RustFS E2E test passing (after `feat/rustfs` merge)
- CI job running E2E tests on push to main
- `HARMONY_E2E_REUSE_CLUSTER=1` for fast local iteration