Compare commits
50 Commits: fix/refact...feat/confi
| SHA1 |
|---|
| 6a57361356 |
| d0d4f15122 |
| 93b83b8161 |
| 6ca8663422 |
| f6ce0c6d4f |
| 8a1eca21f7 |
| 9d2308eca6 |
| ccc26e07eb |
| 9a67bcc96f |
| a377fc1404 |
| c9977fee12 |
| 64bf585e07 |
| 44e2c45435 |
| cdccbc8939 |
| 9830971d05 |
| e1183ef6de |
| 444fea81b8 |
| 907ae04195 |
| 64582caa64 |
| f5736fcc37 |
| 7a1e84fb68 |
| 8499f4d1b7 |
| 231d9b878e |
| ee2dade0be |
| aa07f4c8ad |
| 77bb138497 |
| a16879b1b6 |
| f57e6f5957 |
| 7605d05de3 |
| b244127843 |
| 67c3265286 |
| d10598d01e |
| 61ba7257d0 |
| 8798110bf3 |
| 1508d431c0 |
| caf6f0c67b |
| b0e9594d92 |
| bfb86f63ce |
| d920de34cf |
| 4276b9137b |
| 6ab88ab8d9 |
| 53d0704a35 |
| de49e9ebcc |
| d8ab9d52a4 |
| 2cb7aeefc0 |
| 16016febcf |
| e709de531d |
| 6ab0f3a6ab |
| 724ab0b888 |
| 8b6ce8d069 |
@@ -15,4 +15,4 @@ jobs:
       uses: actions/checkout@v4

     - name: Run check script
-      run: bash check.sh
+      run: bash build/check.sh
3 .gitignore (vendored)
@@ -29,3 +29,6 @@ Cargo.lock

 # Useful to create ignore folders for temp files and notes
 ignore
+
+# Generated book
+book
1657 Cargo.lock (generated)

File diff suppressed because it is too large.
19 Cargo.toml
@@ -1,6 +1,7 @@
[workspace]
resolver = "2"
members = [
    "examples/*",
    "private_repos/*",
    "harmony",
    "harmony_types",
@@ -15,10 +16,16 @@ members = [
    "harmony_inventory_agent",
    "harmony_secret_derive",
    "harmony_secret",
    "adr/agent_discovery/mdns",
    "brocade",
    "harmony_agent",
    "harmony_agent/deploy", "harmony_node_readiness", "harmony-k8s",
    "examples/kvm_okd_ha_cluster",
    "examples/example_linux_vm",
    "harmony_config_derive",
    "harmony_config",
    "brocade",
    "harmony_agent",
    "harmony_agent/deploy",
    "harmony_node_readiness",
    "harmony-k8s",
    "harmony_assets",
]

[workspace.package]
@@ -33,6 +40,7 @@ derive-new = "0.7"
async-trait = "0.1"
tokio = { version = "1.40", features = [
    "io-std",
    "io-util",
    "fs",
    "macros",
    "rt-multi-thread",
@@ -69,6 +77,7 @@ base64 = "0.22.1"
tar = "0.4.44"
lazy_static = "1.5.0"
directories = "6.0.0"
futures-util = "0.3"
thiserror = "2.0.14"
serde = { version = "1.0.209", features = ["derive", "rc"] }
serde_json = "1.0.127"
@@ -82,3 +91,5 @@ reqwest = { version = "0.12", features = [
    "json",
], default-features = false }
assertor = "0.0.4"
tokio-test = "0.4"
anyhow = "1.0"
272 README.md
@@ -1,101 +1,121 @@
|
||||
# Harmony
|
||||
|
||||
Open-source infrastructure orchestration that treats your platform like first-class code.
|
||||
**Infrastructure orchestration that treats your platform like first-class code.**
|
||||
|
||||
In other words, Harmony is a **next-generation platform engineering framework**.
|
||||
Harmony is an open-source framework that brings the rigor of software engineering to infrastructure management. Write Rust code to define what you want, and Harmony handles the rest — from local development to production clusters.
|
||||
|
||||
_By [NationTech](https://nationtech.io)_
|
||||
|
||||
[](https://git.nationtech.io/nationtech/harmony)
|
||||
[](https://git.nationtech.io/NationTech/harmony)
|
||||
[](LICENSE)
|
||||
|
||||
### Unify
|
||||
---
|
||||
|
||||
- **Project Scaffolding**
|
||||
- **Infrastructure Provisioning**
|
||||
- **Application Deployment**
|
||||
- **Day-2 operations**
|
||||
## The Problem Harmony Solves
|
||||
|
||||
All in **one strongly-typed Rust codebase**.
|
||||
Modern infrastructure is messy. Your Kubernetes cluster needs monitoring. Your bare-metal servers need provisioning. Your applications need deployments. Each comes with its own tooling, its own configuration format, and its own failure modes.
|
||||
|
||||
### Deploy anywhere
|
||||
**What if you could describe your entire platform in one consistent language?**
|
||||
|
||||
From a **developer laptop** to a **global production cluster**, a single **source of truth** drives the **full software lifecycle.**
|
||||
That's Harmony. It unifies project scaffolding, infrastructure provisioning, application deployment, and day-2 operations into a single strongly-typed Rust codebase.
|
||||
|
||||
## The Harmony Philosophy
|
||||
---
|
||||
|
||||
Infrastructure is essential, but it shouldn’t be your core business. Harmony is built on three guiding principles that make modern platforms reliable, repeatable, and easy to reason about.
|
||||
## Three Principles That Make the Difference
|
||||
|
||||
| Principle | What it means for you |
|
||||
| -------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| **Infrastructure as Resilient Code** | Replace sprawling YAML and bash scripts with type-safe Rust. Test, refactor, and version your platform just like application code. |
|
||||
| **Prove It Works — Before You Deploy** | Harmony uses the compiler to verify that your application’s needs match the target environment’s capabilities at **compile-time**, eliminating an entire class of runtime outages. |
|
||||
| **One Unified Model** | Software and infrastructure are a single system. Harmony models them together, enabling deep automation—from bare-metal servers to Kubernetes workloads—with zero context switching. |
|
||||
| Principle | What It Means |
|
||||
|-----------|---------------|
|
||||
| **Infrastructure as Resilient Code** | Stop fighting with YAML and bash. Write type-safe Rust that you can test, version, and refactor like any other code. |
|
||||
| **Prove It Works Before You Deploy** | Harmony verifies at _compile time_ that your application can actually run on your target infrastructure. No more "the config looks right but it doesn't work" surprises. |
|
||||
| **One Unified Model** | Software and infrastructure are one system. Deploy from laptop to production cluster without switching contexts or tools. |
|
||||
|
||||
These principles surface as simple, ergonomic Rust APIs that let teams focus on their product while trusting the platform underneath.
|
||||
---
|
||||
|
||||
## Where to Start
|
||||
## How It Works: The Core Concepts
|
||||
|
||||
We have a comprehensive set of documentation right here in the repository.
|
||||
Harmony is built around three concepts that work together:
|
||||
|
||||
| I want to... | Start Here |
|
||||
| ----------------- | ------------------------------------------------------------------ |
|
||||
| Get Started | [Getting Started Guide](./docs/guides/getting-started.md) |
|
||||
| See an Example | [Use Case: Deploy a Rust Web App](./docs/use-cases/rust-webapp.md) |
|
||||
| Explore | [Documentation Hub](./docs/README.md) |
|
||||
| See Core Concepts | [Core Concepts Explained](./docs/concepts.md) |
|
||||
### Score — "What You Want"
|
||||
|
||||
## Quick Look: Deploy a Rust Webapp
|
||||
A `Score` is a declarative description of desired state. Think of it as a "recipe" that says _what_ you want without specifying _how_ to get there.
|
||||
|
||||
The snippet below spins up a complete **production-grade Rust + Leptos Webapp** with monitoring. Swap it for your own scores to deploy anything from microservices to machine-learning pipelines.
|
||||
```rust
|
||||
// "I want a PostgreSQL cluster running with default settings"
|
||||
let postgres = PostgreSQLScore {
|
||||
config: PostgreSQLConfig {
|
||||
cluster_name: "harmony-postgres-example".to_string(),
|
||||
namespace: "harmony-postgres-example".to_string(),
|
||||
..Default::default()
|
||||
},
|
||||
};
|
||||
```
|
||||
|
||||
### Topology — "Where It Goes"
|
||||
|
||||
A `Topology` represents your infrastructure environment and its capabilities. It answers the question: "What can this environment actually do?"
|
||||
|
||||
```rust
|
||||
// Deploy to a local K3D cluster, or any Kubernetes cluster via environment variables
|
||||
K8sAnywhereTopology::from_env()
|
||||
```
|
||||
|
||||
### Interpret — "How It Happens"
|
||||
|
||||
An `Interpret` is the execution logic that connects your `Score` to your `Topology`. It translates "what you want" into "what the infrastructure does."
|
||||
|
||||
**The Compile-Time Check:** Before your code ever runs, Harmony verifies that your `Score` is compatible with your `Topology`. If your application needs a feature your infrastructure doesn't provide, you get a compile error — not a runtime failure.
|
||||
|
||||
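The sketch below illustrates, in plain Rust, how this kind of check can be expressed as a trait bound; the trait and type names here are illustrative stand-ins, not Harmony's actual API.

```rust
// Minimal sketch (not Harmony's real types): a Score only accepts topologies
// that expose the capability it needs, so a mismatch is a type error.
trait Topology {}

// Capability marker: "this topology can host PostgreSQL".
trait PostgresCapable: Topology {}

struct K3dTopology;
impl Topology for K3dTopology {}
impl PostgresCapable for K3dTopology {}

struct BareMetalTopology;
impl Topology for BareMetalTopology {} // note: no PostgresCapable impl

struct PostgreSQLScore;

impl PostgreSQLScore {
    // The bound `T: PostgresCapable` is the "compile-time check".
    fn deploy<T: PostgresCapable>(&self, _target: &T) {
        println!("deploying postgres");
    }
}

fn main() {
    let score = PostgreSQLScore;
    score.deploy(&K3dTopology); // compiles
    // score.deploy(&BareMetalTopology); // compile error: trait bound not satisfied
}
```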
---

## What You Can Deploy

Harmony ships with ready-made Scores for:

**Data Services**
- PostgreSQL clusters (via CloudNativePG operator)
- Multi-site PostgreSQL with failover

**Kubernetes**
- Namespaces, Deployments, Ingress
- Helm charts
- cert-manager for TLS
- Monitoring (Prometheus, alerting, ntfy)

**Bare Metal / Infrastructure**
- OKD clusters from scratch
- OPNsense firewalls
- Network services (DNS, DHCP, TFTP)
- Brocade switch configuration

**And more:** Application deployment, tenant management, load balancing, and more.

---

## Quick Start: Deploy a PostgreSQL Cluster

This example provisions a local Kubernetes cluster (K3D) and deploys a PostgreSQL cluster on it — no external infrastructure required.

```rust
use harmony::{
    inventory::Inventory,
    modules::{
        application::{
            ApplicationScore, RustWebFramework, RustWebapp,
            features::{PackagingDeployment, rhob_monitoring::Monitoring},
        },
        monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
    },
    modules::postgresql::{PostgreSQLScore, capability::PostgreSQLConfig},
    topology::K8sAnywhereTopology,
};
use harmony_macros::hurl;
use std::{path::PathBuf, sync::Arc};

#[tokio::main]
async fn main() {
    let application = Arc::new(RustWebapp {
        name: "harmony-example-leptos".to_string(),
        project_root: PathBuf::from(".."), // <== Your project root, usually .. if you use the standard `/harmony` folder
        framework: Some(RustWebFramework::Leptos),
        service_port: 8080,
    });

    // Define your Application deployment and the features you want
    let app = ApplicationScore {
        features: vec![
            Box::new(PackagingDeployment {
                application: application.clone(),
            }),
            Box::new(Monitoring {
                application: application.clone(),
                alert_receiver: vec![
                    Box::new(DiscordWebhook {
                        name: "test-discord".to_string(),
                        url: hurl!("https://discord.doesnt.exist.com"), // <== Get your discord webhook url
                    }),
                ],
            }),
        ],
        application,
    let postgres = PostgreSQLScore {
        config: PostgreSQLConfig {
            cluster_name: "harmony-postgres-example".to_string(),
            namespace: "harmony-postgres-example".to_string(),
            ..Default::default()
        },
    };

    harmony_cli::run(
        Inventory::autoload(),
        K8sAnywhereTopology::from_env(), // <== Deploy to local automatically provisioned local k3d by default or connect to any kubernetes cluster
        vec![Box::new(app)],
        K8sAnywhereTopology::from_env(),
        vec![Box::new(postgres)],
        None,
    )
    .await
@@ -103,40 +123,128 @@ async fn main() {
}
```

To run this:
### What this actually does

- Clone the repository: `git clone https://git.nationtech.io/nationtech/harmony`
- Install dependencies: `cargo build --release`
- Run the example: `cargo run --example try_rust_webapp`

When you compile and run this program:

1. **Compiles** the Harmony Score into an executable
2. **Connects** to `K8sAnywhereTopology` — which auto-provisions a local K3D cluster if none exists
3. **Installs** the CloudNativePG operator into the cluster (one-time setup)
4. **Creates** a PostgreSQL cluster with 1 instance and 1 GiB of storage
5. **Exposes** the PostgreSQL instance as a Kubernetes Service

### Prerequisites

- [Rust](https://rust-lang.org/tools/install) (edition 2024)
- [Docker](https://docs.docker.com/get-docker/) (for the local K3D cluster)
- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) (optional, for inspecting the cluster)

### Run it

```bash
# Clone the repository
git clone https://git.nationtech.io/nationtech/harmony
cd harmony

# Build the project
cargo build --release

# Run the example
cargo run -p example-postgresql
```

Harmony will print its progress as it sets up the cluster and deploys PostgreSQL. When complete, you can inspect the deployment:

```bash
kubectl get pods -n harmony-postgres-example
kubectl get secret -n harmony-postgres-example harmony-postgres-example-db-user -o jsonpath='{.data.password}' | base64 -d
```

To connect to the database, forward the port:

```bash
kubectl port-forward -n harmony-postgres-example svc/harmony-postgres-example-rw 5432:5432
psql -h localhost -p 5432 -U postgres
```

To clean up, delete the K3D cluster:

```bash
k3d cluster delete harmony-postgres-example
```

---

## Environment Variables

`K8sAnywhereTopology::from_env()` reads the following environment variables to determine where and how to connect:

| Variable | Default | Description |
|----------|---------|-------------|
| `KUBECONFIG` | `~/.kube/config` | Path to your kubeconfig file |
| `HARMONY_AUTOINSTALL` | `true` | Auto-provision a local K3D cluster if none found |
| `HARMONY_USE_LOCAL_K3D` | `true` | Always prefer local K3D over remote clusters |
| `HARMONY_PROFILE` | `dev` | Deployment profile: `dev`, `staging`, or `prod` |
| `HARMONY_K8S_CONTEXT` | _none_ | Use a specific kubeconfig context |
| `HARMONY_PUBLIC_DOMAIN` | _none_ | Public domain for ingress endpoints |

To connect to an existing Kubernetes cluster instead of provisioning K3D:

```bash
# Point to your kubeconfig
export KUBECONFIG=/path/to/your/kubeconfig
export HARMONY_USE_LOCAL_K3D=false
export HARMONY_AUTOINSTALL=false

# Then run
cargo run -p example-postgresql
```

---

## Documentation

All documentation is in the `/docs` directory.

| I want to... | Start here |
|--------------|------------|
| Understand the core concepts | [Core Concepts](./docs/concepts.md) |
| Deploy my first application | [Getting Started Guide](./docs/guides/getting-started.md) |
| Explore available components | [Scores Catalog](./docs/catalogs/scores.md) · [Topologies Catalog](./docs/catalogs/topologies.md) |
| See a complete bare-metal deployment | [OKD on Bare Metal](./docs/use-cases/okd-on-bare-metal.md) |
| Build my own Score or Topology | [Developer Guide](./docs/guides/developer-guide.md) |

- [Documentation Hub](./docs/README.md): The main entry point for all documentation.
- [Core Concepts](./docs/concepts.md): A detailed look at Score, Topology, Capability, Inventory, and Interpret.
- [Component Catalogs](./docs/catalogs/README.md): Discover all available Scores, Topologies, and Capabilities.
- [Developer Guide](./docs/guides/developer-guide.md): Learn how to write your own Scores and Topologies.

---

## Architectural Decision Records
## Why Rust?

- [ADR-001 · Why Rust](adr/001-rust.md)
- [ADR-003 · Infrastructure Abstractions](adr/003-infrastructure-abstractions.md)
- [ADR-006 · Secret Management](adr/006-secret-management.md)
- [ADR-011 · Multi-Tenant Cluster](adr/011-multi-tenant-cluster.md)

We chose Rust for the same reason you might: **reliability through type safety**.

## Contribute

Infrastructure code runs in production. It needs to be correct. Rust's ownership model and type system let us build a framework where:

Discussions and roadmap live in [Issues](https://git.nationtech.io/nationtech/harmony/-/issues). PRs, ideas, and feedback are welcome!

- Invalid configurations fail at compile time, not at 3 AM
- Refactoring infrastructure is as safe as refactoring application code
- The compiler verifies that your platform can actually fulfill your requirements

See [ADR-001 · Why Rust](./adr/001-rust.md) for our full rationale.

---

## Architecture Decisions

Harmony's design is documented through Architecture Decision Records (ADRs):

- [ADR-001 · Why Rust](./adr/001-rust.md)
- [ADR-003 · Infrastructure Abstractions](./adr/003-infrastructure-abstractions.md)
- [ADR-006 · Secret Management](./adr/006-secret-management.md)
- [ADR-011 · Multi-Tenant Cluster](./adr/011-multi-tenant-cluster.md)

---

## License

Harmony is released under the **GNU AGPL v3**.

> We choose a strong copyleft license to ensure the project—and every improvement to it—remains open and benefits the entire community. Fork it, enhance it, even out-innovate us; just keep it open.
> We choose a strong copyleft license to ensure the project—and every improvement to it—remains open and benefits the entire community.

See [LICENSE](LICENSE) for the full text.

---

_Made with ❤️ & 🦀 by the NationTech and the Harmony community_
_Made with ❤️ & 🦀 by NationTech and the Harmony community_
29 ROADMAP.md (new file)
@@ -0,0 +1,29 @@
# Harmony Roadmap

Six phases to take Harmony from working prototype to production-ready open-source project.

| # | Phase | Status | Depends On | Detail |
|---|-------|--------|------------|--------|
| 1 | [Harden `harmony_config`](ROADMAP/01-config-crate.md) | Not started | — | Test every source, add SQLite backend, wire Zitadel + OpenBao, validate zero-setup UX |
| 2 | [Migrate to `harmony_config`](ROADMAP/02-refactor-harmony-config.md) | Not started | 1 | Replace all 19 `SecretManager` call sites, deprecate direct `harmony_secret` usage |
| 3 | [Complete `harmony_assets`](ROADMAP/03-assets-crate.md) | Not started | 1, 2 | Test, refactor k3d and OKD to use it, implement `Url::Url`, remove LFS |
| 4 | [Publish to GitHub](ROADMAP/04-publish-github.md) | Not started | 3 | Clean history, set up GitHub as community hub, CI on self-hosted runners |
| 5 | [E2E tests: PostgreSQL & RustFS](ROADMAP/05-e2e-tests-simple.md) | Not started | 1 | k3d-based test harness, two passing E2E tests, CI job |
| 6 | [E2E tests: OKD HA on KVM](ROADMAP/06-e2e-tests-kvm.md) | Not started | 5 | KVM test infrastructure, full OKD installation test, nightly CI |

## Current State (as of branch `feature/kvm-module`)

- `harmony_config` crate exists with `EnvSource`, `LocalFileSource`, `PromptSource`, `StoreSource`. 12 unit tests. **Zero consumers** in workspace — everything still uses `harmony_secret::SecretManager` directly (19 call sites).
- `harmony_assets` crate exists with `Asset`, `LocalCache`, `LocalStore`, `S3Store`. **No tests. Zero consumers.** The `k3d` crate has its own `DownloadableAsset` with identical functionality and full test coverage.
- `harmony_secret` has `LocalFileSecretStore`, `OpenbaoSecretStore` (token/userpass only), `InfisicalSecretStore`. Works but no Zitadel OIDC integration.
- KVM module exists on this branch with `KvmExecutor`, VM lifecycle, ISO download, two examples (`example_linux_vm`, `kvm_okd_ha_cluster`).
- RustFS module exists on `feat/rustfs` branch (2 commits ahead of master).
- 39 example crates, **zero E2E tests**. Unit tests pass across workspace (~240 tests).
- CI runs `cargo check`, `fmt`, `clippy`, `test` on Gitea. No E2E job.

## Guiding Principles

- **Zero-setup first**: A new user clones, runs `cargo run`, gets prompted for config, values persist to local SQLite. No env vars, no external services required (see the sketch below).
- **Progressive disclosure**: Local SQLite → OpenBao → Zitadel SSO. Each layer is opt-in.
- **Test what ships**: Every example that works should have an E2E test proving it works.
- **Community over infrastructure**: GitHub for engagement, self-hosted runners for CI.
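As a rough illustration of the zero-setup principle, the sketch below strings together the APIs named elsewhere in this roadmap (`#[derive(Config)]`, `harmony_config::init`, `EnvSource`, `SqliteSource`, `get_or_prompt`). The `GiteaToken` struct is hypothetical and the exact module paths and signatures are assumptions, not the crate's confirmed API.

```rust
use serde::{Deserialize, Serialize};
use std::sync::Arc;

use harmony_config::{Config, ConfigSource, EnvSource, SqliteSource}; // paths assumed

// Hypothetical config struct: the first run prompts for it, later runs read it
// back from the local SQLite store with no extra setup.
#[derive(Debug, Serialize, Deserialize, Config)]
struct GiteaToken {
    token: String,
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Default chain assumed by this roadmap: env vars first, then SQLite.
    let sources: Vec<Arc<dyn ConfigSource>> = vec![
        Arc::new(EnvSource),
        Arc::new(SqliteSource::default().await?),
    ];
    harmony_config::init(sources).await;

    // First run: interactive prompt, value persisted to
    // ~/.local/share/harmony/config/config.db.
    // Later runs: resolved from SQLite (or HARMONY_CONFIG_GiteaToken if set).
    let creds = harmony_config::get_or_prompt::<GiteaToken>().await?;
    println!("token loaded ({} chars)", creds.token.len());
    Ok(())
}
```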
170 ROADMAP/01-config-crate.md (new file)
@@ -0,0 +1,170 @@
# Phase 1: Harden `harmony_config`, Validate UX, Zero-Setup Starting Point

## Goal

Make `harmony_config` production-ready with a seamless first-run experience: clone, run, get prompted, values persist locally. Then progressively add team-scale backends (OpenBao, Zitadel SSO) without changing any calling code.

## Current State

`harmony_config` now has:

- `Config` trait + `#[derive(Config)]` macro
- `ConfigManager` with ordered source chain
- Five `ConfigSource` implementations:
  - `EnvSource` — reads `HARMONY_CONFIG_{KEY}` env vars
  - `LocalFileSource` — reads/writes `{key}.json` files from a directory
  - `SqliteSource` — **NEW** reads/writes to SQLite database
  - `PromptSource` — returns `None` / no-op on set (placeholder for TUI integration)
  - `StoreSource<S: SecretStore>` — wraps any `harmony_secret::SecretStore` backend
- 24 unit tests (mock source, env, local file, sqlite, prompt, integration)
- Global `CONFIG_MANAGER` static with `init()`, `get()`, `get_or_prompt()`, `set()`
- Two examples: `basic` and `prompting` in `harmony_config/examples/`
- **Zero workspace consumers** — nothing calls `harmony_config` yet

## Tasks

### 1.1 Add `SqliteSource` as the default zero-setup backend ✅

**Status**: Implemented

**Implementation Details**:

- Database location: `~/.local/share/harmony/config/config.db` (directory is auto-created)
- Schema: `config(key TEXT PRIMARY KEY, value TEXT NOT NULL, updated_at TEXT NOT NULL DEFAULT (datetime('now')))`
- Uses `sqlx` with SQLite runtime
- `SqliteSource::open(path)` - opens/creates database at given path
- `SqliteSource::default()` - uses default Harmony data directory (a usage sketch follows below)
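A minimal round-trip sketch, mirroring `test_sqlite_set_and_get` below and using the `ConfigSource` signatures shown in section 1.2; the module path, the async `open`, and the error conversion are assumptions.

```rust
use harmony_config::ConfigSource;                 // trait shown in section 1.2
use harmony_config::source::sqlite::SqliteSource; // path assumed from the file layout below
use serde_json::json;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Tests use tempfile::NamedTempFile for isolation; same idea here.
    let db = tempfile::NamedTempFile::new()?;
    let source = SqliteSource::open(db.path()).await?;

    // Write a value under a key, exactly like the derive macro would for a struct.
    let value = json!({ "host": "10.0.0.1", "user": "admin" });
    source.set("BrocadeSwitchAuth", &value).await?;

    // `get` returns Ok(None) when the key is missing, Ok(Some(value)) otherwise.
    let loaded = source.get("BrocadeSwitchAuth").await?;
    assert_eq!(loaded, Some(value));
    Ok(())
}
```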
**Files**:

- `harmony_config/src/source/sqlite.rs` - new file
- `harmony_config/Cargo.toml` - added `sqlx = { workspace = true, features = ["runtime-tokio", "sqlite"] }`
- `Cargo.toml` - added `anyhow = "1.0"` to workspace dependencies

**Tests** (all passing):

- `test_sqlite_set_and_get` — round-trip a `TestConfig` struct
- `test_sqlite_get_returns_none_when_missing` — key not in DB
- `test_sqlite_overwrites_on_set` — set twice, get returns latest
- `test_sqlite_concurrent_access` — two tasks writing different keys simultaneously

### 1.1.1 Add Config example to show exact DX and confirm functionality ✅

**Status**: Implemented

**Examples created**:

1. `harmony_config/examples/basic.rs` - demonstrates:
   - Zero-setup SQLite backend (auto-creates directory)
   - Using the `#[derive(Config)]` macro
   - Environment variable override (`HARMONY_CONFIG_TestConfig` overrides SQLite)
   - Direct set/get operations
   - Persistence verification

2. `harmony_config/examples/prompting.rs` - demonstrates:
   - Config with no defaults (requires user input via `inquire`)
   - `get()` flow: env > sqlite > prompt fallback
   - `get_or_prompt()` for interactive configuration
   - Full resolution chain
   - Persistence of prompted values

### 1.2 Make `PromptSource` functional ✅

**Status**: Implemented with design improvement

**Key Finding - Bug Fixed During Implementation**:

The original design had a critical bug in `get_or_prompt()`:

```rust
// OLD (BUGGY) - breaks on first source where set() returns Ok(())
for source in &self.sources {
    if source.set(T::KEY, &value).await.is_ok() {
        break;
    }
}
```

Since `EnvSource.set()` returns `Ok(())` (successfully sets env var), the loop would break immediately and never write to `SqliteSource`. Prompted values were never persisted!

**Solution - Added `should_persist()` method to ConfigSource trait**:

```rust
#[async_trait]
pub trait ConfigSource: Send + Sync {
    async fn get(&self, key: &str) -> Result<Option<serde_json::Value>, ConfigError>;
    async fn set(&self, key: &str, value: &serde_json::Value) -> Result<(), ConfigError>;
    fn should_persist(&self) -> bool {
        true
    }
}
```

- `EnvSource::should_persist()` returns `false` - shouldn't persist prompted values to env vars
- `PromptSource::should_persist()` returns `false` - doesn't persist anyway
- `get_or_prompt()` now skips sources where `should_persist()` is `false`

**Updated `get_or_prompt()`**:

```rust
for source in &self.sources {
    if !source.should_persist() {
        continue;
    }
    if source.set(T::KEY, &value).await.is_ok() {
        break;
    }
}
```

**Tests**:

- `test_prompt_source_always_returns_none`
- `test_prompt_source_set_is_noop`
- `test_prompt_source_does_not_persist`
- `test_full_chain_with_prompt_source_falls_through_to_prompt`

### 1.3 Integration test: full resolution chain ✅

**Status**: Implemented

**Tests**:

- `test_full_resolution_chain_sqlite_fallback` — env not set, sqlite has value, get() returns sqlite
- `test_full_resolution_chain_env_overrides_sqlite` — env set, sqlite has value, get() returns env (a sketch of this case follows below)
- `test_branch_switching_scenario_deserialization_error` — old struct shape in sqlite returns Deserialization error
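For orientation only, here is what the "env overrides sqlite" case could look like; the real test lives in `harmony_config`, and the manager-level `get` signature, the coercions, and the derives are assumptions layered on the trait shown in 1.2.

```rust
use std::sync::Arc;

use harmony_config::{Config, ConfigManager, ConfigSource, EnvSource}; // paths assumed
use harmony_config::source::sqlite::SqliteSource;
use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize, Config)]
struct TestConfig {
    name: String,
}

#[tokio::test]
async fn env_overrides_sqlite_sketch() {
    let db = tempfile::NamedTempFile::new().unwrap();
    let sqlite = SqliteSource::open(db.path()).await.unwrap();
    sqlite
        .set("TestConfig", &serde_json::json!({ "name": "from-sqlite" }))
        .await
        .unwrap();

    // Env var uses the HARMONY_CONFIG_{KEY} convention described in 1.1.1.
    // set_var is unsafe in edition 2024, hence the block.
    unsafe { std::env::set_var("HARMONY_CONFIG_TestConfig", r#"{"name":"from-env"}"#) };

    // EnvSource sits ahead of SqliteSource, so the env value must win.
    let sources: Vec<Arc<dyn ConfigSource>> = vec![Arc::new(EnvSource), Arc::new(sqlite)];
    let manager = ConfigManager::new(sources);

    let resolved: TestConfig = manager.get().await.unwrap(); // return shape assumed
    assert_eq!(resolved.name, "from-env");
}
```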
### 1.4 Validate Zitadel + OpenBao integration path ⏳

**Status**: Not yet implemented

Remaining work:

- Validate that `ConfigManager::new(vec![EnvSource, SqliteSource, StoreSource<Openbao>])` compiles (see the sketch below)
- When OpenBao is unreachable, chain falls through to SQLite gracefully
- Document target Zitadel OIDC flow as ADR
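A sketch of the chain the first bullet wants to validate, written generically so no OpenBao constructor has to be invented; `StoreSource::new` and the trait bounds are assumptions. The intended behavior to verify is that a failing `StoreSource` lookup lets resolution fall through to the `SqliteSource` earlier in the chain.

```rust
use std::sync::Arc;

use harmony_config::{ConfigManager, ConfigSource, EnvSource, SqliteSource, StoreSource}; // paths assumed
use harmony_secret::SecretStore;

// Build the env -> sqlite -> OpenBao chain; the caller supplies the configured
// OpenbaoSecretStore (or any other SecretStore backend) so this compiles
// independently of its constructor.
async fn build_chain<S>(openbao: S) -> Result<ConfigManager, Box<dyn std::error::Error>>
where
    S: SecretStore + Send + Sync + 'static,
{
    let sources: Vec<Arc<dyn ConfigSource>> = vec![
        Arc::new(EnvSource),
        Arc::new(SqliteSource::default().await?),
        Arc::new(StoreSource::new(openbao)), // StoreSource<S: SecretStore>
    ];
    Ok(ConfigManager::new(sources))
}
```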
### 1.5 UX validation checklist ⏳

**Status**: Partially complete - manual verification needed

- [ ] `cargo run --example postgresql` with no env vars → prompts for nothing
- [ ] An example that uses `SecretManager` today (e.g., `brocade_snmp_server`) → when migrated to `harmony_config`, first run prompts, second run reads from SQLite
- [ ] Setting `HARMONY_CONFIG_BrocadeSwitchAuth='{"host":"...","user":"...","password":"..."}'` → skips prompt, uses env value
- [ ] Deleting `~/.local/share/harmony/config/` directory → re-prompts on next run

## Deliverables

- [x] `SqliteSource` implementation with tests
- [x] Functional `PromptSource` with `should_persist()` design
- [x] Fix `get_or_prompt` to persist to first writable source (via `should_persist()`), not all sources
- [x] Integration tests for full resolution chain
- [x] Branch-switching deserialization failure test
- [ ] `StoreSource<OpenbaoSecretStore>` integration validated (compiles, graceful fallback)
- [ ] ADR for Zitadel OIDC target architecture
- [ ] Update docs to reflect final implementation and behavior

## Key Implementation Notes

1. **SQLite path**: `~/.local/share/harmony/config/config.db` (not `~/.local/share/harmony/config.db`)
2. **Auto-create directory**: `SqliteSource::open()` creates parent directories if they don't exist
3. **Default path**: `SqliteSource::default()` uses `directories::ProjectDirs` to find the correct data directory
4. **Env var precedence**: Environment variables always take precedence over SQLite in the resolution chain
5. **Testing**: All tests use `tempfile::NamedTempFile` for temporary database paths, ensuring test isolation
112 ROADMAP/02-refactor-harmony-config.md (new file)
@@ -0,0 +1,112 @@
# Phase 2: Migrate Workspace to `harmony_config`

## Goal

Replace every direct `harmony_secret::SecretManager` call with `harmony_config` equivalents. After this phase, modules and examples depend only on `harmony_config`. `harmony_secret` becomes an internal implementation detail behind `StoreSource`.

## Current State

19 call sites use `SecretManager::get_or_prompt::<T>()` across:

| Location | Secret Types | Call Sites |
|----------|-------------|------------|
| `harmony/src/modules/brocade/brocade_snmp.rs` | `BrocadeSnmpAuth`, `BrocadeSwitchAuth` | 2 |
| `harmony/src/modules/nats/score_nats_k8s.rs` | `NatsAdmin` | 1 |
| `harmony/src/modules/okd/bootstrap_02_bootstrap.rs` | `RedhatSecret`, `SshKeyPair` | 2 |
| `harmony/src/modules/application/features/monitoring.rs` | `NtfyAuth` | 1 |
| `brocade/examples/main.rs` | `BrocadeSwitchAuth` | 1 |
| `examples/okd_installation/src/main.rs` + `topology.rs` | `SshKeyPair`, `BrocadeSwitchAuth`, `OPNSenseFirewallConfig` | 3 |
| `examples/okd_pxe/src/main.rs` + `topology.rs` | `SshKeyPair`, `BrocadeSwitchAuth`, `OPNSenseFirewallCredentials` | 3 |
| `examples/opnsense/src/main.rs` | `OPNSenseFirewallCredentials` | 1 |
| `examples/sttest/src/main.rs` + `topology.rs` | `SshKeyPair`, `OPNSenseFirewallConfig` | 2 |
| `examples/opnsense_node_exporter/` | (has dep but unclear usage) | ~1 |
| `examples/okd_cluster_alerts/` | (has dep but unclear usage) | ~1 |
| `examples/brocade_snmp_server/` | (has dep but unclear usage) | ~1 |

## Tasks

### 2.1 Bootstrap `harmony_config` in CLI and TUI entry points

Add `harmony_config::init()` as the first thing that happens in `harmony_cli::run()` and `harmony_tui::run()`.

```rust
// harmony_cli/src/lib.rs — inside run()
pub async fn run<T: Topology + Send + Sync + 'static>(
    inventory: Inventory,
    topology: T,
    scores: Vec<Box<dyn Score<T>>>,
    args_struct: Option<Args>,
) -> Result<(), Box<dyn std::error::Error>> {
    // Initialize config system with default source chain
    let sqlite = Arc::new(SqliteSource::default().await?);
    let env = Arc::new(EnvSource);
    harmony_config::init(vec![env, sqlite]).await;

    // ... rest of run()
}
```

This replaces the implicit `SecretManager` lazy initialization that currently happens on first `get_or_prompt` call.

### 2.2 Migrate each secret type from `Secret` to `Config`

For each secret struct, change:

```rust
// Before
use harmony_secret::Secret;
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, InteractiveParse, Secret)]
struct BrocadeSwitchAuth { ... }

// After
use harmony_config::Config;
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, InteractiveParse, Config)]
struct BrocadeSwitchAuth { ... }
```

At each call site, change:

```rust
// Before
let config = SecretManager::get_or_prompt::<BrocadeSwitchAuth>().await.unwrap();

// After
let config = harmony_config::get_or_prompt::<BrocadeSwitchAuth>().await.unwrap();
```

### 2.3 Migration order (low risk to high risk)

1. **`brocade/examples/main.rs`** — 1 call site, isolated example, easy to test manually
2. **`examples/opnsense/src/main.rs`** — 1 call site, isolated
3. **`harmony/src/modules/brocade/brocade_snmp.rs`** — 2 call sites, core module but straightforward
4. **`harmony/src/modules/nats/score_nats_k8s.rs`** — 1 call site
5. **`harmony/src/modules/application/features/monitoring.rs`** — 1 call site
6. **`examples/sttest/`** — 2 call sites, has both main.rs and topology.rs patterns
7. **`examples/okd_installation/`** — 3 call sites, complex topology setup
8. **`examples/okd_pxe/`** — 3 call sites, similar to okd_installation
9. **`harmony/src/modules/okd/bootstrap_02_bootstrap.rs`** — 2 call sites, critical OKD bootstrap path

### 2.4 Remove `harmony_secret` from direct dependencies

After all call sites are migrated:

1. Remove `harmony_secret` from `Cargo.toml` of: `harmony`, `brocade`, and all examples that had it
2. `harmony_config` keeps `harmony_secret` as a dependency (for `StoreSource`)
3. The `Secret` trait and `SecretManager` remain in `harmony_secret` but are not used directly anymore

### 2.5 Backward compatibility for existing local secrets

Users who already have secrets stored via `LocalFileSecretStore` (JSON files in `~/.local/share/harmony/secrets/`) need a migration path:

- On first run after upgrade, if SQLite has no entry for a key but the old JSON file exists, read from JSON and write to SQLite (sketched below)
- Or: add `LocalFileSource` as a fallback source at the end of the chain (read-only) for one release cycle
- Log a deprecation warning when reading from old JSON files
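A sketch of the first option, assuming the `ConfigSource` get/set signatures from Phase 1; the legacy file layout is the one quoted above, and the helper and path-resolution details are placeholders rather than the final implementation.

```rust
use std::path::PathBuf;

use harmony_config::ConfigSource;                 // trait from Phase 1
use harmony_config::source::sqlite::SqliteSource; // path assumed

// When SQLite has no entry for `key`, look for the legacy LocalFileSecretStore
// JSON file, import it into SQLite, and warn so users know to clean up.
async fn migrate_legacy_secret(
    sqlite: &SqliteSource,
    key: &str,
) -> Result<Option<serde_json::Value>, Box<dyn std::error::Error>> {
    if let Some(value) = sqlite.get(key).await? {
        return Ok(Some(value)); // already migrated or freshly written
    }

    let legacy_path = legacy_secrets_dir().join(format!("{key}.json"));
    if !legacy_path.exists() {
        return Ok(None); // nothing to migrate, caller falls back to prompting
    }

    let raw = tokio::fs::read_to_string(&legacy_path).await?;
    let value: serde_json::Value = serde_json::from_str(&raw)?;
    sqlite.set(key, &value).await?;
    log::warn!(
        "migrated legacy secret {key} from {}; this fallback will be removed in a future release",
        legacy_path.display()
    );
    Ok(Some(value))
}

fn legacy_secrets_dir() -> PathBuf {
    // Matches the location quoted above; a real implementation would use the
    // `directories` crate like SqliteSource::default() does.
    PathBuf::from(std::env::var("HOME").unwrap_or_default()).join(".local/share/harmony/secrets")
}
```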
## Deliverables

- [ ] `harmony_config::init()` called in `harmony_cli::run()` and `harmony_tui::run()`
- [ ] All 19 call sites migrated from `SecretManager` to `harmony_config`
- [ ] `harmony_secret` removed from direct dependencies of `harmony`, `brocade`, and all examples
- [ ] Backward compatibility for existing local JSON secrets
- [ ] All existing unit tests still pass
- [ ] Manual verification: one migrated example works end-to-end (prompt → persist → read)
141 ROADMAP/03-assets-crate.md (new file)
@@ -0,0 +1,141 @@
# Phase 3: Complete `harmony_assets`, Refactor Consumers

## Goal

Make `harmony_assets` the single way to manage downloadable binaries and images across Harmony. Eliminate `k3d::DownloadableAsset` duplication, implement `Url::Url` in OPNsense infra, remove LFS-tracked files from git.

## Current State

- `harmony_assets` exists with `Asset`, `LocalCache`, `LocalStore`, `S3Store` (behind feature flag). CLI with `upload`, `download`, `checksum`, `verify` commands. **No tests. Zero consumers.**
- `k3d/src/downloadable_asset.rs` has the same functionality with full test coverage (httptest mock server, checksum verification, cache hit, 404 handling, checksum failure).
- `Url::Url` variant in `harmony_types/src/net.rs` exists but is `todo!()` in OPNsense TFTP and HTTP infra layers.
- OKD modules hardcode `./data/...` paths (`bootstrap_02_bootstrap.rs:84-88`, `ipxe.rs:73`).
- `data/` directory contains ~3GB of LFS-tracked files (OKD binaries, PXE images, SCOS images).

## Tasks

### 3.1 Port k3d tests to `harmony_assets`

The k3d crate has 5 well-written tests in `downloadable_asset.rs`. Port them to test `harmony_assets::LocalStore`:

```rust
// harmony_assets/tests/local_store.rs (or in src/ as unit tests)

#[tokio::test]
async fn test_fetch_downloads_and_verifies_checksum() {
    // Start httptest server serving a known file
    // Create Asset with URL pointing to mock server
    // Fetch via LocalStore
    // Assert file exists at expected cache path
    // Assert checksum matches
}

#[tokio::test]
async fn test_fetch_returns_cached_file_when_present() {
    // Pre-populate cache with correct file
    // Fetch — assert no HTTP request made (mock server not hit)
}

#[tokio::test]
async fn test_fetch_fails_on_404() { ... }

#[tokio::test]
async fn test_fetch_fails_on_checksum_mismatch() { ... }

#[tokio::test]
async fn test_fetch_with_progress_callback() {
    // Assert progress callback is called with (bytes_received, total_size)
}
```

Add `httptest` to `[dev-dependencies]` of `harmony_assets`.

### 3.2 Refactor `k3d` to use `harmony_assets`

Replace `k3d/src/downloadable_asset.rs` with calls to `harmony_assets`:

```rust
// k3d/src/lib.rs — in download_latest_release()
use harmony_assets::{Asset, LocalCache, LocalStore, ChecksumAlgo};

let asset = Asset::new(
    binary_url,
    checksum,
    ChecksumAlgo::SHA256,
    K3D_BIN_FILE_NAME.to_string(),
);
let cache = LocalCache::new(self.base_dir.clone());
let store = LocalStore::new();
let path = store.fetch(&asset, &cache, None).await
    .map_err(|e| format!("Failed to download k3d: {}", e))?;
```

Delete `k3d/src/downloadable_asset.rs`. Update k3d's `Cargo.toml` to depend on `harmony_assets`.

### 3.3 Define asset metadata as config structs

Following `plan.md` Phase 2, create typed config for OKD assets using `harmony_config`:

```rust
// harmony/src/modules/okd/config.rs
#[derive(Config, Serialize, Deserialize, JsonSchema, InteractiveParse)]
struct OkdInstallerConfig {
    pub openshift_install_url: String,
    pub openshift_install_sha256: String,
    pub scos_kernel_url: String,
    pub scos_kernel_sha256: String,
    pub scos_initramfs_url: String,
    pub scos_initramfs_sha256: String,
    pub scos_rootfs_url: String,
    pub scos_rootfs_sha256: String,
}
```

First run prompts for URLs/checksums (or uses compiled-in defaults). Values persist to SQLite. Can be overridden via env vars or OpenBao.

### 3.4 Implement `Url::Url` in OPNsense infra layer

In `harmony/src/infra/opnsense/http.rs` and `tftp.rs`, implement the `Url::Url(url)` match arm:

```rust
// Instead of SCP-ing files to OPNsense:
// SSH into OPNsense, run: fetch -o /usr/local/http/{path} {url}
// (FreeBSD-native HTTP client, no extra deps on OPNsense)
```

This eliminates the manual `scp` workaround and the `inquire::Confirm` prompts in `ipxe.rs:126` and `bootstrap_02_bootstrap.rs:230`. A sketch of the match arm follows below.
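The sketch below shows the shape of that match arm with stand-in types; `OpnsenseSession`, the `ssh_run` helper, and the extra `Url` variant are placeholders, not Harmony's real infra types. The only part taken from the notes above is the `fetch -o ... {url}` command run on the firewall itself.

```rust
// Stand-ins so the sketch is self-contained; the real types live in
// harmony_types::net and the OPNsense infra layer.
enum Url {
    Url(String),
    LocalPath(String), // placeholder for the existing non-URL variants
}

struct OpnsenseSession; // stand-in for the existing SSH session wrapper

impl OpnsenseSession {
    async fn ssh_run(&self, cmd: &str) -> Result<(), String> {
        println!("ssh opnsense '{cmd}'"); // a real impl runs the command over SSH
        Ok(())
    }
}

async fn ensure_http_file(
    session: &OpnsenseSession,
    served_path: &str, // destination under /usr/local/http/
    source: &Url,
) -> Result<(), String> {
    match source {
        // New behavior: OPNsense downloads the file itself with FreeBSD's
        // built-in fetch(1), so nothing is scp-ed from the operator's machine.
        Url::Url(url) => {
            session
                .ssh_run(&format!("fetch -o /usr/local/http/{served_path} {url}"))
                .await
        }
        // Existing variants keep their current upload path.
        Url::LocalPath(path) => {
            session
                .ssh_run(&format!("echo 'would scp {path} as before'"))
                .await
        }
    }
}

#[tokio::main]
async fn main() -> Result<(), String> {
    let session = OpnsenseSession;
    ensure_http_file(
        &session,
        "pxe/scos/initramfs.img",
        &Url::Url("https://s3.example.com/harmony-assets/scos/initramfs.img".to_string()),
    )
    .await
}
```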
### 3.5 Refactor OKD modules to use assets + config

In `bootstrap_02_bootstrap.rs`:

- `openshift-install`: Resolve `OkdInstallerConfig` from `harmony_config`, download via `harmony_assets`, invoke from cache.
- SCOS images: Pass `Url::Url(scos_kernel_url)` etc. to `StaticFilesHttpScore`. OPNsense fetches from S3 directly.
- Remove `oc` and `kubectl` from `data/okd/bin/` (never used by code).

In `ipxe.rs`:

- Replace the folder-to-serve SCP workaround with individual `Url::Url` entries.
- Remove the `inquire::Confirm` SCP prompts.

### 3.6 Upload assets to S3

- Upload all current `data/` binaries to Ceph S3 bucket with path scheme: `harmony-assets/okd/v{version}/openshift-install`, `harmony-assets/pxe/centos-stream-9/install.img`, etc.
- Set public-read ACL or configure presigned URL generation.
- Record S3 URLs and SHA256 checksums as defaults in the config structs.

### 3.7 Remove LFS, clean git

- Remove all LFS-tracked files from the repo.
- Update `.gitattributes` to remove LFS filters.
- Keep `data/` in `.gitignore` (it becomes a local cache directory).
- Optionally use `git filter-repo` or BFG to strip LFS objects from history (required before Phase 4 GitHub publish).

## Deliverables

- [ ] `harmony_assets` has tests ported from k3d pattern (5+ tests with httptest)
- [ ] `k3d::DownloadableAsset` replaced by `harmony_assets` usage
- [ ] `OkdInstallerConfig` struct using `harmony_config`
- [ ] `Url::Url` implemented in OPNsense HTTP and TFTP infra
- [ ] OKD bootstrap refactored to use lazy-download pattern
- [ ] Assets uploaded to S3 with documented URLs/checksums
- [ ] LFS removed, git history cleaned
- [ ] Repo size small enough for GitHub (~code + templates only)
110 ROADMAP/04-publish-github.md (new file)
@@ -0,0 +1,110 @@
# Phase 4: Publish to GitHub

## Goal

Make Harmony publicly available on GitHub as the primary community hub for issues, pull requests, and discussions. CI runs on self-hosted runners.

## Prerequisites

- Phase 3 complete: LFS removed, git history cleaned, repo is small
- README polished with quick-start, architecture overview, examples
- All existing tests pass

## Tasks

### 4.1 Clean git history

```bash
# Option A: git filter-repo (preferred)
git filter-repo --strip-blobs-bigger-than 10M

# Option B: BFG Repo Cleaner
bfg --strip-blobs-bigger-than 10M
git reflog expire --expire=now --all
git gc --prune=now --aggressive
```

Verify final repo size is reasonable (target: <50MB including all code, docs, templates).

### 4.2 Create GitHub repository

- Create `NationTech/harmony` (or chosen org/name) on GitHub
- Push cleaned repo as initial commit
- Set default branch to `main` (rename from `master` if desired)

### 4.3 Set up CI on self-hosted runners

GitHub is the community hub, but CI runs on your own infrastructure. Options:

**Option A: GitHub Actions with self-hosted runners**
- Register your Gitea runner machines as GitHub Actions self-hosted runners
- Port `.gitea/workflows/check.yml` to `.github/workflows/check.yml`
- Same Docker image (`hub.nationtech.io/harmony/harmony_composer:latest`), same commands
- Pro: native GitHub PR checks, no external service needed
- Con: runners need outbound access to GitHub API

**Option B: External CI (Woodpecker, Drone, Jenkins)**
- Use any CI that supports webhooks from GitHub
- Report status back to GitHub via commit status API / checks API
- Pro: fully self-hosted, no GitHub dependency for builds
- Con: extra integration work

**Option C: Keep Gitea CI, mirror from GitHub**
- GitHub repo has a webhook that triggers Gitea CI on push
- Gitea reports back to GitHub via commit status API
- Pro: no migration of CI config
- Con: fragile webhook chain

**Recommendation**: Option A. GitHub Actions self-hosted runners are straightforward and give the best contributor UX (native PR checks). The workflow files are nearly identical to Gitea workflows.

```yaml
# .github/workflows/check.yml
name: Check
on: [push, pull_request]
jobs:
  check:
    runs-on: self-hosted
    container:
      image: hub.nationtech.io/harmony/harmony_composer:latest
    steps:
      - uses: actions/checkout@v4
      - run: bash build/check.sh
```

### 4.4 Polish documentation

- **README.md**: Quick-start (clone → run → get prompted → see result), architecture diagram (Score → Interpret → Topology), link to docs and examples
- **CONTRIBUTING.md**: Already exists. Review for GitHub-specific guidance (fork workflow, PR template)
- **docs/**: Already comprehensive. Verify links work on GitHub rendering
- **Examples**: Ensure each example has a one-line description in its `Cargo.toml` and a comment block in `main.rs`

### 4.5 License and legal

- Verify workspace `license` field in root `Cargo.toml` is set correctly
- Add `LICENSE` file at repo root if not present
- Scan for any proprietary dependencies or hardcoded internal URLs

### 4.6 GitHub repository configuration

- Branch protection on `main`: require PR review, require CI to pass
- Issue templates: bug report, feature request
- PR template: checklist (tests pass, docs updated, etc.)
- Topics/tags: `rust`, `infrastructure-as-code`, `kubernetes`, `orchestration`, `bare-metal`
- Repository description: "Infrastructure orchestration framework. Declare what you want (Score), describe your infrastructure (Topology), let Harmony figure out how."

### 4.7 Gitea as internal mirror

- Set up Gitea to mirror from GitHub (pull mirror)
- Internal CI can continue running on Gitea for private/experimental branches
- Public contributions flow through GitHub

## Deliverables

- [ ] Git history cleaned, repo size <50MB
- [ ] Public GitHub repository created
- [ ] CI running on self-hosted runners with GitHub Actions
- [ ] Branch protection enabled
- [ ] README polished with quick-start guide
- [ ] Issue and PR templates created
- [ ] LICENSE file present
- [ ] Gitea configured as mirror
255 ROADMAP/05-e2e-tests-simple.md (new file)
@@ -0,0 +1,255 @@
# Phase 5: E2E Tests for PostgreSQL & RustFS

## Goal

Establish an automated E2E test pipeline that proves working examples actually work. Start with the two simplest k8s-based examples: PostgreSQL and RustFS.

## Prerequisites

- Phase 1 complete (config crate works, bootstrap is clean)
- `feat/rustfs` branch merged

## Architecture

### Test harness: `tests/e2e/`

A dedicated workspace member crate at `tests/e2e/` that contains:

1. **Shared k3d utilities** — create/destroy clusters, wait for readiness
2. **Per-example test modules** — each example gets a `#[tokio::test]` function
3. **Assertion helpers** — wait for pods, check CRDs exist, verify services

```
tests/
  e2e/
    Cargo.toml
    src/
      lib.rs          # Shared test utilities
      k3d.rs          # k3d cluster lifecycle
      k8s_assert.rs   # K8s assertion helpers
    tests/
      postgresql.rs   # PostgreSQL E2E test
      rustfs.rs       # RustFS E2E test
```

### k3d cluster lifecycle

```rust
// tests/e2e/src/k3d.rs
use k3d_rs::K3d;

pub struct TestCluster {
    pub name: String,
    pub k3d: K3d,
    pub client: kube::Client,
    reuse: bool,
}

impl TestCluster {
    /// Creates a k3d cluster for testing.
    /// If HARMONY_E2E_REUSE_CLUSTER=1, reuses existing cluster.
    pub async fn ensure(name: &str) -> Result<Self, String> {
        let reuse = std::env::var("HARMONY_E2E_REUSE_CLUSTER")
            .map(|v| v == "1")
            .unwrap_or(false);

        let base_dir = PathBuf::from("/tmp/harmony-e2e");
        let k3d = K3d::new(base_dir, Some(name.to_string()));

        let client = k3d.ensure_installed().await?;

        Ok(Self { name: name.to_string(), k3d, client, reuse })
    }

    /// Returns the kubeconfig path for this cluster.
    pub fn kubeconfig_path(&self) -> String { ... }
}

impl Drop for TestCluster {
    fn drop(&mut self) {
        if !self.reuse {
            // Best-effort cleanup
            let _ = self.k3d.run_k3d_command(["cluster", "delete", &self.name]);
        }
    }
}
```

### K8s assertion helpers

```rust
// tests/e2e/src/k8s_assert.rs

/// Wait until a pod matching the label selector is Running in the namespace.
/// Times out after `timeout` duration.
pub async fn wait_for_pod_running(
    client: &kube::Client,
    namespace: &str,
    label_selector: &str,
    timeout: Duration,
) -> Result<(), String>

/// Assert a CRD instance exists.
pub async fn assert_resource_exists<K: kube::Resource>(
    client: &kube::Client,
    name: &str,
    namespace: Option<&str>,
) -> Result<(), String>

/// Install a Helm chart. Returns when all pods in the release are running.
pub async fn helm_install(
    release_name: &str,
    chart: &str,
    namespace: &str,
    repo_url: Option<&str>,
    timeout: Duration,
) -> Result<(), String>
```
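The block above lists only signatures. As a concrete reference, here is one way `wait_for_pod_running` could be implemented with the `kube` and `k8s-openapi` crates already pulled in by 5.1 below; the 2-second polling interval and error wording are choices of this sketch, not a settled design.

```rust
use std::time::Duration;

use k8s_openapi::api::core::v1::Pod;
use kube::api::{Api, ListParams};
use tokio::time::{sleep, Instant};

/// Poll the namespace until a pod matching `label_selector` reports phase
/// "Running", or give up once `timeout` has elapsed.
pub async fn wait_for_pod_running(
    client: &kube::Client,
    namespace: &str,
    label_selector: &str,
    timeout: Duration,
) -> Result<(), String> {
    let pods: Api<Pod> = Api::namespaced(client.clone(), namespace);
    let params = ListParams::default().labels(label_selector);
    let deadline = Instant::now() + timeout;

    loop {
        let list = pods.list(&params).await.map_err(|e| e.to_string())?;
        let running = list.items.iter().any(|pod| {
            pod.status.as_ref().and_then(|s| s.phase.as_deref()) == Some("Running")
        });
        if running {
            return Ok(());
        }
        if Instant::now() >= deadline {
            return Err(format!(
                "no pod matching '{label_selector}' reached Running in {namespace} within {timeout:?}"
            ));
        }
        sleep(Duration::from_secs(2)).await;
    }
}
```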
## Tasks

### 5.1 Create the `tests/e2e/` crate

Add to workspace `Cargo.toml`:

```toml
[workspace]
members = [
    # ... existing members
    "tests/e2e",
]
```

`tests/e2e/Cargo.toml`:

```toml
[package]
name = "harmony-e2e-tests"
edition = "2024"
publish = false

[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
k3d_rs = { path = "../../k3d", package = "k3d_rs" }
kube = { workspace = true }
k8s-openapi = { workspace = true }
tokio = { workspace = true }
log = { workspace = true }
env_logger = { workspace = true }

[dev-dependencies]
pretty_assertions = { workspace = true }
```

### 5.2 PostgreSQL E2E test

```rust
// tests/e2e/tests/postgresql.rs
use harmony::modules::postgresql::{PostgreSQLScore, capability::PostgreSQLConfig};
use harmony::topology::K8sAnywhereTopology;
use harmony::inventory::Inventory;
use harmony::maestro::Maestro;

#[tokio::test]
async fn test_postgresql_deploys_on_k3d() {
    let cluster = TestCluster::ensure("harmony-e2e-pg").await.unwrap();

    // Install CNPG operator via Helm
    // (K8sAnywhereTopology::ensure_ready() now handles this since
    // commit e1183ef "K8s postgresql score now ensures cnpg is installed")
    // But we may need the Helm chart for non-OKD:
    helm_install(
        "cnpg",
        "cloudnative-pg",
        "cnpg-system",
        Some("https://cloudnative-pg.github.io/charts"),
        Duration::from_secs(120),
    ).await.unwrap();

    // Configure topology pointing to test cluster
    let config = K8sAnywhereConfig {
        kubeconfig: Some(cluster.kubeconfig_path()),
        use_local_k3d: false,
        autoinstall: false,
        use_system_kubeconfig: false,
        harmony_profile: "dev".to_string(),
        k8s_context: None,
    };
    let topology = K8sAnywhereTopology::with_config(config);

    // Create and run the score
    let score = PostgreSQLScore {
        config: PostgreSQLConfig {
            cluster_name: "e2e-test-pg".to_string(),
            namespace: "e2e-pg-test".to_string(),
            ..Default::default()
        },
    };

    let mut maestro = Maestro::initialize(Inventory::autoload(), topology).await.unwrap();
    maestro.register_all(vec![Box::new(score)]);

    let scores = maestro.scores().read().unwrap().first().unwrap().clone_box();
    let result = maestro.interpret(scores).await;
    assert!(result.is_ok(), "PostgreSQL score failed: {:?}", result.err());

    // Assert: CNPG Cluster resource exists
    // (the Cluster CRD is applied — pod readiness may take longer)
    let client = cluster.client.clone();
    // ... assert Cluster CRD exists in e2e-pg-test namespace
}
```

### 5.3 RustFS E2E test

Similar structure. Details depend on what the RustFS score deploys (likely a Helm chart or k8s resources for MinIO/RustFS).

```rust
#[tokio::test]
async fn test_rustfs_deploys_on_k3d() {
    let cluster = TestCluster::ensure("harmony-e2e-rustfs").await.unwrap();
    // ... similar pattern: configure topology, create score, interpret, assert
}
```

### 5.4 CI job for E2E tests

New workflow file (Gitea or GitHub Actions):

```yaml
# .gitea/workflows/e2e.yml (or .github/workflows/e2e.yml)
name: E2E Tests
on:
  push:
    branches: [master, main]
  # Don't run on every PR — too slow. Run on label or manual trigger.
  workflow_dispatch:

jobs:
  e2e:
    runs-on: self-hosted # Must have Docker available for k3d
    timeout-minutes: 15
    steps:
      - uses: actions/checkout@v4

      - name: Install k3d
        run: curl -s https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | bash

      - name: Run E2E tests
        run: cargo test -p harmony-e2e-tests -- --test-threads=1
        env:
          RUST_LOG: info
```

Note `--test-threads=1`: E2E tests create k3d clusters and should not run in parallel (port conflicts, resource contention).

## Deliverables

- [ ] `tests/e2e/` crate added to workspace
- [ ] Shared test utilities: `TestCluster`, `wait_for_pod_running`, `helm_install`
- [ ] PostgreSQL E2E test passing
- [ ] RustFS E2E test passing (after `feat/rustfs` merge)
- [ ] CI job running E2E tests on push to main
- [ ] `HARMONY_E2E_REUSE_CLUSTER=1` for fast local iteration
214 ROADMAP/06-e2e-tests-kvm.md (new file)
@@ -0,0 +1,214 @@
|
||||
# Phase 6: E2E Tests for OKD HA Cluster on KVM
|
||||
|
||||
## Goal
|
||||
|
||||
Prove the full OKD bare-metal installation flow works end-to-end using KVM virtual machines. This is the ultimate validation of Harmony's core value proposition: declare an OKD cluster, point it at infrastructure, watch it materialize.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Phase 5 complete (test harness exists, k3d tests passing)
|
||||
- `feature/kvm-module` merged to main
|
||||
- A CI runner with libvirt/KVM access and nested virtualization support
|
||||
|
||||
## Architecture
|
||||
|
||||
The KVM branch already has a `kvm_okd_ha_cluster` example that creates:
|
||||
|
||||
```
|
||||
Host bridge (WAN)
|
||||
|
|
||||
+--------------------+
|
||||
| OPNsense | 192.168.100.1
|
||||
| gateway + PXE |
|
||||
+--------+-----------+
|
||||
|
|
||||
harmonylan (192.168.100.0/24)
|
||||
+---------+---------+---------+---------+
|
||||
| | | | |
|
||||
+----+---+ +---+---+ +---+---+ +---+---+ +--+----+
|
||||
| cp0 | | cp1 | | cp2 | |worker0| |worker1|
|
||||
| .10 | | .11 | | .12 | | .20 | | .21 |
|
||||
+--------+ +-------+ +-------+ +-------+ +---+---+
|
||||
|
|
||||
+-----+----+
|
||||
| worker2 |
|
||||
| .22 |
|
||||
+----------+
|
||||
```
|
||||
|
||||
The test needs to orchestrate this entire setup, wait for OKD to converge, and assert the cluster is healthy.
|
||||
|
||||
## Tasks
|
||||
|
||||
### 6.1 Start with `example_linux_vm` — the simplest KVM test
|
||||
|
||||
Before tackling the full OKD stack, validate the KVM module itself with the simplest possible test:
|
||||
|
||||
```rust
|
||||
// tests/e2e/tests/kvm_linux_vm.rs
|
||||
|
||||
#[tokio::test]
|
||||
#[ignore] // Requires libvirt access — run with: cargo test -- --ignored
|
||||
async fn test_linux_vm_boots_from_iso() {
|
||||
let executor = KvmExecutor::from_env().unwrap();
|
||||
|
||||
// Create isolated network
|
||||
let network = NetworkConfig {
|
||||
name: "e2e-test-net".to_string(),
|
||||
bridge: "virbr200".to_string(),
|
||||
// ...
|
||||
};
|
||||
executor.ensure_network(&network).await.unwrap();
|
||||
|
||||
// Define and start VM
|
||||
let vm_config = VmConfig::builder("e2e-linux-test")
|
||||
.vcpus(1)
|
||||
.memory_gb(1)
|
||||
.disk(5)
|
||||
.network(NetworkRef::named("e2e-test-net"))
|
||||
.cdrom("https://releases.ubuntu.com/24.04/ubuntu-24.04-live-server-amd64.iso")
|
||||
.boot_order([BootDevice::Cdrom, BootDevice::Disk])
|
||||
.build();
|
||||
|
||||
executor.ensure_vm(&vm_config).await.unwrap();
|
||||
executor.start_vm("e2e-linux-test").await.unwrap();
|
||||
|
||||
// Assert VM is running
|
||||
let status = executor.vm_status("e2e-linux-test").await.unwrap();
|
||||
assert_eq!(status, VmStatus::Running);
|
||||
|
||||
// Cleanup
|
||||
executor.destroy_vm("e2e-linux-test").await.unwrap();
|
||||
executor.undefine_vm("e2e-linux-test").await.unwrap();
|
||||
executor.delete_network("e2e-test-net").await.unwrap();
|
||||
}
|
||||
```
|
||||
|
||||
This test validates:
|
||||
- ISO download works (via `harmony_assets` if refactored, or built-in KVM module download)
|
||||
- libvirt XML generation is correct
|
||||
- VM lifecycle (define → start → status → destroy → undefine)
|
||||
- Network creation/deletion
|
||||
|
||||
### 6.2 OKD HA Cluster E2E test
|
||||
|
||||
The full integration test. This is long-running (30-60 minutes) and should only run nightly or on-demand.
|
||||
|
||||
```rust
|
||||
// tests/e2e/tests/kvm_okd_ha.rs
|
||||
|
||||
#[tokio::test]
|
||||
#[ignore] // Requires KVM + significant resources. Run nightly.
|
||||
async fn test_okd_ha_cluster_on_kvm() {
|
||||
// 1. Create virtual infrastructure
|
||||
// - OPNsense gateway VM
|
||||
// - 3 control plane VMs
|
||||
// - 3 worker VMs
|
||||
// - Virtual network (harmonylan)
|
||||
|
||||
// 2. Run OKD installation scores
|
||||
// (the kvm_okd_ha_cluster example, but as a test)
|
||||
|
||||
// 3. Wait for OKD API server to become reachable
|
||||
// - Poll https://api.okd.harmonylan:6443 until it responds
|
||||
// - Timeout: 30 minutes
|
||||
|
||||
// 4. Assert cluster health
|
||||
// - All nodes in Ready state
|
||||
// - ClusterVersion reports Available=True
|
||||
// - Sample workload (nginx) deploys and pod reaches Running
|
||||
|
||||
// 5. Cleanup
|
||||
// - Destroy all VMs
|
||||
// - Delete virtual networks
|
||||
// - Clean up disk images
|
||||
}
|
||||
```
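
Step 3 can be a plain poll loop. A minimal sketch, assuming `reqwest` and `anyhow` from the workspace and a hypothetical `wait_for_api_server` helper (the endpoint and the 30-minute timeout come from the comments above):

```rust
use std::time::{Duration, Instant};

async fn wait_for_api_server(url: &str, timeout: Duration) -> anyhow::Result<()> {
    // The OKD API serves a self-signed certificate during bootstrap, so TLS
    // verification is disabled for this readiness probe only.
    let client = reqwest::Client::builder()
        .danger_accept_invalid_certs(true)
        .timeout(Duration::from_secs(10))
        .build()?;

    let start = Instant::now();
    while start.elapsed() < timeout {
        if let Ok(resp) = client.get(url).send().await {
            // Any HTTP answer (even a 403 for this unauthenticated request) means
            // the API server is up and terminating TLS.
            if resp.status().is_success() || resp.status().as_u16() == 403 {
                return Ok(());
            }
        }
        tokio::time::sleep(Duration::from_secs(15)).await;
    }
    anyhow::bail!("OKD API at {url} not reachable within {timeout:?}")
}

// Usage inside the test:
// wait_for_api_server("https://api.okd.harmonylan:6443", Duration::from_secs(30 * 60)).await?;
```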
|
||||
|
||||
### 6.3 CI runner requirements
|
||||
|
||||
The KVM E2E test needs a runner with:
|
||||
|
||||
- **Hardware**: 32GB+ RAM, 8+ CPU cores, 100GB+ disk
|
||||
- **Software**: libvirt, QEMU/KVM, `virsh`, nested virtualization enabled
|
||||
- **Network**: Outbound internet access (to download ISOs, OKD images)
|
||||
- **Permissions**: User in `libvirt` group, or root access
|
||||
|
||||
Options:
|
||||
- **Dedicated bare-metal machine** registered as a self-hosted GitHub Actions runner
|
||||
- **Cloud VM with nested virt** (e.g., GCP n2-standard-8 with `--enable-nested-virtualization`)
|
||||
- **Manual trigger only** — developer runs locally, CI just tracks pass/fail
|
||||
|
||||
### 6.4 Nightly CI job
|
||||
|
||||
```yaml
|
||||
# .github/workflows/e2e-kvm.yml
|
||||
name: E2E KVM Tests
|
||||
on:
|
||||
schedule:
|
||||
- cron: '0 2 * * *' # 2 AM daily
|
||||
workflow_dispatch: # Manual trigger
|
||||
|
||||
jobs:
|
||||
kvm-tests:
|
||||
runs-on: [self-hosted, kvm] # Label for KVM-capable runners
|
||||
timeout-minutes: 90
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Run KVM E2E tests
|
||||
run: cargo test -p harmony-e2e-tests -- --ignored --test-threads=1
|
||||
env:
|
||||
RUST_LOG: info
|
||||
HARMONY_KVM_URI: qemu:///system
|
||||
|
||||
- name: Cleanup VMs on failure
|
||||
if: failure()
|
||||
run: |
|
||||
virsh list --all --name | grep e2e | xargs -I {} virsh destroy {} || true
|
||||
virsh list --all --name | grep e2e | xargs -I {} virsh undefine {} --remove-all-storage || true
|
||||
```
|
||||
|
||||
### 6.5 Test resource management
|
||||
|
||||
KVM tests create real resources that must be cleaned up even on failure. Implement a test fixture pattern:
|
||||
|
||||
```rust
|
||||
struct KvmTestFixture {
|
||||
executor: KvmExecutor,
|
||||
vms: Vec<String>,
|
||||
networks: Vec<String>,
|
||||
}
|
||||
|
||||
impl KvmTestFixture {
|
||||
fn track_vm(&mut self, name: &str) { self.vms.push(name.to_string()); }
|
||||
fn track_network(&mut self, name: &str) { self.networks.push(name.to_string()); }
|
||||
}
|
||||
|
||||
impl Drop for KvmTestFixture {
|
||||
fn drop(&mut self) {
|
||||
// Best-effort cleanup of all tracked resources
|
||||
for vm in &self.vms {
|
||||
let _ = std::process::Command::new("virsh")
|
||||
.args(["destroy", vm]).output();
|
||||
let _ = std::process::Command::new("virsh")
|
||||
.args(["undefine", vm, "--remove-all-storage"]).output();
|
||||
}
|
||||
for net in &self.networks {
|
||||
let _ = std::process::Command::new("virsh")
|
||||
.args(["net-destroy", net]).output();
|
||||
let _ = std::process::Command::new("virsh")
|
||||
.args(["net-undefine", net]).output();
|
||||
}
|
||||
}
|
||||
}
|
||||
```
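
Usage then looks like this (a sketch; the test body is elided, and cleanup relies on `Drop` running even when an assertion panics):

```rust
#[tokio::test]
#[ignore] // Requires libvirt access
async fn test_with_fixture() {
    let mut fixture = KvmTestFixture {
        executor: KvmExecutor::from_env().unwrap(),
        vms: Vec::new(),
        networks: Vec::new(),
    };

    // Register resources before creating them so they are cleaned up even if
    // creation or a later assertion fails.
    fixture.track_network("e2e-fixture-net");
    fixture.track_vm("e2e-fixture-vm");

    // ... create the network and VM through fixture.executor, run assertions ...

    // No explicit cleanup: Drop destroys and undefines everything tracked above.
}
```

The nightly CI cleanup step above remains the backstop for anything that escapes `Drop` (for example, a killed test process).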
|
||||
|
||||
## Deliverables
|
||||
|
||||
- [ ] `test_linux_vm_boots_from_iso` — passing KVM smoke test
|
||||
- [ ] `test_okd_ha_cluster_on_kvm` — full OKD installation test
|
||||
- [ ] `KvmTestFixture` with resource cleanup on test failure
|
||||
- [ ] Nightly CI job on KVM-capable runner
|
||||
- [ ] Force-cleanup script for leaked VMs/networks
|
||||
- [ ] Documentation: how to set up a KVM runner for E2E tests
|
||||
@@ -1,318 +0,0 @@
|
||||
# Architecture Decision Record: Monitoring and Alerting Architecture
|
||||
|
||||
Initial Author: Willem Rolleman, Jean-Gabriel Carrier
|
||||
|
||||
Initial Date: March 9, 2026
|
||||
|
||||
Last Updated Date: March 9, 2026
|
||||
|
||||
## Status
|
||||
|
||||
Accepted
|
||||
|
||||
Supersedes: [ADR-010](010-monitoring-and-alerting.md)
|
||||
|
||||
## Context
|
||||
|
||||
Harmony needs a unified approach to monitoring and alerting across different infrastructure targets:
|
||||
|
||||
1. **Cluster-level monitoring**: Administrators managing entire Kubernetes/OKD clusters need to define cluster-wide alerts, receivers, and scrape targets.
|
||||
|
||||
2. **Tenant-level monitoring**: Multi-tenant clusters where teams are confined to namespaces need monitoring scoped to their resources.
|
||||
|
||||
3. **Application-level monitoring**: Developers deploying applications want zero-config monitoring that "just works" for their services.
|
||||
|
||||
The monitoring landscape is fragmented:
|
||||
- **OKD/OpenShift**: Built-in Prometheus with AlertmanagerConfig CRDs
|
||||
- **KubePrometheus**: Helm-based stack with PrometheusRule CRDs
|
||||
- **RHOB (Red Hat Observability)**: Operator-based with MonitoringStack CRDs
|
||||
- **Standalone Prometheus**: Raw Prometheus deployments
|
||||
|
||||
Each system has different CRDs, different installation methods, and different configuration APIs.
|
||||
|
||||
## Decision
|
||||
|
||||
We implement a **trait-based architecture with compile-time capability verification** that provides:
|
||||
|
||||
1. **Type-safe abstractions** via parameterized traits: `AlertReceiver<S>`, `AlertRule<S>`, `ScrapeTarget<S>`
|
||||
2. **Compile-time topology compatibility** via the `Observability<S>` capability bound
|
||||
3. **Three levels of abstraction**: Cluster, Tenant, and Application monitoring
|
||||
4. **Pre-built alert rules** as functions that return typed structs
|
||||
|
||||
### Core Traits
|
||||
|
||||
```rust
|
||||
// domain/topology/monitoring.rs
|
||||
|
||||
/// Marker trait for systems that send alerts (Prometheus, etc.)
|
||||
pub trait AlertSender: Send + Sync + std::fmt::Debug {
|
||||
fn name(&self) -> String;
|
||||
}
|
||||
|
||||
/// Defines how a receiver (Discord, Slack, etc.) builds its configuration
|
||||
/// for a specific sender type
|
||||
pub trait AlertReceiver<S: AlertSender>: std::fmt::Debug + Send + Sync {
|
||||
fn build(&self) -> Result<ReceiverInstallPlan, InterpretError>;
|
||||
fn name(&self) -> String;
|
||||
fn clone_box(&self) -> Box<dyn AlertReceiver<S>>;
|
||||
}
|
||||
|
||||
/// Defines how an alert rule builds its PrometheusRule configuration
|
||||
pub trait AlertRule<S: AlertSender>: std::fmt::Debug + Send + Sync {
|
||||
fn build_rule(&self) -> Result<serde_json::Value, InterpretError>;
|
||||
fn name(&self) -> String;
|
||||
fn clone_box(&self) -> Box<dyn AlertRule<S>>;
|
||||
}
|
||||
|
||||
/// Capability that topologies implement to support monitoring
|
||||
pub trait Observability<S: AlertSender> {
|
||||
async fn install_alert_sender(&self, sender: &S, inventory: &Inventory)
|
||||
-> Result<PreparationOutcome, PreparationError>;
|
||||
async fn install_receivers(&self, sender: &S, inventory: &Inventory,
|
||||
receivers: Option<Vec<Box<dyn AlertReceiver<S>>>>) -> Result<...>;
|
||||
async fn install_rules(&self, sender: &S, inventory: &Inventory,
|
||||
rules: Option<Vec<Box<dyn AlertRule<S>>>>) -> Result<...>;
|
||||
async fn add_scrape_targets(&self, sender: &S, inventory: &Inventory,
|
||||
scrape_targets: Option<Vec<Box<dyn ScrapeTarget<S>>>>) -> Result<...>;
|
||||
async fn ensure_monitoring_installed(&self, sender: &S, inventory: &Inventory)
|
||||
-> Result<...>;
|
||||
}
|
||||
```
|
||||
|
||||
### Alert Sender Types
|
||||
|
||||
Each monitoring stack is a distinct `AlertSender`:
|
||||
|
||||
| Sender | Module | Use Case |
|
||||
|--------|--------|----------|
|
||||
| `OpenshiftClusterAlertSender` | `monitoring/okd/` | OKD/OpenShift built-in monitoring |
|
||||
| `KubePrometheus` | `monitoring/kube_prometheus/` | Helm-deployed kube-prometheus-stack |
|
||||
| `Prometheus` | `monitoring/prometheus/` | Standalone Prometheus via Helm |
|
||||
| `RedHatClusterObservability` | `monitoring/red_hat_cluster_observability/` | RHOB operator |
|
||||
| `Grafana` | `monitoring/grafana/` | Grafana-managed alerting |
|
||||
|
||||
### Three Levels of Monitoring
|
||||
|
||||
#### 1. Cluster-Level Monitoring
|
||||
|
||||
For cluster administrators. Full control over monitoring infrastructure.
|
||||
|
||||
```rust
|
||||
// examples/okd_cluster_alerts/src/main.rs
|
||||
OpenshiftClusterAlertScore {
|
||||
sender: OpenshiftClusterAlertSender,
|
||||
receivers: vec![Box::new(DiscordReceiver { ... })],
|
||||
rules: vec![Box::new(alert_rules)],
|
||||
scrape_targets: Some(vec![Box::new(external_exporters)]),
|
||||
}
|
||||
```
|
||||
|
||||
**Characteristics:**
|
||||
- Cluster-scoped CRDs and resources
|
||||
- Can add external scrape targets (outside cluster)
|
||||
- Manages Alertmanager configuration
|
||||
- Requires cluster-admin privileges
|
||||
|
||||
#### 2. Tenant-Level Monitoring
|
||||
|
||||
For teams confined to namespaces. The topology determines tenant context.
|
||||
|
||||
```rust
|
||||
// The topology's Observability impl handles namespace scoping
|
||||
impl Observability<KubePrometheus> for K8sAnywhereTopology {
|
||||
async fn install_rules(&self, sender: &KubePrometheus, ...) {
|
||||
// Topology knows if it's tenant-scoped
|
||||
let namespace = self.get_tenant_config().await
|
||||
.map(|t| t.name)
|
||||
.unwrap_or("default");
|
||||
// Install rules in tenant namespace
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Characteristics:**
|
||||
- Namespace-scoped resources
|
||||
- Cannot modify cluster-level monitoring config
|
||||
- May have restricted receiver types
|
||||
- Runtime validation of permissions (cannot be fully compile-time)
|
||||
|
||||
#### 3. Application-Level Monitoring
|
||||
|
||||
For developers. Zero-config, opinionated monitoring.
|
||||
|
||||
```rust
|
||||
// modules/application/features/monitoring.rs
|
||||
pub struct Monitoring {
|
||||
pub application: Arc<dyn Application>,
|
||||
pub alert_receiver: Vec<Box<dyn AlertReceiver<Prometheus>>>,
|
||||
}
|
||||
|
||||
impl<T: Topology + Observability<Prometheus> + TenantManager + ...>
|
||||
ApplicationFeature<T> for Monitoring
|
||||
{
|
||||
async fn ensure_installed(&self, topology: &T) -> Result<...> {
|
||||
// Auto-creates ServiceMonitor
|
||||
// Auto-installs Ntfy for notifications
|
||||
// Handles tenant namespace automatically
|
||||
// Wires up sensible defaults
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Characteristics:**
|
||||
- Automatic ServiceMonitor creation
|
||||
- Opinionated notification channel (Ntfy)
|
||||
- Tenant-aware via topology
|
||||
- Minimal configuration required
|
||||
|
||||
## Rationale
|
||||
|
||||
### Why Generic Traits Instead of Unified Types?
|
||||
|
||||
Each monitoring stack (OKD, KubePrometheus, RHOB) has fundamentally different CRDs:
|
||||
|
||||
```rust
|
||||
// OKD uses AlertmanagerConfig with different structure
|
||||
AlertmanagerConfig { spec: { receivers: [...] } }
|
||||
|
||||
// RHOB uses secret references for webhook URLs
|
||||
MonitoringStack { spec: { alertmanagerConfig: { discordConfigs: [{ apiURL: { key: "..." } }] } } }
|
||||
|
||||
// KubePrometheus uses Alertmanager CRD with different field names
|
||||
Alertmanager { spec: { config: { receivers: [...] } } }
|
||||
```
|
||||
|
||||
A unified type would either:
|
||||
1. Be a lowest-common-denominator (loses stack-specific features)
|
||||
2. Be a complex union type (hard to use, easy to misconfigure)
|
||||
|
||||
Generic traits let each stack express its configuration naturally while providing a consistent interface.
|
||||
|
||||
### Why Compile-Time Capability Bounds?
|
||||
|
||||
```rust
|
||||
impl<T: Topology + Observability<OpenshiftClusterAlertSender>> Score<T>
|
||||
for OpenshiftClusterAlertScore { ... }
|
||||
```
|
||||
|
||||
This fails at compile time if you try to use `OpenshiftClusterAlertScore` with a topology that doesn't support OKD monitoring. This prevents the "config-is-valid-but-platform-is-wrong" errors that Harmony was designed to eliminate.
|
||||
|
||||
### Why Not a MonitoringStack Abstraction (V2 Approach)?
|
||||
|
||||
The V2 approach proposed a unified `MonitoringStack` that hides sender selection:
|
||||
|
||||
```rust
|
||||
// V2 approach - rejected
|
||||
MonitoringStack::new(MonitoringApiVersion::V2CRD)
|
||||
.add_alert_channel(discord)
|
||||
```
|
||||
|
||||
**Problems:**
|
||||
1. Hides which sender you're using, losing compile-time guarantees
|
||||
2. "Version selection" actually chooses between fundamentally different systems
|
||||
3. Would need to handle all stack-specific features through a generic interface
|
||||
|
||||
The current approach is explicit: you choose `OpenshiftClusterAlertSender` and the compiler verifies compatibility.
|
||||
|
||||
### Why Runtime Validation for Tenants?
|
||||
|
||||
Tenant confinement is determined at runtime by the topology and K8s RBAC. We cannot know at compile time whether a user has cluster-admin or namespace-only access.
|
||||
|
||||
Options considered:
|
||||
1. **Compile-time tenant markers** - Would require modeling entire RBAC hierarchy in types. Over-engineering.
|
||||
2. **Runtime validation** - Current approach. Fails with clear K8s permission errors if insufficient access.
|
||||
3. **No tenant support** - Would exclude a major use case.
|
||||
|
||||
Runtime validation is the pragmatic choice. The failure mode is clear (K8s API error) and occurs early in execution.
|
||||
|
||||
> Note: we will eventually have compile-time validation for such things. Rust macros are powerful and we could discover the actual capabilities we're dealing with, similar to sqlx's approach in its `query!` macro.
|
||||
|
||||
## Consequences
|
||||
|
||||
### Pros
|
||||
|
||||
1. **Type Safety**: Invalid configurations are caught at compile time
|
||||
2. **Extensibility**: Adding a new monitoring stack requires implementing traits, not modifying core code
|
||||
3. **Clear Separation**: Cluster/Tenant/Application levels have distinct entry points
|
||||
4. **Reusable Rules**: Pre-built alert rules as functions (`high_pvc_fill_rate_over_two_days()`)
|
||||
5. **CRD Accuracy**: Type definitions match actual Kubernetes CRDs exactly
|
||||
|
||||
### Cons
|
||||
|
||||
1. **Implementation Explosion**: `DiscordReceiver` implements `AlertReceiver<S>` for each sender type (3+ implementations)
|
||||
2. **Learning Curve**: Understanding the trait hierarchy takes time
|
||||
3. **clone_box Boilerplate**: Required for trait object cloning (3 lines per impl)
|
||||
|
||||
### Mitigations
|
||||
|
||||
- Implementation explosion is contained: each receiver type has O(senders) implementations, but receivers are rare compared to rules
|
||||
- Learning curve is documented with examples at each level
|
||||
- clone_box boilerplate is minimal and copy-paste
|
||||
|
||||
## Alternatives Considered
|
||||
|
||||
### Unified MonitoringStack Type
|
||||
|
||||
See "Why Not a MonitoringStack Abstraction" above. Rejected for losing compile-time safety.
|
||||
|
||||
### Helm-Only Approach
|
||||
|
||||
Use `HelmScore` directly for each monitoring deployment. Rejected because:
|
||||
- No type safety for alert rules
|
||||
- Cannot compose with application features
|
||||
- No tenant awareness
|
||||
|
||||
### Separate Modules Per Use Case
|
||||
|
||||
Have `cluster_monitoring/`, `tenant_monitoring/`, `app_monitoring/` as separate modules. Rejected because:
|
||||
- Massive code duplication
|
||||
- No shared abstraction for receivers/rules
|
||||
- Adding a feature requires three implementations
|
||||
|
||||
## Implementation Notes
|
||||
|
||||
### Module Structure
|
||||
|
||||
```
|
||||
modules/monitoring/
|
||||
├── mod.rs # Public exports
|
||||
├── alert_channel/ # Receivers (Discord, Webhook)
|
||||
├── alert_rule/ # Rules and pre-built alerts
|
||||
│ ├── prometheus_alert_rule.rs
|
||||
│ └── alerts/ # Library of pre-built rules
|
||||
│ ├── k8s/ # K8s-specific (pvc, pod, memory)
|
||||
│ └── infra/ # Infrastructure (opnsense, dell)
|
||||
├── okd/ # OpenshiftClusterAlertSender
|
||||
├── kube_prometheus/ # KubePrometheus
|
||||
├── prometheus/ # Prometheus
|
||||
├── red_hat_cluster_observability/ # RHOB
|
||||
├── grafana/ # Grafana
|
||||
├── application_monitoring/ # Application-level scores
|
||||
└── scrape_target/ # External scrape targets
|
||||
```
|
||||
|
||||
### Adding a New Alert Sender
|
||||
|
||||
1. Create sender type: `pub struct MySender; impl AlertSender for MySender { ... }`
|
||||
2. Implement `Observability<MySender>` for topologies that support it
|
||||
3. Create CRD types in `crd/` subdirectory
|
||||
4. Implement `AlertReceiver<MySender>` for existing receivers
|
||||
5. Implement `AlertRule<MySender>` for `AlertManagerRuleGroup`
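
A minimal sketch of steps 1 and 4 for a hypothetical `MySender`, reusing the trait signatures from the Core Traits section above (the `DiscordReceiver` body is elided and assumed to be `Clone`):

```rust
#[derive(Debug)]
pub struct MySender;

impl AlertSender for MySender {
    fn name(&self) -> String {
        "MySender".to_string()
    }
}

// Step 4: teach an existing receiver how to configure itself for the new sender.
impl AlertReceiver<MySender> for DiscordReceiver {
    fn build(&self) -> Result<ReceiverInstallPlan, InterpretError> {
        // Translate the receiver's settings into MySender's native config format.
        todo!()
    }

    fn name(&self) -> String {
        "discord".to_string()
    }

    fn clone_box(&self) -> Box<dyn AlertReceiver<MySender>> {
        Box::new(self.clone())
    }
}
```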
|
||||
|
||||
### Adding a New Alert Rule
|
||||
|
||||
```rust
|
||||
pub fn my_custom_alert() -> PrometheusAlertRule {
|
||||
PrometheusAlertRule::new("MyAlert", "up == 0")
|
||||
.for_duration("5m")
|
||||
.label("severity", "critical")
|
||||
.annotation("summary", "Service is down")
|
||||
}
|
||||
```
|
||||
|
||||
No trait implementation needed - `AlertManagerRuleGroup` already handles conversion.
|
||||
|
||||
## Related ADRs
|
||||
|
||||
- [ADR-013](013-monitoring-notifications.md): Notification channel selection (ntfy)
|
||||
- [ADR-011](011-multi-tenant-cluster.md): Multi-tenant cluster architecture
|
||||
@@ -1,21 +0,0 @@
|
||||
[package]
|
||||
name = "example-monitoring-v2"
|
||||
edition = "2024"
|
||||
version.workspace = true
|
||||
readme.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
harmony = { path = "../../harmony" }
|
||||
harmony_cli = { path = "../../harmony_cli" }
|
||||
harmony-k8s = { path = "../../harmony-k8s" }
|
||||
harmony_types = { path = "../../harmony_types" }
|
||||
kube = { workspace = true }
|
||||
schemars = "0.8"
|
||||
serde = { workspace = true, features = ["derive"] }
|
||||
serde_json = { workspace = true }
|
||||
serde_yaml = { workspace = true }
|
||||
url = { workspace = true }
|
||||
log = { workspace = true }
|
||||
async-trait = { workspace = true }
|
||||
k8s-openapi = { workspace = true }
|
||||
@@ -1,91 +0,0 @@
|
||||
# Monitoring v2 - Improved Architecture
|
||||
|
||||
This example demonstrates the improved monitoring architecture that addresses the "WTF/minute" issues in the original design.
|
||||
|
||||
## Key Improvements
|
||||
|
||||
### 1. **Single AlertChannel Trait with Generic Sender**
|
||||
|
||||
The original design required 9-12 implementations for each alert channel (Discord, Webhook, etc.) - one for each sender type. The new design uses a single trait with generic sender parameterization:
|
||||
|
||||
pub trait AlertChannel<Sender: AlertSender> {
|
||||
async fn install_config(&self, sender: &Sender) -> Result<Outcome, InterpretError>;
|
||||
fn name(&self) -> String;
|
||||
fn as_any(&self) -> &dyn std::any::Any;
|
||||
}
|
||||
|
||||
**Benefits:**
|
||||
- One Discord implementation works with all sender types
|
||||
- Type safety at compile time
|
||||
- No runtime dispatch overhead
|
||||
|
||||
### 2. **MonitoringStack Abstraction**
|
||||
|
||||
Instead of manually selecting CRDPrometheus vs KubePrometheus vs RHOBObservability, you now have a unified MonitoringStack that handles versioning:
|
||||
|
||||
let monitoring_stack = MonitoringStack::new(MonitoringApiVersion::V2CRD)
|
||||
.set_namespace("monitoring")
|
||||
.add_alert_channel(discord_receiver)
|
||||
.set_scrape_targets(vec![...]);
|
||||
|
||||
**Benefits:**
|
||||
- Single source of truth for monitoring configuration
|
||||
- Easy to switch between monitoring versions
|
||||
- Automatic version-specific configuration
|
||||
|
||||
### 3. **TenantMonitoringScore - True Composition**
|
||||
|
||||
The original monitoring_with_tenant example just put tenant and monitoring as separate items in a vec. The new design truly composes them:
|
||||
|
||||
let tenant_score = TenantMonitoringScore::new("test-tenant", monitoring_stack);
|
||||
|
||||
This creates a single score that:
|
||||
- Has tenant context
|
||||
- Has monitoring configuration
|
||||
- Automatically installs monitoring scoped to tenant namespace
|
||||
|
||||
**Benefits:**
|
||||
- No more "two separate things" confusion
|
||||
- Automatic tenant namespace scoping
|
||||
- Clear ownership: tenant owns its monitoring
|
||||
|
||||
### 4. **Versioned Monitoring APIs**
|
||||
|
||||
Clear versioning makes it obvious which monitoring stack you're using:
|
||||
|
||||
pub enum MonitoringApiVersion {
|
||||
V1Helm, // Old Helm charts
|
||||
V2CRD, // Current CRDs
|
||||
V3RHOB, // RHOB (future)
|
||||
}
|
||||
|
||||
**Benefits:**
|
||||
- No guessing which API version you're using
|
||||
- Easy to migrate between versions
|
||||
- Backward compatibility path
|
||||
|
||||
## Comparison
|
||||
|
||||
### Original Design (monitoring_with_tenant)
|
||||
- Manual selection of each component
|
||||
- Manual installation of both components
|
||||
- Need to remember to pass both to harmony_cli::run
|
||||
- Monitoring not scoped to tenant automatically
|
||||
|
||||
### New Design (monitoring_v2)
|
||||
- Single composed score
|
||||
- One score does it all
|
||||
|
||||
## Usage
|
||||
|
||||
cd examples/monitoring_v2
|
||||
cargo run
|
||||
|
||||
## Migration Path
|
||||
|
||||
To migrate from the old design to the new:
|
||||
|
||||
1. Replace individual alert channel implementations with AlertChannel<Sender>
|
||||
2. Use MonitoringStack instead of manual *Prometheus selection
|
||||
3. Use TenantMonitoringScore instead of separate TenantScore + monitoring scores
|
||||
4. Select monitoring version via MonitoringApiVersion
|
||||
@@ -1,343 +0,0 @@
|
||||
use std::collections::HashMap;
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
|
||||
use log::debug;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_yaml::{Mapping, Value};
|
||||
|
||||
use harmony::data::Version;
|
||||
use harmony::interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome};
|
||||
use harmony::inventory::Inventory;
|
||||
use harmony::score::Score;
|
||||
use harmony::topology::{Topology, tenant::TenantManager};
|
||||
|
||||
use harmony_k8s::K8sClient;
|
||||
use harmony_types::k8s_name::K8sName;
|
||||
use harmony_types::net::Url;
|
||||
|
||||
pub trait AlertSender: Send + Sync + std::fmt::Debug {
|
||||
fn name(&self) -> String;
|
||||
fn namespace(&self) -> String;
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct CRDPrometheus {
|
||||
pub namespace: String,
|
||||
pub client: Arc<K8sClient>,
|
||||
}
|
||||
|
||||
impl AlertSender for CRDPrometheus {
|
||||
fn name(&self) -> String {
|
||||
"CRDPrometheus".to_string()
|
||||
}
|
||||
|
||||
fn namespace(&self) -> String {
|
||||
self.namespace.clone()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct RHOBObservability {
|
||||
pub namespace: String,
|
||||
pub client: Arc<K8sClient>,
|
||||
}
|
||||
|
||||
impl AlertSender for RHOBObservability {
|
||||
fn name(&self) -> String {
|
||||
"RHOBObservability".to_string()
|
||||
}
|
||||
|
||||
fn namespace(&self) -> String {
|
||||
self.namespace.clone()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct KubePrometheus {
|
||||
pub config: Arc<Mutex<KubePrometheusConfig>>,
|
||||
}
|
||||
|
||||
impl Default for KubePrometheus {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl KubePrometheus {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
config: Arc::new(Mutex::new(KubePrometheusConfig::new())),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl AlertSender for KubePrometheus {
|
||||
fn name(&self) -> String {
|
||||
"KubePrometheus".to_string()
|
||||
}
|
||||
|
||||
fn namespace(&self) -> String {
|
||||
self.config.lock().unwrap().namespace.clone().unwrap_or_else(|| "monitoring".to_string())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct KubePrometheusConfig {
|
||||
pub namespace: Option<String>,
|
||||
#[serde(skip)]
|
||||
pub alert_receiver_configs: Vec<AlertManagerChannelConfig>,
|
||||
}
|
||||
|
||||
impl KubePrometheusConfig {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
namespace: None,
|
||||
alert_receiver_configs: Vec::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct AlertManagerChannelConfig {
|
||||
pub channel_receiver: serde_yaml::Value,
|
||||
pub channel_route: serde_yaml::Value,
|
||||
}
|
||||
|
||||
impl Default for AlertManagerChannelConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
channel_receiver: serde_yaml::Value::Mapping(Default::default()),
|
||||
channel_route: serde_yaml::Value::Mapping(Default::default()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ScrapeTargetConfig {
|
||||
pub service_name: String,
|
||||
pub port: String,
|
||||
pub path: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub enum MonitoringApiVersion {
|
||||
V1Helm,
|
||||
V2CRD,
|
||||
V3RHOB,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct MonitoringStack {
|
||||
pub version: MonitoringApiVersion,
|
||||
pub namespace: String,
|
||||
pub alert_channels: Vec<Arc<dyn AlertSender>>,
|
||||
pub scrape_targets: Vec<ScrapeTargetConfig>,
|
||||
}
|
||||
|
||||
impl MonitoringStack {
|
||||
pub fn new(version: MonitoringApiVersion) -> Self {
|
||||
Self {
|
||||
version,
|
||||
namespace: "monitoring".to_string(),
|
||||
alert_channels: Vec::new(),
|
||||
scrape_targets: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_namespace(mut self, namespace: &str) -> Self {
|
||||
self.namespace = namespace.to_string();
|
||||
self
|
||||
}
|
||||
|
||||
pub fn add_alert_channel(mut self, channel: impl AlertSender + 'static) -> Self {
|
||||
self.alert_channels.push(Arc::new(channel));
|
||||
self
|
||||
}
|
||||
|
||||
pub fn set_scrape_targets(mut self, targets: Vec<(&str, &str, String)>) -> Self {
|
||||
self.scrape_targets = targets
|
||||
.into_iter()
|
||||
.map(|(name, port, path)| ScrapeTargetConfig {
|
||||
service_name: name.to_string(),
|
||||
port: port.to_string(),
|
||||
path,
|
||||
})
|
||||
.collect();
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
pub trait AlertChannel<Sender: AlertSender> {
|
||||
fn install_config(&self, sender: &Sender);
|
||||
fn name(&self) -> String;
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct DiscordWebhook {
|
||||
pub name: K8sName,
|
||||
pub url: Url,
|
||||
pub selectors: Vec<HashMap<String, String>>,
|
||||
}
|
||||
|
||||
impl DiscordWebhook {
|
||||
fn get_config(&self) -> AlertManagerChannelConfig {
|
||||
let mut route = Mapping::new();
|
||||
route.insert(
|
||||
Value::String("receiver".to_string()),
|
||||
Value::String(self.name.to_string()),
|
||||
);
|
||||
route.insert(
|
||||
Value::String("matchers".to_string()),
|
||||
Value::Sequence(vec![Value::String("alertname!=Watchdog".to_string())]),
|
||||
);
|
||||
|
||||
let mut receiver = Mapping::new();
|
||||
receiver.insert(
|
||||
Value::String("name".to_string()),
|
||||
Value::String(self.name.to_string()),
|
||||
);
|
||||
|
||||
let mut discord_config = Mapping::new();
|
||||
discord_config.insert(
|
||||
Value::String("webhook_url".to_string()),
|
||||
Value::String(self.url.to_string()),
|
||||
);
|
||||
|
||||
receiver.insert(
|
||||
Value::String("discord_configs".to_string()),
|
||||
Value::Sequence(vec![Value::Mapping(discord_config)]),
|
||||
);
|
||||
|
||||
AlertManagerChannelConfig {
|
||||
channel_receiver: Value::Mapping(receiver),
|
||||
channel_route: Value::Mapping(route),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl AlertChannel<CRDPrometheus> for DiscordWebhook {
|
||||
fn install_config(&self, sender: &CRDPrometheus) {
|
||||
debug!("Installing Discord webhook for CRDPrometheus in namespace: {}", sender.namespace());
|
||||
debug!("Config: {:?}", self.get_config());
|
||||
debug!("Installed!");
|
||||
}
|
||||
|
||||
fn name(&self) -> String {
|
||||
"discord-webhook".to_string()
|
||||
}
|
||||
}
|
||||
|
||||
impl AlertChannel<RHOBObservability> for DiscordWebhook {
|
||||
fn install_config(&self, sender: &RHOBObservability) {
|
||||
debug!("Installing Discord webhook for RHOBObservability in namespace: {}", sender.namespace());
|
||||
debug!("Config: {:?}", self.get_config());
|
||||
debug!("Installed!");
|
||||
}
|
||||
|
||||
fn name(&self) -> String {
|
||||
"webhook-receiver".to_string()
|
||||
}
|
||||
}
|
||||
|
||||
impl AlertChannel<KubePrometheus> for DiscordWebhook {
|
||||
fn install_config(&self, sender: &KubePrometheus) {
|
||||
debug!("Installing Discord webhook for KubePrometheus in namespace: {}", sender.namespace());
|
||||
// Lock once and reuse the guard: taking the same std::sync::Mutex a second time
// while the first guard is still alive would deadlock.
let mut config = sender.config.lock().unwrap();
let ns = config.namespace.clone().unwrap_or_else(|| "monitoring".to_string());
debug!("Namespace: {}", ns);
config.alert_receiver_configs.push(self.get_config());
|
||||
debug!("Installed!");
|
||||
}
|
||||
|
||||
fn name(&self) -> String {
|
||||
"discord-webhook".to_string()
|
||||
}
|
||||
}
|
||||
|
||||
fn default_monitoring_stack() -> MonitoringStack {
|
||||
MonitoringStack::new(MonitoringApiVersion::V2CRD)
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct TenantMonitoringScore {
|
||||
pub tenant_id: harmony_types::id::Id,
|
||||
pub tenant_name: String,
|
||||
#[serde(skip)]
|
||||
#[serde(default = "default_monitoring_stack")]
|
||||
pub monitoring_stack: MonitoringStack,
|
||||
}
|
||||
|
||||
impl TenantMonitoringScore {
|
||||
pub fn new(tenant_name: &str, monitoring_stack: MonitoringStack) -> Self {
|
||||
Self {
|
||||
tenant_id: harmony_types::id::Id::default(),
|
||||
tenant_name: tenant_name.to_string(),
|
||||
monitoring_stack,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Topology + TenantManager> Score<T> for TenantMonitoringScore {
|
||||
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
||||
Box::new(TenantMonitoringInterpret {
|
||||
score: self.clone(),
|
||||
})
|
||||
}
|
||||
|
||||
fn name(&self) -> String {
|
||||
format!("{} monitoring [TenantMonitoringScore]", self.tenant_name)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct TenantMonitoringInterpret {
|
||||
pub score: TenantMonitoringScore,
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl<T: Topology + TenantManager> Interpret<T> for TenantMonitoringInterpret {
|
||||
async fn execute(
|
||||
&self,
|
||||
_inventory: &Inventory,
|
||||
topology: &T,
|
||||
) -> Result<Outcome, InterpretError> {
|
||||
let tenant_config = topology.get_tenant_config().await.unwrap();
|
||||
let tenant_ns = tenant_config.name.clone();
|
||||
|
||||
match self.score.monitoring_stack.version {
|
||||
MonitoringApiVersion::V1Helm => {
|
||||
debug!("Installing Helm monitoring for tenant {}", tenant_ns);
|
||||
}
|
||||
MonitoringApiVersion::V2CRD => {
|
||||
debug!("Installing CRD monitoring for tenant {}", tenant_ns);
|
||||
}
|
||||
MonitoringApiVersion::V3RHOB => {
|
||||
debug!("Installing RHOB monitoring for tenant {}", tenant_ns);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(Outcome::success(format!(
|
||||
"Installed monitoring stack for tenant {} with version {:?}",
|
||||
self.score.tenant_name,
|
||||
self.score.monitoring_stack.version
|
||||
)))
|
||||
}
|
||||
|
||||
fn get_name(&self) -> InterpretName {
|
||||
InterpretName::Custom("TenantMonitoringInterpret")
|
||||
}
|
||||
|
||||
fn get_version(&self) -> Version {
|
||||
Version::from("1.0.0").unwrap()
|
||||
}
|
||||
|
||||
fn get_status(&self) -> InterpretStatus {
|
||||
InterpretStatus::SUCCESS
|
||||
}
|
||||
|
||||
fn get_children(&self) -> Vec<harmony_types::id::Id> {
|
||||
Vec::new()
|
||||
}
|
||||
}
|
||||
book.toml (new file, 9 lines)
@@ -0,0 +1,9 @@
|
||||
[book]
|
||||
title = "Harmony"
|
||||
description = "Infrastructure orchestration that treats your platform like first-class code"
|
||||
src = "docs"
|
||||
build-dir = "book"
|
||||
authors = ["NationTech"]
|
||||
|
||||
[output.html]
|
||||
mathjax-support = false
|
||||
build/book.sh (new executable file, 11 lines)
@@ -0,0 +1,11 @@
|
||||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
cd "$(dirname "$0")/.."
|
||||
|
||||
cargo install mdbook --locked
|
||||
mdbook build
|
||||
|
||||
test -f book/index.html || (echo "ERROR: book/index.html not found" && exit 1)
|
||||
test -f book/concepts.html || (echo "ERROR: book/concepts.html not found" && exit 1)
|
||||
test -f book/guides/getting-started.html || (echo "ERROR: book/guides/getting-started.html not found" && exit 1)
|
||||
@@ -1,6 +1,8 @@
|
||||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
cd "$(dirname "$0")/.."
|
||||
|
||||
rustc --version
|
||||
cargo check --all-targets --all-features --keep-going
|
||||
cargo fmt --check
|
||||
build/ci.sh (new executable file, 16 lines)
@@ -0,0 +1,16 @@
|
||||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
cd "$(dirname "$0")/.."
|
||||
|
||||
BRANCH="${1:-main}"
|
||||
|
||||
echo "=== Running CI for branch: $BRANCH ==="
|
||||
|
||||
echo "--- Checking code ---"
|
||||
./build/check.sh
|
||||
|
||||
echo "--- Building book ---"
|
||||
./build/book.sh
|
||||
|
||||
echo "=== CI passed ==="
|
||||
@@ -13,8 +13,8 @@ If you're new to Harmony, start here:
|
||||
|
||||
See how to use Harmony to solve real-world problems.
|
||||
|
||||
- [**PostgreSQL on Local K3D**](./use-cases/postgresql-on-local-k3d.md): Deploy a production-grade PostgreSQL cluster on a local K3D cluster. The fastest way to get started.
|
||||
- [**OKD on Bare Metal**](./use-cases/okd-on-bare-metal.md): A detailed walkthrough of bootstrapping a high-availability OKD cluster from physical hardware.
|
||||
- [**Deploy a Rust Web App**](./use-cases/deploy-rust-webapp.md): A quick guide to deploying a monitored, containerized web application to a Kubernetes cluster.
|
||||
|
||||
## 3. Component Catalogs
|
||||
|
||||
@@ -31,16 +31,7 @@ Ready to build your own components? These guides show you how.
|
||||
- [**Writing a Score**](./guides/writing-a-score.md): Learn how to create your own `Score` and `Interpret` logic to define a new desired state.
|
||||
- [**Writing a Topology**](./guides/writing-a-topology.md): Learn how to model a new environment (like AWS, GCP, or custom hardware) as a `Topology`.
|
||||
- [**Adding Capabilities**](./guides/adding-capabilities.md): See how to add a `Capability` to your custom `Topology`.
|
||||
- [**Coding Guide**](./coding-guide.md): Conventions and best practices for writing Harmony code.
|
||||
|
||||
## 5. Module Documentation
|
||||
## 5. Architecture Decision Records
|
||||
|
||||
Deep dives into specific Harmony modules and features.
|
||||
|
||||
- [**Monitoring and Alerting**](./monitoring.md): Comprehensive guide to cluster, tenant, and application-level monitoring with support for OKD, KubePrometheus, RHOB, and more.
|
||||
|
||||
## 6. Architecture Decision Records
|
||||
|
||||
Important architectural decisions are documented in the `adr/` directory:
|
||||
|
||||
- [Full ADR Index](../adr/)
|
||||
Harmony's design is documented through Architecture Decision Records (ADRs). See the [ADR Overview](./adr/README.md) for a complete index of all decisions.
|
||||
|
||||
docs/SUMMARY.md (new file, 53 lines)
@@ -0,0 +1,53 @@
|
||||
# Summary
|
||||
|
||||
[Harmony Documentation](./README.md)
|
||||
|
||||
- [Core Concepts](./concepts.md)
|
||||
- [Getting Started Guide](./guides/getting-started.md)
|
||||
|
||||
## Use Cases
|
||||
|
||||
- [PostgreSQL on Local K3D](./use-cases/postgresql-on-local-k3d.md)
|
||||
- [OKD on Bare Metal](./use-cases/okd-on-bare-metal.md)
|
||||
|
||||
## Component Catalogs
|
||||
|
||||
- [Scores Catalog](./catalogs/scores.md)
|
||||
- [Topologies Catalog](./catalogs/topologies.md)
|
||||
- [Capabilities Catalog](./catalogs/capabilities.md)
|
||||
|
||||
## Developer Guides
|
||||
|
||||
- [Developer Guide](./guides/developer-guide.md)
|
||||
- [Writing a Score](./guides/writing-a-score.md)
|
||||
- [Writing a Topology](./guides/writing-a-topology.md)
|
||||
- [Adding Capabilities](./guides/adding-capabilities.md)
|
||||
|
||||
## Configuration
|
||||
|
||||
- [Configuration](./concepts/configuration.md)
|
||||
|
||||
## Architecture Decision Records
|
||||
|
||||
- [ADR Overview](./adr/README.md)
|
||||
- [000 · ADR Template](./adr/000-ADR-Template.md)
|
||||
- [001 · Why Rust](./adr/001-rust.md)
|
||||
- [002 · Hexagonal Architecture](./adr/002-hexagonal-architecture.md)
|
||||
- [003 · Infrastructure Abstractions](./adr/003-infrastructure-abstractions.md)
|
||||
- [004 · iPXE](./adr/004-ipxe.md)
|
||||
- [005 · Interactive Project](./adr/005-interactive-project.md)
|
||||
- [006 · Secret Management](./adr/006-secret-management.md)
|
||||
- [007 · Default Runtime](./adr/007-default-runtime.md)
|
||||
- [008 · Score Display Formatting](./adr/008-score-display-formatting.md)
|
||||
- [009 · Helm and Kustomize Handling](./adr/009-helm-and-kustomize-handling.md)
|
||||
- [010 · Monitoring and Alerting](./adr/010-monitoring-and-alerting.md)
|
||||
- [011 · Multi-Tenant Cluster](./adr/011-multi-tenant-cluster.md)
|
||||
- [012 · Project Delivery Automation](./adr/012-project-delivery-automation.md)
|
||||
- [013 · Monitoring Notifications](./adr/013-monitoring-notifications.md)
|
||||
- [015 · Higher Order Topologies](./adr/015-higher-order-topologies.md)
|
||||
- [016 · Harmony Agent and Global Mesh](./adr/016-Harmony-Agent-And-Global-Mesh-For-Decentralized-Workload-Management.md)
|
||||
- [017-1 · NATS Clusters Interconnection](./adr/017-1-Nats-Clusters-Interconnection-Topology.md)
|
||||
- [018 · Template Hydration for Workload Deployment](./adr/018-Template-Hydration-For-Workload-Deployment.md)
|
||||
- [019 · Network Bond Setup](./adr/019-Network-bond-setup.md)
|
||||
- [020 · Interactive Configuration Crate](./adr/020-interactive-configuration-crate.md)
|
||||
- [020-1 · Zitadel + OpenBao Secure Config Store](./adr/020-1-zitadel-openbao-secure-config-store.md)
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
## Status
|
||||
|
||||
Proposed
|
||||
Rejected: See [ADR 020](./020-interactive-configuration-crate.md)
|
||||
|
||||
### TODO [#3](https://git.nationtech.io/NationTech/harmony/issues/3):
|
||||
|
||||
docs/adr/017-2-reviewed-staleness-detection-algorithm.md (new file, 238 lines)
@@ -0,0 +1,238 @@
|
||||
Here are some rough notes on the previous design:
|
||||
|
||||
- We found an issue where the primary could flap when network latency is larger than the primary's self-fencing timeout.
- For example, if the network latency to get a NATS ack is 30 seconds (extreme, but it can happen) and self-fencing happens after 50 seconds: at second 50 self-fencing occurs, then at second 60 the ack comes in and is rejected because it already timed out. Then network latency drops back to 5 seconds and lets one successful heartbeat through, the primary marks itself healthy again, and the same thing repeats, so the primary flaps.
- At least this does not cause split brain: the replica never times out and wins the leadership write, because we validate strict write ordering and force consensus on writes.
|
||||
|
||||
Also, we were seeing that the implementation became more complex. There are a lot of timers to handle, which becomes hard to reason about for edge cases.
|
||||
|
||||
So, we came up with a slightly different approach, inspired by k8s liveness probes.
|
||||
|
||||
We now want to use failure and success threshold counters. However, on the replica side, all we can do is use a timer: the time elapsed since the JetStream metadata timestamp of the last primary heartbeat. We could also try to mitigate clock skew by measuring the gap between our internal clock and the JetStream metadata timestamp when writing our own heartbeat (not for now, but worth thinking about, though I feel like it is useless).
|
||||
|
||||
So the current working design is this:
|
||||
|
||||
configure:
- the number of consecutive successes to mark the node as UP
- the number of consecutive failures to mark the node as DOWN
- note that successes/failures must be consecutive: one success in a row of failures is enough to keep the service up. This allows for various configuration profiles, from very strict availability to very lenient, depending on the number of failures tolerated and successes required to keep the service up.
  - failure_threshold at 100 will let a service fail (or time out) 99 times out of 100 and stay up
  - success_threshold at 100 will not bring a service back up until it has succeeded 100 heartbeats in a row
  - failure threshold at 1 will fail the service at the slightest network latency spike or packet loss
  - success threshold at 1 will bring the service up very quickly and may cause flapping in unstable network conditions
|
||||
|
||||
|
||||
```
|
||||
# heartbeat session log
|
||||
# failure threshold : 3
|
||||
# success threshold : 2
|
||||
|
||||
STATUS UP :
|
||||
t=1 probe : fail f=1 s=0
|
||||
t=2 probe : fail f=2 s=0
|
||||
t=3 probe : ok f=0 s=1
|
||||
t=4 probe : fail f=1 s=0
|
||||
```
|
||||
|
||||
Scenario :
|
||||
|
||||
failure threshold = 2
|
||||
heartbeat timeout = 1s
|
||||
total before fencing = 2 * 1 = 2s
|
||||
|
||||
staleness detection timer = 2*total before fencing
|
||||
|
||||
Can we use this simple multiplication, where the staleness detection timer (the time the replica waits since the last primary heartbeat before promoting itself) is double the time the primary will take before starting the fencing process?
|
||||
|
||||
---
|
||||
|
||||
### Context
|
||||
We are designing a **Staleness-Based Failover Algorithm** for the Harmony Agent. The goal is to manage High Availability (HA) for stateful workloads (like PostgreSQL) across decentralized, variable-quality networks ("Micro Data Centers").
|
||||
|
||||
We are moving away from complex, synchronized clocks in favor of a **Counter-Based Liveness** approach (inspired by Kubernetes probes) for the Primary, and a **Time-Based Watchdog** for the Replica.
|
||||
|
||||
### 1. The Algorithm
|
||||
|
||||
#### The Primary (Self-Health & Fencing)
|
||||
The Primary validates its own "License to Operate" via a heartbeat loop.
|
||||
* **Loop:** Every `heartbeat_interval` (e.g., 1s), it attempts to write a heartbeat to NATS and check the local DB.
|
||||
* **Counters:** It maintains `consecutive_failures` and `consecutive_successes`.
|
||||
* **State Transition:**
|
||||
* **To UNHEALTHY:** If `consecutive_failures >= failure_threshold`, the Primary **Fences Self** (stops DB, releases locks).
|
||||
* **To HEALTHY:** If `consecutive_successes >= success_threshold`, the Primary **Un-fences** (starts DB, acquires locks).
|
||||
* **Reset Logic:** A single success resets the failure counter to 0, and vice versa.
|
||||
|
||||
#### The Replica (Staleness Detection)
|
||||
The Replica acts as a passive watchdog observing the NATS stream.
|
||||
* **Calculation:** It calculates a `MaxStaleness` timeout.
|
||||
$$ \text{MaxStaleness} = (\text{failure\_threshold} \times \text{heartbeat\_interval}) \times \text{SafetyMultiplier} $$
|
||||
*(We use a SafetyMultiplier of 2 to ensure the Primary has definitely fenced itself before we take over).*
|
||||
* **Action:** If `Time.now() - LastPrimaryHeartbeat > MaxStaleness`, the Replica assumes the Primary is dead and **Promotes Self**.
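
A minimal sketch of the counter-based loop described above, with hypothetical `probe`, `fence` and `unfence` helpers standing in for the real NATS heartbeat write, local DB check, and fencing procedure:

```rust
use std::time::Duration;

// Hypothetical stand-ins; the real agent wires these to NATS and the database.
async fn probe() -> bool { true }
async fn fence() {}
async fn unfence() {}

struct LivenessConfig {
    heartbeat_interval: Duration,
    failure_threshold: u32,
    success_threshold: u32,
}

async fn primary_loop(cfg: LivenessConfig) {
    let (mut failures, mut successes) = (0u32, 0u32);
    let mut healthy = true;

    loop {
        tokio::time::sleep(cfg.heartbeat_interval).await;

        if probe().await {
            successes += 1;
            failures = 0; // a single success resets the failure streak
            if !healthy && successes >= cfg.success_threshold {
                unfence().await; // start DB, reacquire locks
                healthy = true;
            }
        } else {
            failures += 1;
            successes = 0; // a single failure resets the success streak
            if healthy && failures >= cfg.failure_threshold {
                fence().await; // stop DB, release locks
                healthy = false;
            }
        }
    }
}
```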
|
||||
|
||||
---
|
||||
|
||||
### 2. Configuration Trade-offs
|
||||
|
||||
The separation of `success` and `failure` thresholds allows us to tune the "personality" of the cluster.
|
||||
|
||||
#### Scenario A: The "Nervous" Cluster (High Sensitivity)
|
||||
* **Config:** `failure_threshold: 1`, `success_threshold: 1`
|
||||
* **Behavior:** Fails over immediately upon a single missed packet or slow disk write.
|
||||
* **Pros:** Maximum availability for perfect networks.
|
||||
* **Cons:** **High Flapping Risk.** In a residential network, a microwave turning on might cause a failover.
|
||||
|
||||
#### Scenario B: The "Tank" Cluster (High Stability)
|
||||
* **Config:** `failure_threshold: 10`, `success_threshold: 1`
|
||||
* **Behavior:** The node must be consistently broken for 10 seconds (assuming 1s interval) to give up.
|
||||
* **Pros:** Extremely stable on bad networks (e.g., Starlink, 4G). Ignores transient spikes.
|
||||
* **Cons:** **Slow Failover.** Users experience 10+ seconds of downtime before the Replica even *thinks* about taking over.
|
||||
|
||||
#### Scenario C: The "Sticky" Cluster (Hysteresis)
|
||||
* **Config:** `failure_threshold: 5`, `success_threshold: 5`
|
||||
* **Behavior:** Hard to kill, hard to bring back.
|
||||
* **Pros:** Prevents "Yo-Yo" effects. If a node fails, it must prove it is *really* stable (5 clean checks in a row) before re-joining the cluster.
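
Expressed against the `LivenessConfig` sketched in section 1, the three profiles look like this (values taken directly from the scenarios above, with a 1s heartbeat interval assumed):

```rust
use std::time::Duration;

fn nervous() -> LivenessConfig {
    LivenessConfig { heartbeat_interval: Duration::from_secs(1), failure_threshold: 1, success_threshold: 1 }
}

fn tank() -> LivenessConfig {
    LivenessConfig { heartbeat_interval: Duration::from_secs(1), failure_threshold: 10, success_threshold: 1 }
}

fn sticky() -> LivenessConfig {
    LivenessConfig { heartbeat_interval: Duration::from_secs(1), failure_threshold: 5, success_threshold: 5 }
}
```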
|
||||
|
||||
---
|
||||
|
||||
### 3. Failure Modes & Behavior Analysis
|
||||
|
||||
Here is how the algorithm handles specific edge cases:
|
||||
|
||||
#### Case 1: Immediate Outage (Power Cut / Kernel Panic)
|
||||
* **Event:** Primary vanishes instantly. No more writes to NATS.
|
||||
* **Primary:** Does nothing (it's dead).
|
||||
* **Replica:** Sees the `LastPrimaryHeartbeat` timestamp age. Once it crosses `MaxStaleness`, it promotes itself.
|
||||
* **Outcome:** Clean failover after the timeout duration.
|
||||
|
||||
#### Case 2: Network Instability (Packet Loss / Jitter)
|
||||
* **Event:** The Primary fails to write to NATS for 2 cycles due to Wi-Fi interference, then succeeds on the 3rd.
|
||||
* **Config:** `failure_threshold: 5`.
|
||||
* **Primary:**
|
||||
* $t=1$: Fail (Counter=1)
|
||||
* $t=2$: Fail (Counter=2)
|
||||
* $t=3$: Success (Counter resets to 0). **State remains HEALTHY.**
|
||||
* **Replica:** Sees a gap in heartbeats but the timestamp never exceeds `MaxStaleness`.
|
||||
* **Outcome:** No downtime, no failover. The system correctly identified this as noise, not failure.
|
||||
|
||||
#### Case 3: High Latency (The "Slow Death")
|
||||
* **Event:** Primary is under heavy load; heartbeats take 1.5s to complete (interval is 1s).
|
||||
* **Primary:** The `timeout` on the heartbeat logic triggers. `consecutive_failures` rises. Eventually, it hits `failure_threshold` and fences itself to prevent data corruption.
|
||||
* **Replica:** Sees the heartbeats stop (or arrive too late). The timestamp ages out.
|
||||
* **Outcome:** Primary fences self -> Replica waits for safety buffer -> Replica promotes. **Split-brain is avoided** because the Primary killed itself *before* the Replica acted (due to the SafetyMultiplier).
|
||||
|
||||
#### Case 4: Replica Network Partition
|
||||
* **Event:** Replica loses internet connection; Primary is fine.
|
||||
* **Replica:** Sees `LastPrimaryHeartbeat` age out (because it can't reach NATS). It *wants* to promote itself.
|
||||
* **Constraint:** To promote, the Replica must write to NATS. Since it is partitioned, the NATS write fails.
|
||||
* **Outcome:** The Replica remains in Standby (or fails to promote). The Primary continues serving traffic. **Cluster integrity is preserved.**
|
||||
|
||||
|
||||
----
|
||||
|
||||
|
||||
### Context & Use Case
|
||||
We are implementing a High Availability (HA) Failover Strategy for decentralized "Micro Data Centers." The core challenge is managing stateful workloads (PostgreSQL) over unreliable networks.
|
||||
|
||||
We solve this using a **Local Fencing First** approach, backed by **NATS JetStream Strict Ordering** for the final promotion authority.
|
||||
|
||||
In CAP theorem terms, we are developing a CP system, intentionally sacrificing availability. In practical terms, we expect an average of two primary outages per year, with a failover delay of around 2 minutes. This translates to an uptime of over five nines. To be precise, 2 outages * 2 minutes = 4 minutes per year = 99.99924% uptime.
|
||||
|
||||
### The Algorithm: Local Fencing & Remote Promotion
|
||||
|
||||
The safety (data consistency) of the system relies on the time gap between the **Primary giving up (Fencing)** and the **Replica taking over (Promotion)**.
|
||||
|
||||
To avoid clock skew issues between agents and the datastore (NATS), all timestamp comparisons will be done using JetStream metadata. I.e., a Harmony agent will never use `Instant::now()` to get a timestamp; it will use `my_last_heartbeat.metadata.timestamp` (conceptually).
|
||||
|
||||
#### 1. Configuration
|
||||
* `heartbeat_timeout` (e.g., 1s): Max time allowed for a NATS write/DB check.
|
||||
* `failure_threshold` (e.g., 2): Consecutive failures before self-fencing.
|
||||
* `failover_timeout` (e.g., 5s): Time since last NATS update of Primary heartbeat before Replica promotes.
|
||||
* This timeout must be carefully configured to allow enough time for the primary to fence itself (after `heartbeat_timeout * failure_threshold`) BEFORE the replica gets promoted to avoid a split brain with two primaries.
|
||||
* Implementing this will rely on the actual deployment configuration. For example, a CNPG based PostgreSQL cluster might require a longer gap (such as 30s) than other technologies.
|
||||
* Expires when `replica_heartbeat.metadata.timestamp - primary_heartbeat.metadata.timestamp > failover_timeout`
|
||||
|
||||
#### 2. The Primary (Self-Preservation)
|
||||
|
||||
The Primary is aggressive about killing itself.
|
||||
|
||||
* It attempts a heartbeat.
|
||||
* If the network latency > `heartbeat_timeout`, the attempt is **cancelled locally** because the heartbeat did not make it back in time.
|
||||
* This counts as a failure and increments the `consecutive_failures` counter.
|
||||
* If `consecutive_failures` hits the threshold, **FENCING occurs immediately**. The database is stopped.
|
||||
|
||||
This means that the Primary will fence itself after `heartbeat_timeout * failure_threshold`.
|
||||
|
||||
#### 3. The Replica (The Watchdog)
|
||||
|
||||
The Replica is patient.
|
||||
|
||||
* It watches the NATS stream to measure if `replica_heartbeat.metadata.timestamp - primary_heartbeat.metadata.timestamp > failover_timeout`
|
||||
* It only attempts promotion if the `failover_timeout` (5s) has passed.
|
||||
* **Crucial:** Careful configuration of the failover_timeout is required. This is the only way to avoid a split brain in case of a network partition where the Primary cannot write its heartbeats in time anymore.
|
||||
* In short, `failover_timeout` should be tuned to be `heartbeat_timeout * failure_threshold + safety_margin`. This `safety_margin` will vary by use case. For example, a CNPG cluster may need 30 seconds to demote a Primary to Replica when fencing is triggered, so `safety_margin` should be at least 30s in that setup.
|
||||
|
||||
Since we forcibly fail timeouts after `heartbeat_timeout`, we are guaranteed that the primary will have **started** the fencing process after `heartbeat_timeout * failure_threshold`.
|
||||
|
||||
But, in a network split scenario where the failed primary is still accessible by clients but cannot write its heartbeat successfully, there is no way to know if the demotion has actually **completed**.
|
||||
|
||||
For example, in a CNPG cluster, the failed Primary agent will attempt to change the CNPG cluster state to read-only. But if anything fails after that attempt (permission error, k8s api failure, CNPG bug, etc) it is possible that the PostgreSQL instance keeps accepting writes.
|
||||
|
||||
While this is not a theoretical failure of the agent's algorithm, this is a practical failure where data corruption occurs.
|
||||
|
||||
This can be fixed by detecting the demotion failure and escalating the aggressiveness of the fencing procedure. Harmony being an infrastructure orchestrator, it can easily take radical measures if given the proper credentials, such as forcibly powering off a server, disconnecting its network in the switch configuration, or forcibly killing a pod/container/process.
|
||||
|
||||
However, these details are out of scope of this algorithm, as they simply fall under the "fencing procedure".
|
||||
|
||||
The implementation of the fencing procedure itself is not relevant. This algorithm's responsibility stops at calling the fencing procedure in the appropriate situation.
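
A minimal sketch of the watchdog loop, with hypothetical helpers standing in for reading the JetStream metadata timestamps and for the strictly ordered leadership write:

```rust
use std::time::Duration;

// Hypothetical helpers: both timestamps come from JetStream message metadata,
// never from the local clock, so clock skew between agents does not matter.
async fn primary_heartbeat_ts_secs() -> u64 { 0 }
async fn replica_heartbeat_ts_secs() -> u64 { 0 }
async fn try_promote_with_ordered_write() -> bool { false }

async fn replica_watchdog(failover_timeout: Duration, poll_interval: Duration) {
    loop {
        tokio::time::sleep(poll_interval).await;

        let age_secs = replica_heartbeat_ts_secs()
            .await
            .saturating_sub(primary_heartbeat_ts_secs().await);

        if age_secs > failover_timeout.as_secs() {
            // Promotion still has to win the leadership write under NATS strict
            // ordering; if the primary is in fact alive, the write is rejected
            // and the replica stays in standby.
            if try_promote_with_ordered_write().await {
                return;
            }
        }
    }
}
```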
|
||||
|
||||
#### 4. The Demotion Handshake (Return to Normalcy)
|
||||
|
||||
When the original Primary recovers:
|
||||
|
||||
1. It becomes healthy locally but sees `current_primary = Replica`. It waits.
|
||||
2. The Replica (current leader) detects the Original Primary is back (via NATS heartbeats).
|
||||
3. Replica performs a **Clean Demotion**:
|
||||
* Stops DB.
|
||||
* Writes `current_primary = None` to NATS.
|
||||
4. Original Primary sees `current_primary = None` and can launch the promotion procedure.
|
||||
|
||||
Depending on the implementation, the promotion procedure may require a transition phase. Typically, for a PostgreSQL use case the promoting primary will make sure it has caught up on WAL replication before starting to accept writes.
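
A sketch of the recovered primary's side of the handshake (steps 1 and 4), with hypothetical helpers for the NATS cluster-state read and the promotion procedure:

```rust
use std::time::Duration;

// Hypothetical helpers: `read_current_primary` reads cluster state from NATS,
// `start_promotion_procedure` catches up on WAL replication before accepting writes.
async fn read_current_primary() -> Option<String> { None }
async fn start_promotion_procedure() {}

async fn recovered_primary_wait(my_id: &str) {
    loop {
        match read_current_primary().await {
            // Step 4: the current leader performed a clean demotion; we may promote.
            None => {
                start_promotion_procedure().await;
                return;
            }
            // We are already recorded as primary; nothing to do.
            Some(leader) if leader == my_id => return,
            // Step 1: another node is still primary; stay healthy locally and wait.
            Some(_) => tokio::time::sleep(Duration::from_secs(1)).await,
        }
    }
}
```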
|
||||
|
||||
---
|
||||
|
||||
### Failure Modes & Behavior Analysis
|
||||
|
||||
#### Case 1: Immediate Outage (Power Cut)
|
||||
|
||||
* **Primary:** Dies instantly. Fencing is implicit (machine is off).
|
||||
* **Replica:** Waits for `failover_timeout` (5s). Sees staleness. Promotes self.
|
||||
* **Outcome:** Clean failover after 5s.
|
||||
|
||||
// TODO detail what happens when the primary comes back up. We will likely have to tie PostgreSQL's lifecycle (liveness/readiness probes) with the agent to ensure it does not come back up as primary.
|
||||
|
||||
#### Case 2: High Network Latency on the Primary (The "Split Brain" Trap)
|
||||
|
||||
* **Scenario:** Network latency spikes to 5s on the Primary, still below `heartbeat_timeout` on the Replica.
|
||||
* **T=0 to T=2 (Primary):** Tries to write. Latency (5s) > Timeout (1s). Fails twice.
|
||||
* **T=2 (Primary):** `consecutive_failures` = 2. **Primary Fences Self.** (Service is DOWN).
|
||||
* **T=2 to T=5 (Cluster):** **Read-Only Phase.** No Primary exists.
|
||||
* **T=5 (Replica):** `failover_timeout` reached. Replica promotes self.
|
||||
* **Outcome:** Safe failover. The "Read-Only Gap" (T=2 to T=5) ensures no Split Brain occurred.
|
||||
|
||||
#### Case 3: Replica Network Lag (False Positive)
|
||||
|
||||
* **Scenario:** Replica has high latency, greater than `failover_timeout`; Primary is fine.
|
||||
* **Replica:** Thinks Primary is dead. Tries to promote by setting `cluster_state.current_primary = replica_id`.
|
||||
* **NATS:** Rejects the write because the Primary is still updating the sequence numbers successfully.
|
||||
* **Outcome:** Promotion denied. Primary stays leader.
|
||||
|
||||
#### Case 4: Network Instability (Flapping)
|
||||
|
||||
* **Scenario:** Intermittent packet loss.
|
||||
* **Primary:** Fails 1 heartbeat, succeeds the next. `consecutive_failures` resets.
|
||||
* **Replica:** Sees a slight delay in updates, but never reaches `failover_timeout`.
|
||||
* **Outcome:** No Fencing, No Promotion. System rides out the noise.
|
||||
|
||||
## Contextual notes
|
||||
|
||||
* Clock skew: Tokio relies on monotonic clocks, so `tokio::time::sleep(...)` is not affected by system clock corrections (such as NTP). But monotonic clocks are known to jump forward in some cases, such as VM live migrations, which could cause a false timeout of a single heartbeat. With `failure_threshold = 1`, a single such jump is enough to falsely declare the node unhealthy and trigger a needless demotion.
|
||||
107
docs/adr/017-3-revised-staleness-inspired-by-kubernetes.md
Normal file
107
docs/adr/017-3-revised-staleness-inspired-by-kubernetes.md
Normal file
@@ -0,0 +1,107 @@
|
||||
### Context & Use Case
|
||||
We are implementing a High Availability (HA) Failover Strategy for decentralized "Micro Data Centers." The core challenge is managing stateful workloads (PostgreSQL) over unreliable networks.
|
||||
|
||||
We solve this using a **Local Fencing First** approach, backed by **NATS JetStream Strict Ordering** for the final promotion authority.
|
||||
|
||||
In CAP theorem terms, we are developing a CP system, intentionally sacrificing availability. In practical terms, we expect an average of two primary outages per year, with a failover delay of around 2 minutes. This translates to an uptime of over five nines. To be precise, 2 outages * 2 minutes = 4 minutes per year = 99.99924% uptime.
|
||||
|
||||
### The Algorithm: Local Fencing & Remote Promotion
|
||||
|
||||
The safety (data consistency) of the system relies on the time gap between the **Primary giving up (Fencing)** and the **Replica taking over (Promotion)**.
|
||||
|
||||
To avoid clock skew issues between agents and the datastore (NATS), all timestamp comparisons will be done using JetStream metadata. That is, a Harmony agent will never use `Instant::now()` to get a timestamp; it will use `my_last_heartbeat.metadata.timestamp` (conceptually).
|
||||
|
||||
#### 1. Configuration
|
||||
* `heartbeat_timeout` (e.g., 1s): Max time allowed for a NATS write/DB check.
|
||||
* `failure_threshold` (e.g., 2): Consecutive failures before self-fencing.
|
||||
* `failover_timeout` (e.g., 5s): Time since last NATS update of Primary heartbeat before Replica promotes.
|
||||
* This timeout must be carefully configured to allow enough time for the primary to fence itself (after `heartbeat_timeout * failure_threshold`) BEFORE the replica gets promoted to avoid a split brain with two primaries.
|
||||
* The right value depends on the actual deployment. For example, a CNPG-based PostgreSQL cluster might require a longer gap (such as 30s) than other technologies.
|
||||
* Expires when `replica_heartbeat.metadata.timestamp - primary_heartbeat.metadata.timestamp > failover_timeout`
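
A minimal sketch of how these three tunables and the safety rule above fit together (the struct and method names are illustrative, not the actual Harmony agent types):

```rust
use std::time::Duration;

/// Illustrative grouping of the three tunables described above.
#[derive(Debug, Clone)]
pub struct FailoverConfig {
    /// Max time allowed for a NATS write / DB check (also used as the heartbeat interval).
    pub heartbeat_timeout: Duration,
    /// Consecutive heartbeat failures before the primary fences itself.
    pub failure_threshold: u32,
    /// Staleness of the primary heartbeat before the replica promotes itself.
    pub failover_timeout: Duration,
}

impl FailoverConfig {
    /// The primary is guaranteed to have *started* fencing after this long.
    pub fn fencing_deadline(&self) -> Duration {
        self.heartbeat_timeout * self.failure_threshold
    }

    /// Safety rule: the replica must wait at least the fencing deadline plus a
    /// use-case-specific safety margin (e.g. 30s for a CNPG demotion).
    pub fn is_safe(&self, safety_margin: Duration) -> bool {
        self.failover_timeout >= self.fencing_deadline() + safety_margin
    }
}
```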
|
||||
|
||||
#### 2. The Primary (Self-Preservation)
|
||||
|
||||
The Primary is aggressive about killing itself.
|
||||
|
||||
* It attempts a heartbeat.
|
||||
* If the network latency > `heartbeat_timeout`, the attempt is **cancelled locally** because the heartbeat did not make it back in time.
|
||||
* This counts as a failure and increments the `consecutive_failures` counter.
|
||||
* If `consecutive_failures` hits the threshold, **FENCING occurs immediately**. The database is stopped.
|
||||
|
||||
This means that the Primary will fence itself after `heartbeat_timeout * failure_threshold`.
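
A minimal sketch of this self-preservation loop, reusing the `FailoverConfig` sketch from the configuration section (`write_heartbeat` and `fence_self` are stubs standing in for the real NATS write and fencing procedure):

```rust
/// Sketch of the primary's self-preservation loop.
async fn primary_loop(cfg: &FailoverConfig) {
    let mut consecutive_failures: u32 = 0;
    loop {
        // Force-fail the attempt if it does not complete within heartbeat_timeout.
        let ok = tokio::time::timeout(cfg.heartbeat_timeout, write_heartbeat())
            .await
            .map(|res| res.is_ok())
            .unwrap_or(false);

        if ok {
            consecutive_failures = 0;
            // heartbeat_timeout doubles as the heartbeat interval (see Contextual notes).
            tokio::time::sleep(cfg.heartbeat_timeout).await;
        } else {
            consecutive_failures += 1;
            if consecutive_failures >= cfg.failure_threshold {
                // FENCING: stop the database immediately, then exit the loop.
                fence_self().await;
                return;
            }
            // A timed-out attempt already consumed heartbeat_timeout, so retrying right
            // away means fencing starts at most heartbeat_timeout * failure_threshold
            // after the first failure, as described above.
        }
    }
}

// Stubs standing in for the real implementations.
async fn write_heartbeat() -> Result<(), ()> { Ok(()) }
async fn fence_self() {}
```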
|
||||
|
||||
#### 3. The Replica (The Watchdog)
|
||||
|
||||
The Replica is patient.
|
||||
|
||||
* It watches the NATS stream to measure if `replica_heartbeat.metadata.timestamp - primary_heartbeat.metadata.timestamp > failover_timeout`
|
||||
* It only attempts promotion if the `failover_timeout` (5s) has passed.
|
||||
* **Crucial:** Careful configuration of `failover_timeout` is required. This is the only way to avoid a split brain in case of a network partition where the Primary cannot write its heartbeats in time anymore.
|
||||
* In short, `failover_timeout` should be tuned to be `heartbeat_timeout * failure_threshold + safety_margin`. This `safety_margin` will vary by use case. For example, a CNPG cluster may need 30 seconds to demote a Primary to Replica when fencing is triggered, so `safety_margin` should be at least 30s in that setup.
|
||||
|
||||
Since we forcibly fail timeouts after `heartbeat_timeout`, we are guaranteed that the primary will have **started** the fencing process after `heartbeat_timeout * failure_threshold`.
|
||||
|
||||
But, in a network split scenario where the failed primary is still accessible by clients but cannot write its heartbeat successfully, there is no way to know if the demotion has actually **completed**.
|
||||
|
||||
For example, in a CNPG cluster, the failed Primary agent will attempt to change the CNPG cluster state to read-only. But if anything fails after that attempt (permission error, k8s api failure, CNPG bug, etc) it is possible that the PostgreSQL instance keeps accepting writes.
|
||||
|
||||
While this is not a theoretical failure of the agent's algorithm, it is a practical failure in which data corruption can occur.
|
||||
|
||||
This can be fixed by detecting the demotion failure and escalating the aggressiveness of the fencing procedure. As an infrastructure orchestrator, Harmony can easily apply radical measures if given the proper credentials, such as forcibly powering off a server, disconnecting its network at the switch, or forcibly killing a pod/container/process.
|
||||
|
||||
However, these details are out of scope of this algorithm, as they simply fall under the "fencing procedure".
|
||||
|
||||
The implementation of the fencing procedure itself is not relevant. This algorithm's responsibility stops at calling the fencing procedure in the appropriate situation.
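
Putting the watchdog rules together, a minimal sketch of the replica loop (again reusing `FailoverConfig`; the staleness read and the guarded promotion write are stubs):

```rust
use std::time::Duration;

/// Sketch of the replica watchdog. The staleness value stands in for
/// `replica_heartbeat.metadata.timestamp - primary_heartbeat.metadata.timestamp`,
/// both read from JetStream metadata rather than the local clock.
async fn replica_watchdog(cfg: &FailoverConfig) {
    loop {
        let staleness = primary_heartbeat_staleness().await;

        if staleness > cfg.failover_timeout {
            // The promotion is a guarded write: NATS rejects it if the primary
            // is in fact still advancing the stream (see Case 3 below).
            if try_promote_self().await.is_ok() {
                return; // this node is now the primary
            }
        }

        tokio::time::sleep(cfg.heartbeat_timeout).await;
    }
}

// Stubs standing in for the real NATS reads and promotion procedure.
async fn primary_heartbeat_staleness() -> Duration { Duration::ZERO }
async fn try_promote_self() -> Result<(), ()> { Ok(()) }
```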
|
||||
|
||||
#### 4. The Demotion Handshake (Return to Normalcy)
|
||||
|
||||
When the original Primary recovers:
|
||||
|
||||
1. It becomes healthy locally but sees `current_primary = Replica`. It waits.
|
||||
2. The Replica (current leader) detects the Original Primary is back (via NATS heartbeats).
|
||||
3. Replica performs a **Clean Demotion**:
|
||||
* Stops DB.
|
||||
* Writes `current_primary = None` to NATS.
|
||||
4. Original Primary sees `current_primary = None` and can launch the promotion procedure.
|
||||
|
||||
Depending on the implementation, the promotion procedure may require a transition phase. Typically, for a PostgreSQL use case the promoting primary will make sure it has caught up on WAL replication before starting to accept writes.
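
A minimal sketch of the recovering node's side of this handshake (`read_current_primary` and `promote_self` are stubs; the real promotion procedure would include the WAL catch-up described above):

```rust
/// Sketch of the recovering preferred primary waiting for a clean handover.
async fn wait_for_handover(my_id: &str, cfg: &FailoverConfig) {
    loop {
        match read_current_primary().await.as_deref() {
            // The replica completed its clean demotion: launch the promotion procedure.
            None => {
                promote_self().await;
                return;
            }
            // Another node is still recorded as primary: keep waiting.
            Some(current) if current != my_id => {
                tokio::time::sleep(cfg.heartbeat_timeout).await;
            }
            // We are already recorded as primary (e.g. after a clean restart).
            Some(_) => return,
        }
    }
}

// Stubs standing in for the real implementations.
async fn read_current_primary() -> Option<String> { None }
async fn promote_self() {}
```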
|
||||
|
||||
---
|
||||
|
||||
### Failure Modes & Behavior Analysis
|
||||
|
||||
#### Case 1: Immediate Outage (Power Cut)
|
||||
|
||||
* **Primary:** Dies instantly. Fencing is implicit (machine is off).
|
||||
* **Replica:** Waits for `failover_timeout` (5s). Sees staleness. Promotes self.
|
||||
* **Outcome:** Clean failover after 5s.
|
||||
|
||||
// TODO detail what happens when the primary comes back up. We will likely have to tie PostgreSQL's lifecycle (liveness/readiness probes) with the agent to ensure it does not come back up as primary.
|
||||
|
||||
#### Case 2: High Network Latency on the Primary (The "Split Brain" Trap)
|
||||
|
||||
* **Scenario:** Network latency spikes to 5s on the Primary, still below `heartbeat_timeout` on the Replica.
|
||||
* **T=0 to T=2 (Primary):** Tries to write. Latency (5s) > Timeout (1s). Fails twice.
|
||||
* **T=2 (Primary):** `consecutive_failures` = 2. **Primary Fences Self.** (Service is DOWN).
|
||||
* **T=2 to T=5 (Cluster):** **Read-Only Phase.** No Primary exists.
|
||||
* **T=5 (Replica):** `failover_timeout` reached. Replica promotes self.
|
||||
* **Outcome:** Safe failover. The "Read-Only Gap" (T=2 to T=5) ensures no Split Brain occurred.
|
||||
|
||||
#### Case 3: Replica Network Lag (False Positive)
|
||||
|
||||
* **Scenario:** Replica has high latency, greater than `failover_timeout`; Primary is fine.
|
||||
* **Replica:** Thinks Primary is dead. Tries to promote by setting `cluster_state.current_primary = replica_id`.
|
||||
* **NATS:** Rejects the write because the Primary is still updating the sequence numbers successfully.
|
||||
* **Outcome:** Promotion denied. Primary stays leader.
|
||||
|
||||
#### Case 4: Network Instability (Flapping)
|
||||
|
||||
* **Scenario:** Intermittent packet loss.
|
||||
* **Primary:** Fails 1 heartbeat, succeeds the next. `consecutive_failures` resets.
|
||||
* **Replica:** Sees a slight delay in updates, but never reaches `failover_timeout`.
|
||||
* **Outcome:** No Fencing, No Promotion. System rides out the noise.
|
||||
|
||||
## Contextual notes
|
||||
|
||||
* Clock skew: Tokio relies on monotonic clocks, so `tokio::time::sleep(...)` is not affected by system clock corrections (such as NTP). But monotonic clocks are known to jump forward in some cases, such as VM live migrations, which could cause a false timeout of a single heartbeat. With `failure_threshold = 1`, a single such jump is enough to falsely declare the node unhealthy and trigger a needless demotion.
|
||||
* `heartbeat_timeout == heartbeat_interval`: We intentionally do not provide two separate settings for the timeout after which a heartbeat is considered failed and the interval between heartbeats. A configuration with a small `heartbeat_timeout = 50ms` and a larger `heartbeat_interval = 2s` could make sense where low network latency is required, but we have no practical use case for it yet, and a timeout larger than the interval does not make sense in any situation we can think of. So we use a single value for both, which makes the algorithm easier to reason about and implement.
|
||||
95
docs/adr/017-staleness-detection-for-failover.md
Normal file
95
docs/adr/017-staleness-detection-for-failover.md
Normal file
@@ -0,0 +1,95 @@
|
||||
# Architecture Decision Record: Staleness-Based Failover Mechanism & Observability
|
||||
|
||||
**Status:** Proposed
|
||||
**Date:** 2026-01-09
|
||||
**Precedes:** [016-Harmony-Agent-And-Global-Mesh-For-Decentralized-Workload-Management.md](https://git.nationtech.io/NationTech/harmony/raw/branch/master/adr/016-Harmony-Agent-And-Global-Mesh-For-Decentralized-Workload-Management.md)
|
||||
|
||||
## Context
|
||||
|
||||
In ADR 016, we established the **Harmony Agent** and the **Global Orchestration Mesh** (powered by NATS JetStream) as the foundation for our decentralized infrastructure. We defined the high-level need for a `FailoverStrategy` that can support both financial consistency (CP) and AI availability (AP).
|
||||
|
||||
However, a specific implementation challenge remains: **How do we reliably detect node failure without losing the ability to debug the event later?**
|
||||
|
||||
Standard distributed systems often use "Key Expiration" (TTL) for heartbeats. If a key disappears, the node is presumed dead. While simple, this approach is catastrophic for post-mortem analysis. When the key expires, the evidence of *when* and *how* the failure occurred evaporates.
|
||||
|
||||
For NationTech’s vision of **Humane Computing**—where micro datacenters might be heating a family home or running a local business—reliability and diagnosability are paramount. If a cluster fails over, we owe it to the user to provide a clear, historical log of exactly what happened. We cannot build a "wonderful future for computers" on ephemeral, untraceable errors.
|
||||
|
||||
## Decision
|
||||
|
||||
We will implement a **Staleness Detection** mechanism rather than a Key Expiration mechanism. We will leverage NATS JetStream Key-Value (KV) stores with **History Enabled** to create an immutable audit trail of cluster health.
|
||||
|
||||
### 1. The "Black Box" Flight Recorder (NATS Configuration)
|
||||
We will utilize a persistent NATS KV bucket named `harmony_failover`.
|
||||
* **Storage:** File (Persistent).
|
||||
* **History:** Set to `64` (or higher). This allows us to query the last 64 heartbeat entries to visualize the exact degradation of the primary node before failure.
|
||||
* **TTL:** None. Data never disappears; it only becomes "stale."
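
As a sketch, creating this bucket with the `async-nats` crate might look like the following (the crate choice and server URL are assumptions; the bucket parameters are the ones listed above):

```rust
use async_nats::jetstream::{self, kv, stream::StorageType};

/// Create the persistent "black box" bucket described above.
async fn create_failover_bucket() -> Result<kv::Store, Box<dyn std::error::Error>> {
    let client = async_nats::connect("nats://localhost:4222").await?;
    let js = jetstream::new(client);

    let store = js
        .create_key_value(kv::Config {
            bucket: "harmony_failover".to_string(),
            history: 64,                // keep the last 64 entries per key
            storage: StorageType::File, // persistent, survives restarts
            // max_age is left at its default: entries never expire,
            // they only become "stale".
            ..Default::default()
        })
        .await?;

    Ok(store)
}
```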
|
||||
|
||||
### 2. Data Structures
|
||||
We will define two primary schemas to manage the state.
|
||||
|
||||
|
||||
**A. The Rules of Engagement (`cluster_config`)**
|
||||
This persistent key defines the behavior of the mesh. It allows us to tune failover sensitivity dynamically without redeploying the Agent binary.
|
||||
|
||||
```json
|
||||
{
|
||||
"primary_site_id": "site-a-basement",
|
||||
"replica_site_id": "site-b-cloud",
|
||||
"failover_timeout_ms": 5000, // Time before Replica takes over
|
||||
"heartbeat_interval_ms": 1000 // Frequency of Primary updates
|
||||
}
|
||||
```
|
||||
|
||||
> **Note :** The location for this configuration data structure is TBD. See https://git.nationtech.io/NationTech/harmony/issues/206
|
||||
|
||||
**B. The Heartbeat (`primary_heartbeat`)**
|
||||
The Primary writes this; the Replica watches it.
|
||||
|
||||
```json
|
||||
{
|
||||
"site_id": "site-a-basement",
|
||||
"status": "HEALTHY",
|
||||
"counter": 10452,
|
||||
"timestamp": 1704661549000
|
||||
}
|
||||
```
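
A Rust mirror of this payload might look like the following (field names follow the JSON above; the struct name is illustrative):

```rust
use serde::{Deserialize, Serialize};

/// Payload written by the Primary and watched by the Replica.
#[derive(Debug, Serialize, Deserialize)]
pub struct PrimaryHeartbeat {
    pub site_id: String,
    pub status: String, // e.g. "HEALTHY"
    pub counter: u64,   // monotonically increasing per write
    pub timestamp: u64, // unix millis; a debugging aid, ordering relies on NATS metadata
}
```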
|
||||
|
||||
### 3. The Failover Algorithm
|
||||
|
||||
**The Primary (Site A) Logic:**
|
||||
The Primary's ability to write to the mesh is its "License to Operate."
|
||||
1. **Write Loop:** Attempts to write `primary_heartbeat` every `heartbeat_interval_ms`.
|
||||
2. **Self-Preservation (Fencing):** If the write fails (NATS Ack timeout or NATS unreachable), the Primary **immediately self-demotes**. It assumes it is network-isolated. This prevents Split Brain scenarios where a partitioned Primary continues to accept writes while the Replica promotes itself.
|
||||
|
||||
**The Replica (Site B) Logic:**
|
||||
The Replica acts as the watchdog.
|
||||
1. **Watch:** Subscribes to updates on `primary_heartbeat`.
|
||||
2. **Staleness Check:** Maintains a local timer. Every time a heartbeat arrives, the timer resets.
|
||||
3. **Promotion:** If the timer exceeds `failover_timeout_ms`, the Replica declares the Primary dead and promotes itself to Leader.
|
||||
4. **Yielding:** If the Replica is Leader, but suddenly receives a valid, new heartbeat from the configured `primary_site_id` (indicating the Primary has recovered), the Replica will voluntarily **demote** itself to restore the preferred topology.
|
||||
|
||||
## Rationale
|
||||
|
||||
**Observability as a First-Class Citizen**
|
||||
By keeping the last 64 heartbeats, we can run `nats kv history` to see the exact timeline. Did the Primary stop suddenly (crash), or did the heartbeats become erratic and slow before stopping (network congestion)? This data is critical for optimizing the "Micro Data Centers" described in our vision, where internet connections in residential areas may vary in quality.
|
||||
|
||||
**Energy Efficiency & Resource Optimization**
|
||||
NationTech aims to "maximize the value of our energy." A "flapping" cluster (constantly failing over and back) wastes immense energy in data re-synchronization and startup costs. By making the `failover_timeout_ms` configurable via `cluster_config`, we can tune a cluster heating a greenhouse to be less sensitive (slower failover is fine) compared to a cluster running a payment gateway.
|
||||
|
||||
**Decentralized Trust**
|
||||
This architecture relies on NATS as the consensus engine. If the Primary is part of the NATS majority, it lives. If it isn't, it dies. This removes ambiguity and allows us to scale to thousands of independent sites without a central "God mode" controller managing every single failover.
|
||||
|
||||
## Consequences
|
||||
|
||||
**Positive**
|
||||
* **Auditability:** Every failover event leaves a permanent trace in the KV history.
|
||||
* **Safety:** The "Write Ack" check on the Primary provides a strong guarantee against Split Brain in `AbsoluteConsistency` mode.
|
||||
* **Dynamic Tuning:** We can adjust timeouts for specific environments (e.g., high-latency satellite links) by updating a JSON key, requiring no downtime.
|
||||
|
||||
**Negative**
|
||||
* **Storage Overhead:** Keeping history requires marginally more disk space on the NATS servers, though for 64 small JSON payloads, this is negligible.
|
||||
* **Clock Skew:** While we rely on NATS server-side timestamps for ordering, extreme clock skew on the client side could confuse the debug logs (though not the failover logic itself).
|
||||
|
||||
## Alignment with Vision
|
||||
This architecture supports the NationTech goal of a **"Beautifully Integrated Design."** It takes the complex, high-stakes problem of distributed consensus and wraps it in a mechanism that is robust enough for enterprise banking yet flexible enough to manage a basement server heating a swimming pool. It bridges the gap between the reliability of Web2 clouds and the decentralized nature of Web3 infrastructure.
|
||||
|
||||
233
docs/adr/020-1-zitadel-openbao-secure-config-store.md
Normal file
233
docs/adr/020-1-zitadel-openbao-secure-config-store.md
Normal file
@@ -0,0 +1,233 @@
|
||||
# ADR 020-1: Zitadel OIDC and OpenBao Integration for the Config Store
|
||||
|
||||
Author: Jean-Gabriel Gill-Couture
|
||||
|
||||
Date: 2026-03-18
|
||||
|
||||
## Status
|
||||
|
||||
Proposed
|
||||
|
||||
## Context
|
||||
|
||||
ADR 020 defines a unified `harmony_config` crate with a `ConfigStore` trait. The default team-oriented backend is OpenBao, which provides encrypted storage, versioned KV, audit logging, and fine-grained access control.
|
||||
|
||||
OpenBao requires authentication. The question is how developers authenticate without introducing new credentials to manage.
|
||||
|
||||
The goals are:
|
||||
|
||||
- **Zero new credentials.** Developers log in with their existing corporate identity (Google Workspace, GitHub, or Microsoft Entra ID / Azure AD).
|
||||
- **Headless compatibility.** The flow must work over SSH, inside containers, and in CI — environments with no browser or localhost listener.
|
||||
- **Minimal friction.** After a one-time login, authentication should be invisible for weeks of active use.
|
||||
- **Centralized offboarding.** Revoking a user in the identity provider must immediately revoke their access to the config store.
|
||||
|
||||
## Decision
|
||||
|
||||
Developers authenticate to OpenBao through a two-step process: first, they obtain an OIDC token from Zitadel (`sso.nationtech.io`) using the OAuth 2.0 Device Authorization Grant (RFC 8628); then, they exchange that token for a short-lived OpenBao client token via OpenBao's JWT auth method.
|
||||
|
||||
### The authentication flow
|
||||
|
||||
#### Step 1: Trigger
|
||||
|
||||
The `ConfigManager` attempts to resolve a value via the `StoreSource`. The `StoreSource` checks for a cached OpenBao token in `~/.local/share/harmony/session.json`. If the token is missing or expired, authentication begins.
|
||||
|
||||
#### Step 2: Device Authorization Request
|
||||
|
||||
Harmony sends a `POST` to Zitadel's device authorization endpoint:
|
||||
|
||||
```
|
||||
POST https://sso.nationtech.io/oauth/v2/device_authorization
|
||||
Content-Type: application/x-www-form-urlencoded
|
||||
|
||||
client_id=<harmony_client_id>&scope=openid email profile offline_access
|
||||
```
|
||||
|
||||
Zitadel responds with:
|
||||
|
||||
```json
|
||||
{
|
||||
"device_code": "dOcbPeysDhT26ZatRh9n7Q",
|
||||
"user_code": "GQWC-FWFK",
|
||||
"verification_uri": "https://sso.nationtech.io/device",
|
||||
"verification_uri_complete": "https://sso.nationtech.io/device?user_code=GQWC-FWFK",
|
||||
"expires_in": 300,
|
||||
"interval": 5
|
||||
}
|
||||
```
|
||||
|
||||
#### Step 3: User prompt
|
||||
|
||||
Harmony prints the code and URL to the terminal:
|
||||
|
||||
```
|
||||
[Harmony] To authenticate, open your browser to:
|
||||
https://sso.nationtech.io/device
|
||||
and enter code: GQWC-FWFK
|
||||
|
||||
Or visit: https://sso.nationtech.io/device?user_code=GQWC-FWFK
|
||||
```
|
||||
|
||||
If a desktop environment is detected, Harmony also calls `open` / `xdg-open` to launch the browser automatically. The `verification_uri_complete` URL pre-fills the code, so the user only needs to click "Confirm" after logging in.
|
||||
|
||||
There is no localhost HTTP listener. The CLI does not need to bind a port or receive a callback. This is what makes the device flow work over SSH, in containers, and through corporate firewalls — unlike the `oc login` approach which spins up a temporary web server to catch a redirect.
|
||||
|
||||
#### Step 4: User login
|
||||
|
||||
The developer logs in through Zitadel's web UI using one of the configured identity providers:
|
||||
|
||||
- **Google Workspace** — for teams using Google as their corporate identity.
|
||||
- **GitHub** — for open-source or GitHub-centric teams.
|
||||
- **Microsoft Entra ID (Azure AD)** — for enterprise clients, particularly common in Quebec and the broader Canadian public sector.
|
||||
|
||||
Zitadel federates the login to the chosen provider. The developer authenticates with their existing corporate credentials. No new password is created.
|
||||
|
||||
#### Step 5: Polling
|
||||
|
||||
While the user is authenticating in the browser, Harmony polls Zitadel's token endpoint at the interval specified in the device authorization response (typically 5 seconds):
|
||||
|
||||
```
|
||||
POST https://sso.nationtech.io/oauth/v2/token
|
||||
Content-Type: application/x-www-form-urlencoded
|
||||
|
||||
grant_type=urn:ietf:params:oauth:grant-type:device_code
|
||||
&device_code=dOcbPeysDhT26ZatRh9n7Q
|
||||
&client_id=<harmony_client_id>
|
||||
```
|
||||
|
||||
Before the user completes login, Zitadel responds with `authorization_pending`. Once the user consents, Zitadel returns:
|
||||
|
||||
```json
|
||||
{
|
||||
"access_token": "...",
|
||||
"token_type": "Bearer",
|
||||
"expires_in": 3600,
|
||||
"refresh_token": "...",
|
||||
"id_token": "eyJhbGciOiJSUzI1NiIs..."
|
||||
}
|
||||
```
|
||||
|
||||
The `scope=offline_access` in the initial request is what causes Zitadel to issue a `refresh_token`.
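
A sketch of this polling loop using `reqwest` (error handling is simplified; a real implementation would also handle `expired_token` and `access_denied`):

```rust
use std::time::Duration;
use serde::Deserialize;

#[derive(Deserialize)]
struct TokenResponse {
    access_token: Option<String>,
    id_token: Option<String>,
    refresh_token: Option<String>,
    error: Option<String>, // e.g. "authorization_pending", "slow_down"
}

/// Poll Zitadel's token endpoint until the user completes the device flow.
/// `device_code`, `client_id` and `interval` come from Step 2.
async fn poll_for_token(
    device_code: &str,
    client_id: &str,
    interval: Duration,
) -> Result<TokenResponse, reqwest::Error> {
    let client = reqwest::Client::new();
    loop {
        let resp: TokenResponse = client
            .post("https://sso.nationtech.io/oauth/v2/token")
            .form(&[
                ("grant_type", "urn:ietf:params:oauth:grant-type:device_code"),
                ("device_code", device_code),
                ("client_id", client_id),
            ])
            .send()
            .await?
            .json()
            .await?;

        match resp.error.as_deref() {
            Some("authorization_pending") => tokio::time::sleep(interval).await,
            Some("slow_down") => tokio::time::sleep(interval * 2).await,
            _ => return Ok(resp),
        }
    }
}
```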
|
||||
|
||||
#### Step 6: OpenBao JWT exchange
|
||||
|
||||
Harmony sends the `id_token` (a JWT signed by Zitadel) to OpenBao's JWT auth method:
|
||||
|
||||
```
|
||||
POST https://secrets.nationtech.io/v1/auth/jwt/login
|
||||
Content-Type: application/json
|
||||
|
||||
{
|
||||
"role": "harmony-developer",
|
||||
"jwt": "eyJhbGciOiJSUzI1NiIs..."
|
||||
}
|
||||
```
|
||||
|
||||
OpenBao validates the JWT:
|
||||
|
||||
1. It fetches Zitadel's public keys from `https://sso.nationtech.io/oauth/v2/keys` (the JWKS endpoint).
|
||||
2. It verifies the JWT signature.
|
||||
3. It reads the claims (`email`, `groups`, and any custom claims mapped from the upstream identity provider, such as Azure AD tenant or Google Workspace org).
|
||||
4. It evaluates the claims against the `bound_claims` and `bound_audiences` configured on the `harmony-developer` role.
|
||||
5. If validation passes, OpenBao returns a client token:
|
||||
|
||||
```json
|
||||
{
|
||||
"auth": {
|
||||
"client_token": "hvs.CAES...",
|
||||
"policies": ["harmony-dev"],
|
||||
"metadata": { "role": "harmony-developer" },
|
||||
"lease_duration": 14400,
|
||||
"renewable": true
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Harmony caches the OpenBao token, the OIDC refresh token, and the token expiry timestamps to `~/.local/share/harmony/session.json` with `0600` file permissions.
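
A sketch of the exchange itself (the URL and role come from this ADR; error handling and the session-file write are omitted):

```rust
use serde::Deserialize;
use serde_json::json;

#[derive(Deserialize)]
struct BaoAuth {
    client_token: String,
    lease_duration: u64,
    renewable: bool,
}

#[derive(Deserialize)]
struct BaoLoginResponse {
    auth: BaoAuth,
}

/// Exchange the Zitadel-signed `id_token` for an OpenBao client token.
async fn openbao_jwt_login(id_token: &str) -> Result<BaoAuth, reqwest::Error> {
    let resp: BaoLoginResponse = reqwest::Client::new()
        .post("https://secrets.nationtech.io/v1/auth/jwt/login")
        .json(&json!({ "role": "harmony-developer", "jwt": id_token }))
        .send()
        .await?
        .json()
        .await?;

    Ok(resp.auth)
}
```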
|
||||
|
||||
### OpenBao storage structure
|
||||
|
||||
All configuration and secret state is stored in an OpenBao Versioned KV v2 engine.
|
||||
|
||||
Path taxonomy:
|
||||
|
||||
```
|
||||
harmony/<organization>/<project>/<environment>/<key>
|
||||
```
|
||||
|
||||
Examples:
|
||||
|
||||
```
|
||||
harmony/nationtech/my-app/staging/PostgresConfig
|
||||
harmony/nationtech/my-app/production/PostgresConfig
|
||||
harmony/nationtech/my-app/local-shared/PostgresConfig
|
||||
```
|
||||
|
||||
The `ConfigClass` (Standard vs. Secret) can influence OpenBao policy structure — for example, `Secret`-class paths could require stricter ACLs or additional audit backends — but the path taxonomy itself does not change. This is an operational concern configured in OpenBao policies, not a structural one enforced by path naming.
|
||||
|
||||
### Token lifecycle and silent refresh
|
||||
|
||||
The system manages three tokens with different lifetimes:
|
||||
|
||||
| Token | TTL | Max TTL | Purpose |
|
||||
|---|---|---|---|
|
||||
| OpenBao client token | 4 hours | 24 hours | Read/write config store |
|
||||
| OIDC ID token | 1 hour | — | Exchange for OpenBao token |
|
||||
| OIDC refresh token | 90 days absolute, 30 days inactivity | — | Obtain new ID tokens silently |
|
||||
|
||||
The refresh flow, from the developer's perspective:
|
||||
|
||||
1. **Same session (< 4 hours since last use).** The cached OpenBao token is still valid. No network call to Zitadel. Fastest path.
|
||||
2. **Next day (OpenBao token expired, refresh token valid).** Harmony uses the OIDC `refresh_token` to request a new `id_token` from Zitadel's token endpoint (`grant_type=refresh_token`). It then exchanges the new `id_token` for a fresh OpenBao token. This happens silently. The developer sees no prompt.
|
||||
3. **OpenBao token near max TTL (approaching 24 hours of cumulative renewals).** Instead of renewing, Harmony re-authenticates using the refresh token to get a completely fresh OpenBao token. Transparent to the user.
|
||||
4. **After 30 days of inactivity.** The OIDC refresh token expires. Harmony falls back to the device flow (Step 2 above) and prompts the user to re-authenticate in the browser. This is the only scenario where a returning developer sees a login prompt.
|
||||
5. **User offboarded.** An administrator revokes the user's account or group membership in Zitadel. The next time the refresh token is used, Zitadel rejects it. The device flow also fails because the user can no longer authenticate. Access is terminated without any action needed on the OpenBao side.
|
||||
|
||||
OpenBao token renewal uses the `/auth/token/renew-self` endpoint with the `X-Vault-Token` header. Harmony renews proactively at ~75% of the TTL to avoid race conditions.
|
||||
|
||||
### OpenBao role configuration
|
||||
|
||||
The OpenBao JWT auth role for Harmony developers:
|
||||
|
||||
```bash
|
||||
bao write auth/jwt/config \
|
||||
oidc_discovery_url="https://sso.nationtech.io" \
|
||||
bound_issuer="https://sso.nationtech.io"
|
||||
|
||||
bao write auth/jwt/role/harmony-developer \
|
||||
role_type="jwt" \
|
||||
bound_audiences="<harmony_client_id>" \
|
||||
user_claim="email" \
|
||||
groups_claim="urn:zitadel:iam:org:project:roles" \
|
||||
policies="harmony-dev" \
|
||||
ttl="4h" \
|
||||
max_ttl="24h" \
|
||||
token_type="service"
|
||||
```
|
||||
|
||||
The `bound_audiences` claim ties the role to the specific Harmony Zitadel application. The `groups_claim` allows mapping Zitadel project roles to OpenBao policies for per-team or per-project access control.
|
||||
|
||||
### Self-hosted deployments
|
||||
|
||||
For organizations running their own infrastructure, the same architecture applies. The operator deploys Zitadel and OpenBao using Harmony's existing `ZitadelScore` and `OpenbaoScore`. The only configuration needed is three environment variables (or their equivalents in the bootstrap config):
|
||||
|
||||
- `HARMONY_SSO_URL` — the Zitadel instance URL.
|
||||
- `HARMONY_SECRETS_URL` — the OpenBao instance URL.
|
||||
- `HARMONY_SSO_CLIENT_ID` — the Zitadel application client ID.
|
||||
|
||||
None of these are secrets. They can be committed to an infrastructure repository or distributed via any convenient channel.
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- Developers authenticate with existing corporate credentials. No new passwords, no static tokens to distribute.
|
||||
- The device flow works in every environment: local terminal, SSH, containers, CI runners, corporate VPNs.
|
||||
- Silent token refresh keeps developers authenticated for weeks without any manual intervention.
|
||||
- User offboarding is a single action in Zitadel. No OpenBao token rotation or manual revocation required.
|
||||
- Azure AD / Microsoft Entra ID support addresses the enterprise and public sector market.
|
||||
|
||||
### Negative
|
||||
|
||||
- The OAuth state machine (device code polling, token refresh, error handling) adds implementation complexity compared to a static token approach.
|
||||
- Developers must have network access to `sso.nationtech.io` and `secrets.nationtech.io` to pull or push configuration state. True offline work falls back to the local file store, which does not sync with the team.
|
||||
- The first login per machine requires a browser interaction. Fully headless first-run scenarios (e.g., a fresh CI runner with no pre-seeded tokens) must use `EnvSource` overrides or a service account JWT.
|
||||
177
docs/adr/020-interactive-configuration-crate.md
Normal file
177
docs/adr/020-interactive-configuration-crate.md
Normal file
@@ -0,0 +1,177 @@
|
||||
# ADR 020: Unified Configuration and Secret Management
|
||||
|
||||
Author: Jean-Gabriel Gill-Couture
|
||||
|
||||
Date: 2026-03-18
|
||||
|
||||
## Status
|
||||
|
||||
Proposed
|
||||
|
||||
## Context
|
||||
|
||||
Harmony's orchestration logic depends on runtime data that falls into two categories:
|
||||
|
||||
1. **Secrets** — credentials, tokens, private keys.
|
||||
2. **Operational configuration** — deployment targets, host selections, port assignments, reboot decisions, and similar contextual choices.
|
||||
|
||||
Both categories share the same fundamental lifecycle: a value must be acquired before execution can proceed, it may come from several backends (environment variable, remote store, interactive prompt), and it must be shareable across a team without polluting the Git repository.
|
||||
|
||||
Treating these categories as separate subsystems forces developers to choose between a "config API" and a "secret API" at every call site. The only meaningful difference between the two is how the storage backend handles the data (plaintext vs. encrypted, audited vs. unaudited) and how the CLI displays it (visible vs. masked). That difference belongs in the backend, not in the application code.
|
||||
|
||||
Three concrete problems drive this change:
|
||||
|
||||
- **Async terminal corruption.** `inquire` prompts assume exclusive terminal ownership. Background tokio tasks emitting log output during a prompt corrupt the terminal state. This is inherent to Harmony's concurrent orchestration model.
|
||||
- **Untestable code paths.** Any function containing an inline `inquire` call requires a real TTY to execute. Unit testing is impossible without ignoring the test entirely.
|
||||
- **No backend integration.** Inline prompts cannot be answered from a remote store, an environment variable, or a CI pipeline. Every automated deployment that passes through a prompting code path requires a human operator at a terminal.
|
||||
|
||||
## Decision
|
||||
|
||||
A single workspace crate, `harmony_config`, provides all configuration and secret acquisition for Harmony. It replaces both `harmony_secret` and all inline `inquire` usage.
|
||||
|
||||
### Schema in Git, state in the store
|
||||
|
||||
The Rust type system serves as the configuration schema. Developers declare what configuration is needed by defining structs:
|
||||
|
||||
```rust
|
||||
#[derive(Config, Serialize, Deserialize, JsonSchema, InteractiveParse)]
|
||||
struct PostgresConfig {
|
||||
pub host: String,
|
||||
pub port: u16,
|
||||
#[config(secret)]
|
||||
pub password: String,
|
||||
}
|
||||
```
|
||||
|
||||
These structs live in Git and evolve with the code. When a branch introduces a new field, Git tracks that schema change. The actual values live in an external store — OpenBao by default. No `.env` files, no JSON config files, no YAML in the repository.
|
||||
|
||||
### Data classification
|
||||
|
||||
```rust
|
||||
/// Tells the storage backend how to handle the data.
|
||||
pub enum ConfigClass {
|
||||
/// Plaintext storage is acceptable.
|
||||
Standard,
|
||||
/// Must be encrypted at rest, masked in UI, subject to audit logging.
|
||||
Secret,
|
||||
}
|
||||
```
|
||||
|
||||
Classification is determined at the struct level. A struct with no `#[config(secret)]` fields has `ConfigClass::Standard`. A struct with one or more `#[config(secret)]` fields is elevated to `ConfigClass::Secret`. The struct is always stored as a single cohesive JSON blob; field-level splitting across backends is not a concern of the trait.
|
||||
|
||||
The `#[config(secret)]` attribute also instructs the `PromptSource` to mask terminal input for that field during interactive prompting.
|
||||
|
||||
### The Config trait
|
||||
|
||||
```rust
|
||||
pub trait Config: Serialize + DeserializeOwned + JsonSchema + InteractiveParseObj + Sized {
|
||||
/// Stable lookup key. By default, the struct name.
|
||||
const KEY: &'static str;
|
||||
|
||||
/// How the backend should treat this data.
|
||||
const CLASS: ConfigClass;
|
||||
}
|
||||
```
|
||||
|
||||
A `#[derive(Config)]` proc macro generates the implementation. The macro inspects field attributes to determine `CLASS`.
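
Conceptually, the derive expands to something like the following for the `PostgresConfig` example above (an illustration of intent, not the literal macro output):

```rust
// Illustrative expansion of #[derive(Config)] for PostgresConfig.
impl Config for PostgresConfig {
    // Stable lookup key: the struct name by default.
    const KEY: &'static str = "PostgresConfig";
    // Elevated to Secret because the struct has a #[config(secret)] field.
    const CLASS: ConfigClass = ConfigClass::Secret;
}
```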
|
||||
|
||||
### The ConfigStore trait
|
||||
|
||||
```rust
|
||||
#[async_trait]
|
||||
pub trait ConfigStore: Send + Sync {
|
||||
async fn get(
|
||||
&self,
|
||||
class: ConfigClass,
|
||||
namespace: &str,
|
||||
key: &str,
|
||||
) -> Result<Option<serde_json::Value>, ConfigError>;
|
||||
|
||||
async fn set(
|
||||
&self,
|
||||
class: ConfigClass,
|
||||
namespace: &str,
|
||||
key: &str,
|
||||
value: &serde_json::Value,
|
||||
) -> Result<(), ConfigError>;
|
||||
}
|
||||
```
|
||||
|
||||
The `class` parameter is a hint. The store implementation decides what to do with it. An OpenBao store may route `Secret` data to a different path prefix or apply stricter ACLs. A future store could split fields across backends — that is an implementation concern, not a trait concern.
|
||||
|
||||
### Resolution chain
|
||||
|
||||
The `ConfigManager` tries sources in priority order:
|
||||
|
||||
1. **`EnvSource`** — reads `HARMONY_CONFIG_{KEY}` as a JSON string. Override hatch for CI/CD pipelines and containerized environments.
|
||||
2. **`StoreSource`** — wraps a `ConfigStore` implementation. For teams, this is the OpenBao backend authenticated via Zitadel OIDC (see ADR 020-1).
|
||||
3. **`PromptSource`** — presents an `interactive-parse` prompt on the terminal. Acquires a process-wide async mutex before rendering to prevent log output corruption.
|
||||
|
||||
When `PromptSource` obtains a value, the `ConfigManager` persists it back to the `StoreSource` so that subsequent runs — by the same developer or any teammate — resolve without prompting.
|
||||
|
||||
Callers that do not include `PromptSource` in their source list never block on a TTY. Test code passes empty source lists and constructs config structs directly.
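
For tests that want to exercise the chain end to end, a mock store can stand in for OpenBao. A hypothetical sketch (`InMemoryStore`, `ConfigManager::new`, and `StoreSource::new` are assumed names; only the behaviour, sources tried in order with prompting opt-in, comes from this ADR):

```rust
#[tokio::test]
async fn resolves_without_a_terminal() {
    // An in-memory ConfigStore pre-seeded with a value (hypothetical test double).
    let store = InMemoryStore::default();
    store
        .set(
            ConfigClass::Secret,
            "nationtech/my-app/test",
            PostgresConfig::KEY,
            &serde_json::json!({ "host": "db.local", "port": 5432, "password": "hunter2" }),
        )
        .await
        .unwrap();

    // No PromptSource in the chain, so resolution never touches a TTY.
    let manager = ConfigManager::new(vec![Box::new(StoreSource::new(store))]);

    let cfg: PostgresConfig = manager.get_or_prompt().await.unwrap();
    assert_eq!(cfg.port, 5432);
}
```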
|
||||
|
||||
### Schema versioning
|
||||
|
||||
The Rust struct is the schema. When a developer renames a field, removes a field, or changes a type on a branch, the store may still contain data shaped for a previous version of the struct. If another team member who does not yet have that commit runs the code, `serde_json::from_value` will fail on the stale entry.
|
||||
|
||||
In the initial implementation, the resolution chain handles this gracefully: a deserialization failure is treated as a cache miss, and the `PromptSource` fires. The prompted value overwrites the stale entry in the store.
|
||||
|
||||
This is sufficient for small teams working on short-lived branches. It is not sufficient at scale, where silent re-prompting could mask real configuration drift.
|
||||
|
||||
A future iteration will introduce a compile-time schema migration mechanism, similar to how `sqlx` verifies queries against a live database at compile time. The mechanism will:
|
||||
|
||||
- Detect schema drift between the Rust struct and the stored JSON.
|
||||
- Apply named, ordered migration functions to transform stored data forward.
|
||||
- Reject ambiguous migrations at compile time rather than silently corrupting state.
|
||||
|
||||
Until that mechanism exists, teams should treat store entries as soft caches: the struct definition is always authoritative, and the store is best-effort.
|
||||
|
||||
## Rationale
|
||||
|
||||
**Why merge secrets and config into one crate?** Separate crates with nearly identical trait shapes (`Secret` vs `Config`, `SecretStore` vs `ConfigStore`) force developers to make a classification decision at every call site. A unified crate with a `ConfigClass` discriminator moves that decision to the struct definition, where it belongs.
|
||||
|
||||
**Why OpenBao as the default backend?** OpenBao is a fully open-source Vault fork under the Linux Foundation. It runs on-premises with no phone-home requirement — a hard constraint for private cloud and regulated environments. Harmony already deploys OpenBao for clients (`OpenbaoScore`), so no new infrastructure is introduced.
|
||||
|
||||
**Why not store values in Git (e.g., encrypted YAML)?** Git-tracked config files create merge conflicts, require re-encryption on team membership changes, and leak metadata (file names, key names) even when values are encrypted. Storing state in OpenBao avoids all of these issues and provides audit logging, access control, and versioned KV out of the box.
|
||||
|
||||
**Why keep `PromptSource`?** Removing interactive prompts entirely would break the zero-infrastructure bootstrapping path and eliminate human-confirmation safety gates for destructive operations (interface reconfiguration, node reboot). The problem was never that prompts exist — it is that they were unavoidable and untestable. Making `PromptSource` an explicit, opt-in entry in the source list restores control.
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- A single API surface for all runtime data acquisition.
|
||||
- All currently-ignored tests become runnable without TTY access.
|
||||
- Async terminal corruption is eliminated by the process-wide prompt mutex.
|
||||
- The bootstrapping path requires no infrastructure for a first run; `PromptSource` alone is sufficient.
|
||||
- The team path (OpenBao + Zitadel) reuses infrastructure Harmony already deploys.
|
||||
- User offboarding is a single Zitadel action.
|
||||
|
||||
### Negative
|
||||
|
||||
- Migrating all inline `inquire` and `harmony_secret` call sites is a significant refactoring effort.
|
||||
- Until the schema migration mechanism is built, store entries for renamed or removed fields become stale and must be re-prompted.
|
||||
- The Zitadel device flow introduces a browser step on first login per machine.
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
### Phase 1: Trait design and crate restructure
|
||||
|
||||
Refactor `harmony_config` to define the final `Config`, `ConfigClass`, and `ConfigStore` traits. Update the derive macro to support `#[config(secret)]` and generate the correct `CLASS` constant. Implement `EnvSource` and `PromptSource` against the new traits. Write comprehensive unit tests using mock stores.
|
||||
|
||||
### Phase 2: Absorb `harmony_secret`
|
||||
|
||||
Migrate the `OpenbaoSecretStore`, `InfisicalSecretStore`, and `LocalFileSecretStore` implementations from `harmony_secret` into `harmony_config` as `ConfigStore` backends. Update all call sites that use `SecretManager::get`, `SecretManager::get_or_prompt`, or `SecretManager::set` to use `harmony_config` equivalents.
|
||||
|
||||
### Phase 3: Migrate inline prompts
|
||||
|
||||
Replace all inline `inquire` call sites in the `harmony` crate (`infra/brocade.rs`, `infra/network_manager.rs`, `modules/okd/host_network.rs`, and others) with `harmony_config` structs and `get_or_prompt` calls. Un-ignore the affected tests.
|
||||
|
||||
### Phase 4: Zitadel and OpenBao integration
|
||||
|
||||
Implement the authentication flow described in ADR 020-1. Wire `StoreSource` to use Zitadel OIDC tokens for OpenBao access. Implement token caching and silent refresh.
|
||||
|
||||
### Phase 5: Remove `harmony_secret`
|
||||
|
||||
Delete the `harmony_secret` and `harmony_secret_derive` crates from the workspace. All functionality now lives in `harmony_config`.
|
||||
63
docs/adr/README.md
Normal file
63
docs/adr/README.md
Normal file
@@ -0,0 +1,63 @@
|
||||
# Architecture Decision Records
|
||||
|
||||
An Architecture Decision Record (ADR) documents a significant architectural decision made during the development of Harmony — along with its context, rationale, and consequences.
|
||||
|
||||
## Why We Use ADRs
|
||||
|
||||
As a platform engineering framework used by a team, Harmony accumulates technical decisions over time. ADRs help us:
|
||||
|
||||
- **Track rationale** — understand _why_ a decision was made, not just _what_ was decided
|
||||
- **Onboard new contributors** — the "why" is preserved even when team membership changes
|
||||
- **Avoid repeating past mistakes** — previous decisions and their context are searchable
|
||||
- **Manage technical debt** — ADRs make it easier to revisit and revise past choices
|
||||
|
||||
An ADR captures a decision at a point in time. It is not a specification — it is a record of reasoning.
|
||||
|
||||
## ADR Format
|
||||
|
||||
Every ADR follows this structure:
|
||||
|
||||
| Section | Purpose |
|
||||
|---------|---------|
|
||||
| **Status** | Proposed / Pending / Accepted / Implemented / Deprecated |
|
||||
| **Context** | The problem or background — the "why" behind this decision |
|
||||
| **Decision** | The chosen solution or direction |
|
||||
| **Rationale** | Reasoning behind the decision |
|
||||
| **Consequences** | Both positive and negative outcomes |
|
||||
| **Alternatives considered** | Other options that were evaluated |
|
||||
| **Additional Notes** | Supplementary context, links, or open questions |
|
||||
|
||||
## ADR Index
|
||||
|
||||
| Number | Title | Status |
|
||||
|--------|-------|--------|
|
||||
| [000](./000-ADR-Template.md) | ADR Template | Reference |
|
||||
| [001](./001-rust.md) | Why Rust | Accepted |
|
||||
| [002](./002-hexagonal-architecture.md) | Hexagonal Architecture | Accepted |
|
||||
| [003](./003-infrastructure-abstractions.md) | Infrastructure Abstractions | Accepted |
|
||||
| [004](./004-ipxe.md) | iPXE | Accepted |
|
||||
| [005](./005-interactive-project.md) | Interactive Project | Proposed |
|
||||
| [006](./006-secret-management.md) | Secret Management | Accepted |
|
||||
| [007](./007-default-runtime.md) | Default Runtime | Accepted |
|
||||
| [008](./008-score-display-formatting.md) | Score Display Formatting | Proposed |
|
||||
| [009](./009-helm-and-kustomize-handling.md) | Helm and Kustomize Handling | Accepted |
|
||||
| [010](./010-monitoring-and-alerting.md) | Monitoring and Alerting | Accepted |
|
||||
| [011](./011-multi-tenant-cluster.md) | Multi-Tenant Cluster | Accepted |
|
||||
| [012](./012-project-delivery-automation.md) | Project Delivery Automation | Proposed |
|
||||
| [013](./013-monitoring-notifications.md) | Monitoring Notifications | Accepted |
|
||||
| [015](./015-higher-order-topologies.md) | Higher Order Topologies | Proposed |
|
||||
| [016](./016-Harmony-Agent-And-Global-Mesh-For-Decentralized-Workload-Management.md) | Harmony Agent and Global Mesh | Proposed |
|
||||
| [017-1](./017-1-Nats-Clusters-Interconnection-Topology.md) | NATS Clusters Interconnection Topology | Proposed |
|
||||
| [018](./018-Template-Hydration-For-Workload-Deployment.md) | Template Hydration for Workload Deployment | Proposed |
|
||||
| [019](./019-Network-bond-setup.md) | Network Bond Setup | Proposed |
|
||||
| [020-1](./020-1-zitadel-openbao-secure-config-store.md) | Zitadel + OpenBao Secure Config Store | Accepted |
|
||||
| [020](./020-interactive-configuration-crate.md) | Interactive Configuration Crate | Proposed |
|
||||
|
||||
## Contributing
|
||||
|
||||
When making a significant technical change:
|
||||
|
||||
1. **Check existing ADRs** — the decision may already be documented
|
||||
2. **Create a new ADR** using the [template](./000-ADR-Template.md) if the change warrants architectural discussion
|
||||
3. **Set status to Proposed** and open it for team review
|
||||
4. Once accepted and implemented, update the status accordingly
|
||||
@@ -84,7 +84,7 @@ Network services that run inside the cluster or as part of the topology.
|
||||
- **OKDLoadBalancerScore**: Configures the high-availability load balancers for the OKD API and ingress.
|
||||
- **OKDBootstrapLoadBalancerScore**: Configures the load balancer specifically for the bootstrap-time API endpoint.
|
||||
- **K8sIngressScore**: Configures an Ingress controller or resource.
|
||||
- [HighAvailabilityHostNetworkScore](../../harmony/src/modules/okd/host_network.rs): Configures network bonds on a host and the corresponding port-channels on the switch stack for high-availability.
|
||||
- **HighAvailabilityHostNetworkScore**: Configures network bonds on a host and the corresponding port-channels on the switch stack for high-availability.
|
||||
|
||||
## Tenant Management
|
||||
|
||||
|
||||
229
docs/coding-guide.md
Normal file
229
docs/coding-guide.md
Normal file
@@ -0,0 +1,229 @@
|
||||
# Harmony Coding Guide
|
||||
|
||||
Harmony is an infrastructure automation framework. It is **code-first and code-only**: operators write Rust programs to declare and drive infrastructure, rather than YAML files or DSL configs. Good code here means a good operator experience.
|
||||
|
||||
### Concrete context
|
||||
|
||||
This guide uses the KVM module as its concrete running example. The same style translates directly to the other modules and contexts Harmony manages, such as OPNSense and Kubernetes.
|
||||
|
||||
## Core Philosophy
|
||||
|
||||
### High-level functions over raw primitives
|
||||
|
||||
Callers should not need to know about underlying protocols, XML schemas, or API quirks. A function that deploys a VM should accept meaningful parameters like CPU count, memory, and network name — not XML strings.
|
||||
|
||||
```rust
|
||||
// Bad: caller constructs XML and passes it to a thin wrapper
|
||||
let xml = format!(r#"<domain type='kvm'>...</domain>"#, name, memory_kb, ...);
|
||||
executor.create_vm(&xml).await?;
|
||||
|
||||
// Good: caller describes intent, the module handles representation
|
||||
executor.define_vm(&VmConfig::builder("my-vm")
|
||||
.cpu(4)
|
||||
.memory_gb(8)
|
||||
.disk(DiskConfig::new(50))
|
||||
.network(NetworkRef::named("mylan"))
|
||||
.boot_order([BootDevice::Network, BootDevice::Disk])
|
||||
.build())
|
||||
.await?;
|
||||
```
|
||||
|
||||
The module owns the XML, the virsh invocations, the API calls — not the caller.
|
||||
|
||||
### Use the right abstraction layer
|
||||
|
||||
Prefer native library bindings over shelling out to CLI tools. The `virt` crate provides direct libvirt bindings and should be used instead of spawning `virsh` subprocesses.
|
||||
|
||||
- CLI subprocess calls are fragile: stdout/stderr parsing, exit codes, quoting, PATH differences
|
||||
- Native bindings give typed errors, no temp files, no shell escaping
|
||||
- `virt::connect::Connect` opens a connection; `virt::domain::Domain` manages VMs; `virt::network::Network` manages virtual networks
|
||||
|
||||
### Keep functions small and well-named
|
||||
|
||||
Each function should do one thing. If a function is doing two conceptually separate things, split it. Function names should read like plain English: `ensure_network_active`, `define_vm`, `vm_is_running`.
|
||||
|
||||
### Prefer short modules over large files
|
||||
|
||||
Group related types and functions by concept. A module that handles one resource (e.g., network, domain, storage) is better than a single file for everything.
|
||||
|
||||
---
|
||||
|
||||
## Error Handling
|
||||
|
||||
### Use `thiserror` for all error types
|
||||
|
||||
Define error types with `thiserror::Error`. This removes the boilerplate of implementing `Display` and `std::error::Error` by hand, keeps error messages close to their variants, and makes types easy to extend.
|
||||
|
||||
```rust
|
||||
// Bad: hand-rolled Display + std::error::Error
|
||||
#[derive(Debug)]
|
||||
pub enum KVMError {
|
||||
ConnectionError(String),
|
||||
VMNotFound(String),
|
||||
}
|
||||
|
||||
impl std::fmt::Display for KVMError { ... }
|
||||
impl std::error::Error for KVMError {}
|
||||
|
||||
// Good: derive Display via thiserror
|
||||
#[derive(thiserror::Error, Debug)]
|
||||
pub enum KVMError {
|
||||
#[error("connection failed: {0}")]
|
||||
ConnectionFailed(String),
|
||||
#[error("VM not found: {name}")]
|
||||
VmNotFound { name: String },
|
||||
}
|
||||
```
|
||||
|
||||
### Make bubbling errors easy with `?` and `From`
|
||||
|
||||
`?` works on any error type for which there is a `From` impl. Add `From` conversions from lower-level errors into your module's error type so callers can use `?` without boilerplate.
|
||||
|
||||
With `thiserror`, wrapping a foreign error is one line:
|
||||
|
||||
```rust
|
||||
#[derive(thiserror::Error, Debug)]
|
||||
pub enum KVMError {
|
||||
#[error("libvirt error: {0}")]
|
||||
Libvirt(#[from] virt::error::Error),
|
||||
|
||||
#[error("IO error: {0}")]
|
||||
Io(#[from] std::io::Error),
|
||||
}
|
||||
```
|
||||
|
||||
This means a call that returns `virt::error::Error` can be `?`-propagated into a `Result<_, KVMError>` without any `.map_err(...)`.
|
||||
|
||||
### Typed errors over stringly-typed errors
|
||||
|
||||
Avoid `Box<dyn Error>` or `String` as error return types in library code. Callers need to distinguish errors programmatically — `KVMError::VmAlreadyExists` is actionable, `"VM already exists: foo"` as a `String` is not.
|
||||
|
||||
At binary entry points (e.g., `main`) it is acceptable to convert to `String` or `anyhow::Error` for display.
|
||||
|
||||
---
|
||||
|
||||
## Logging
|
||||
|
||||
### Use the `log` crate macros
|
||||
|
||||
All log output must go through the `log` crate. Never use `println!`, `eprintln!`, or `dbg!` in library code. This makes output compatible with any logging backend (env_logger, tracing, structured logging, etc.).
|
||||
|
||||
```rust
|
||||
// Bad
|
||||
println!("Creating VM: {}", name);
|
||||
|
||||
// Good
|
||||
use log::{info, debug, warn};
|
||||
info!("Creating VM: {name}");
|
||||
debug!("VM XML:\n{xml}");
|
||||
warn!("Network already active, skipping creation");
|
||||
```
|
||||
|
||||
Use the right level:
|
||||
|
||||
| Level | When to use |
|
||||
|---------|-------------|
|
||||
| `error` | Unrecoverable failures (before returning Err) |
|
||||
| `warn` | Recoverable issues, skipped steps |
|
||||
| `info` | High-level progress events visible in normal operation |
|
||||
| `debug` | Detailed operational info useful for debugging |
|
||||
| `trace` | Very granular, per-iteration or per-call data |
|
||||
|
||||
Log before significant operations and after unexpected conditions. Do not log inside tight loops at `info` level.
|
||||
|
||||
---
|
||||
|
||||
## Types and Builders
|
||||
|
||||
### Derive `Serialize` on all public domain types
|
||||
|
||||
All public structs and enums that represent configuration or state should derive `serde::Serialize`. Add `Deserialize` when round-trip serialization is needed.
|
||||
|
||||
### Builder pattern for complex configs
|
||||
|
||||
When a type has more than three fields or optional fields, provide a builder. The builder pattern allows named, incremental construction without positional arguments.
|
||||
|
||||
```rust
|
||||
let config = VmConfig::builder("bootstrap")
|
||||
.cpu(4)
|
||||
.memory_gb(8)
|
||||
.disk(DiskConfig::new(50).labeled("os"))
|
||||
.disk(DiskConfig::new(100).labeled("data"))
|
||||
.network(NetworkRef::named("harmonylan"))
|
||||
.boot_order([BootDevice::Network, BootDevice::Disk])
|
||||
.build();
|
||||
```
|
||||
|
||||
### Avoid `pub` fields on config structs
|
||||
|
||||
Expose data through methods or the builder, not raw field access. This preserves the ability to validate, rename, or change representation without breaking callers.
|
||||
|
||||
---
|
||||
|
||||
## Async
|
||||
|
||||
### Use `tokio` for all async runtime needs
|
||||
|
||||
All async code runs on tokio. Use `tokio::spawn`, `tokio::time`, etc. Use `#[async_trait]` for traits with async methods.
|
||||
|
||||
### No blocking in async context
|
||||
|
||||
Never call blocking I/O (file I/O, network, process spawn) directly in an async function. Use `tokio::fs`, `tokio::process`, or `tokio::task::spawn_blocking` as appropriate.
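
A small illustration in the KVM context (paths and tool names are only examples):

```rust
use tokio::{fs, process::Command};

// Bad: blocks the executor thread.
// let xml = std::fs::read_to_string("/etc/libvirt/qemu/my-vm.xml")?;

// Good: async equivalents from tokio.
async fn read_domain_xml() -> std::io::Result<String> {
    fs::read_to_string("/etc/libvirt/qemu/my-vm.xml").await
}

async fn qemu_img_info(path: &str) -> std::io::Result<std::process::Output> {
    Command::new("qemu-img").args(["info", path]).output().await
}

// CPU-bound or unavoidably blocking work goes to the blocking pool.
async fn image_size_bytes(path: std::path::PathBuf) -> std::io::Result<u64> {
    tokio::task::spawn_blocking(move || std::fs::metadata(&path).map(|m| m.len()))
        .await
        .expect("blocking task panicked")
}
```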
|
||||
|
||||
---
|
||||
|
||||
## Module Structure
|
||||
|
||||
### Follow the `Score` / `Interpret` pattern
|
||||
|
||||
Modules that represent deployable infrastructure should implement `Score<T: Topology>` and `Interpret<T>`:
|
||||
|
||||
- `Score` is the serializable, clonable configuration declaring *what* to deploy
|
||||
- `Interpret` does the actual work when `execute()` is called
|
||||
|
||||
```rust
|
||||
pub struct KvmScore {
|
||||
network: NetworkConfig,
|
||||
vms: Vec<VmConfig>,
|
||||
}
|
||||
|
||||
impl<T: Topology + KvmHost> Score<T> for KvmScore {
|
||||
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
||||
Box::new(KvmInterpret::new(self.clone()))
|
||||
}
|
||||
fn name(&self) -> String { "KvmScore".to_string() }
|
||||
}
|
||||
```
|
||||
|
||||
### Flatten the public API in `mod.rs`
|
||||
|
||||
Internal submodules are implementation detail. Re-export what callers need at the module root:
|
||||
|
||||
```rust
|
||||
// modules/kvm/mod.rs
|
||||
mod connection;
|
||||
mod domain;
|
||||
mod network;
|
||||
mod error;
|
||||
mod xml;
|
||||
|
||||
pub use connection::KvmConnection;
|
||||
pub use domain::{VmConfig, VmConfigBuilder, VmStatus, DiskConfig, BootDevice};
|
||||
pub use error::KvmError;
|
||||
pub use network::NetworkConfig;
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Commit Style
|
||||
|
||||
Follow [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/):
|
||||
|
||||
```
|
||||
feat(kvm): add network isolation support
|
||||
fix(kvm): correct memory unit conversion for libvirt
|
||||
refactor(kvm): replace virsh subprocess calls with virt crate bindings
|
||||
docs: add coding guide
|
||||
```
|
||||
|
||||
Keep pull requests small and single-purpose (under ~200 lines excluding generated code). Do not mix refactoring, bug fixes, and new features in one PR.
|
||||
@@ -28,6 +28,11 @@ Harmony's design is based on a few key concepts. Understanding them is the key t
|
||||
- **What it is:** An **Inventory** is the physical material (the "what") used in a cluster. This is most relevant for bare-metal or on-premise topologies.
|
||||
- **Example:** A list of nodes with their roles (control plane, worker), CPU, RAM, and network interfaces. For the `K8sAnywhereTopology`, the inventory might be empty or autoloaded, as the infrastructure is more abstract.
|
||||
|
||||
### 6. Configuration & Secrets
|
||||
|
||||
- **What it is:** Configuration represents the runtime data required to deploy your `Scores`. This includes both non-sensitive state (like cluster hostnames, deployment profiles) and sensitive secrets (like API keys, database passwords).
|
||||
- **How it works:** See the [Configuration Concept Guide](./concepts/configuration.md) to understand Harmony's unified approach to managing schema in Git and state in OpenBao.
|
||||
|
||||
---
|
||||
|
||||
### How They Work Together (The Compile-Time Check)
|
||||
|
||||
107
docs/concepts/configuration.md
Normal file
107
docs/concepts/configuration.md
Normal file
@@ -0,0 +1,107 @@
|
||||
# Configuration and Secrets
|
||||
|
||||
Harmony treats configuration and secrets as a single concern. Developers use one crate, `harmony_config`, to declare, store, and retrieve all runtime data — whether it is a public hostname or a database password.
|
||||
|
||||
## The mental model: schema in Git, state in the store
|
||||
|
||||
### Schema
|
||||
|
||||
In Harmony, the Rust code is the configuration schema. You declare what your module needs by defining a struct:
|
||||
|
||||
```rust
|
||||
#[derive(Config, Serialize, Deserialize, JsonSchema, InteractiveParse)]
|
||||
struct PostgresConfig {
|
||||
pub host: String,
|
||||
pub port: u16,
|
||||
#[config(secret)]
|
||||
pub password: String,
|
||||
}
|
||||
```
|
||||
|
||||
This struct is tracked in Git. When a branch adds a new field, Git tracks that the branch requires a new value. When a branch removes a field, the old value in the store becomes irrelevant. The struct is always authoritative.
|
||||
|
||||
### State
|
||||
|
||||
The actual values live in a config store — by default, OpenBao. No `.env` files, no JSON, no YAML in the repository.
|
||||
|
||||
When you run your code, Harmony reads the struct (schema) and resolves values from the store (state):
|
||||
|
||||
- If the store has the value, it is injected seamlessly.
|
||||
- If the store does not have it, Harmony prompts you in the terminal. Your answer is pushed back to the store automatically.
|
||||
- When a teammate runs the same code, they are not prompted — you already provided the value.
|
||||
|
||||
### How branch switching works
|
||||
|
||||
Because the schema is just Rust code tracked in Git, branch switching works naturally:
|
||||
|
||||
1. You check out `feat/redis`. The code now requires `RedisConfig`.
|
||||
2. You run `cargo run`. Harmony detects that `RedisConfig` has no value in the store. It prompts you.
|
||||
3. You provide the values. Harmony pushes them to OpenBao.
|
||||
4. Your teammate checks out `feat/redis` and runs `cargo run`. No prompt — the values are already in the store.
|
||||
5. You switch back to `main`. `RedisConfig` does not exist in that branch's code. The store entry is ignored.
|
||||
|
||||
## Secrets vs. standard configuration
|
||||
|
||||
From your application code, there is no difference. You always call `harmony_config::get_or_prompt::<T>()`.
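A minimal usage sketch, reusing the `PostgresConfig` struct from above (the async, `Result`-returning signature shown for `get_or_prompt` is an assumption):

```rust
// Resolves PostgresConfig from the store, prompting interactively on a miss.
// The exact signature of get_or_prompt is assumed for illustration.
let db = harmony_config::get_or_prompt::<PostgresConfig>()
    .await
    .expect("failed to resolve PostgresConfig");

println!("connecting to {}:{}", db.host, db.port);
```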
The difference is in the struct definition:
|
||||
|
||||
```rust
|
||||
// Standard config — stored in plaintext, displayed during prompting.
|
||||
#[derive(Config)]
|
||||
struct ClusterConfig {
|
||||
pub api_url: String,
|
||||
pub namespace: String,
|
||||
}
|
||||
|
||||
// Contains a secret field — the entire struct is stored encrypted,
|
||||
// and the password field is masked during terminal prompting.
|
||||
#[derive(Config)]
|
||||
struct DatabaseConfig {
|
||||
pub host: String,
|
||||
#[config(secret)]
|
||||
pub password: String,
|
||||
}
|
||||
```
|
||||
|
||||
If a struct contains any `#[config(secret)]` field, Harmony elevates the entire struct to `ConfigClass::Secret`. The storage backend decides what that means in practice — in the case of OpenBao, it may route the data to a path with stricter ACLs or audit policies.
|
||||
|
||||
## Authentication and team sharing
|
||||
|
||||
Harmony uses Zitadel (hosted at `sso.nationtech.io`) for identity and OpenBao (hosted at `secrets.nationtech.io`) for storage.
|
||||
|
||||
**First run on a new machine:**
|
||||
|
||||
1. Harmony detects that you are not logged in.
|
||||
2. It prints a short code and URL to your terminal, and opens your browser if possible.
|
||||
3. You log in with your corporate identity (Google, GitHub, or Microsoft Entra ID / Azure AD).
|
||||
4. Harmony receives an OIDC token, exchanges it for an OpenBao token, and caches the session locally.
|
||||
|
||||
**Subsequent runs:**
|
||||
|
||||
- Harmony silently refreshes your tokens in the background. You do not need to log in again for up to 90 days of active use.
|
||||
- If you are inactive for 30 days, or if an administrator revokes your access in Zitadel, you will be prompted to re-authenticate.
|
||||
|
||||
**Offboarding:**
|
||||
|
||||
Revoking a user in Zitadel immediately invalidates their ability to refresh tokens or obtain new ones. No manual secret rotation is required.
|
||||
|
||||
## Resolution chain
|
||||
|
||||
When Harmony resolves a config value, it tries sources in order (sketched in code after the list):
|
||||
|
||||
1. **Environment variable** (`HARMONY_CONFIG_{KEY}`) — highest priority. Use this in CI/CD to override any value without touching the store.
|
||||
2. **Config store** (OpenBao for teams, local file for solo/offline use) — the primary source for shared team state.
|
||||
3. **Interactive prompt** — last resort. Prompts the developer and persists the answer back to the store.
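The same order, sketched in code (the in-memory store stands in for OpenBao; this is not the actual `harmony_config` implementation):

```rust
use std::collections::HashMap;

// Illustrative resolution order only; harmony_config handles prompting
// and persistence internally.
async fn resolve(key: &str, store: &HashMap<String, String>) -> Option<String> {
    // 1. Environment variable override, e.g. HARMONY_CONFIG_DB_HOST.
    if let Ok(value) = std::env::var(format!("HARMONY_CONFIG_{}", key.to_uppercase())) {
        return Some(value);
    }
    // 2. Config store (OpenBao for teams, local file for solo/offline use).
    if let Some(value) = store.get(key) {
        return Some(value.clone());
    }
    // 3. Interactive prompt would run here, persisting the answer back.
    None
}
```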
## Schema versioning
|
||||
|
||||
The Rust struct is the single source of truth for what configuration looks like. If a developer renames or removes a field on a branch, the store may still contain data shaped for the old version of the struct. When another developer who does not have that change runs the code, deserialization will fail.
|
||||
|
||||
In the current implementation, this is handled gracefully: a deserialization failure is treated as a miss, and Harmony re-prompts. The new answer overwrites the stale entry.
|
||||
|
||||
A compile-time migration mechanism is planned for a future release to handle this more rigorously at scale.
|
||||
|
||||
## Offline and local development
|
||||
|
||||
If you are working offline or evaluating Harmony without a team OpenBao instance, the `StoreSource` falls back to a local file store at `~/.local/share/harmony/config/`. The developer experience is identical — prompting, caching, and resolution all work the same way. The only difference is that the state is local to your machine and not shared with teammates.
|
||||
135
docs/guides/adding-capabilities.md
Normal file
135
docs/guides/adding-capabilities.md
Normal file
@@ -0,0 +1,135 @@
|
||||
# Adding Capabilities
|
||||
|
||||
`Capabilities` are trait methods that a `Topology` exposes to Scores. They are the "how" — the specific APIs and features that let a Score translate intent into infrastructure actions.
|
||||
|
||||
## How Capabilities Work
|
||||
|
||||
When a Score declares it needs certain Capabilities:
|
||||
|
||||
```rust
|
||||
impl<T: Topology + K8sclient + HelmCommand> Score<T> for MyScore {
|
||||
// ...
|
||||
}
|
||||
```
|
||||
|
||||
The compiler verifies that the target `Topology` implements both `K8sclient` and `HelmCommand`. If it doesn't, compilation fails. This is the compile-time safety check that prevents invalid configurations from reaching production.
|
||||
|
||||
## Built-in Capabilities
|
||||
|
||||
Harmony provides a set of standard Capabilities:
|
||||
|
||||
| Capability | What it provides |
|
||||
|------------|------------------|
|
||||
| `K8sclient` | A Kubernetes API client |
|
||||
| `HelmCommand` | A configured `helm` CLI invocation |
|
||||
| `TlsRouter` | TLS certificate management |
|
||||
| `NetworkManager` | Host network configuration |
|
||||
| `SwitchClient` | Network switch configuration |
|
||||
| `CertificateManagement` | Certificate issuance via cert-manager |
|
||||
|
||||
## Implementing a Capability
|
||||
|
||||
Capabilities are implemented as trait methods on your Topology:
|
||||
|
||||
```rust
|
||||
use std::sync::Arc;
|
||||
use harmony_k8s::K8sClient;
|
||||
use harmony::topology::K8sclient;
use async_trait::async_trait;
|
||||
|
||||
pub struct MyTopology {
|
||||
kubeconfig: Option<String>,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl K8sclient for MyTopology {
|
||||
async fn k8s_client(&self) -> Result<Arc<K8sClient>, String> {
|
||||
let client = match &self.kubeconfig {
|
||||
Some(path) => K8sClient::from_kubeconfig(path).await?,
|
||||
None => K8sClient::try_default().await?,
|
||||
};
|
||||
Ok(Arc::new(client))
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Adding a Custom Capability
|
||||
|
||||
For specialized infrastructure needs, add your own Capability as a trait:
|
||||
|
||||
```rust
|
||||
use async_trait::async_trait;
|
||||
use crate::executors::ExecutorError;
|
||||
|
||||
/// A capability for configuring network switches
|
||||
#[async_trait]
|
||||
pub trait SwitchClient: Send + Sync {
|
||||
async fn configure_port(
|
||||
&self,
|
||||
switch: &str,
|
||||
port: &str,
|
||||
vlan: u16,
|
||||
) -> Result<(), ExecutorError>;
|
||||
|
||||
async fn configure_port_channel(
|
||||
&self,
|
||||
switch: &str,
|
||||
name: &str,
|
||||
ports: &[&str],
|
||||
) -> Result<(), ExecutorError>;
|
||||
}
|
||||
```
|
||||
|
||||
Then implement it on your Topology:
|
||||
|
||||
```rust
|
||||
use harmony_infra::brocade::BrocadeClient;
|
||||
|
||||
pub struct MyTopology {
|
||||
switch_client: Arc<dyn SwitchClient>,
|
||||
}
|
||||
|
||||
#[async_trait]
impl SwitchClient for MyTopology {
|
||||
async fn configure_port(&self, switch: &str, port: &str, vlan: u16) -> Result<(), ExecutorError> {
|
||||
self.switch_client.configure_port(switch, port, vlan).await
|
||||
}
|
||||
|
||||
async fn configure_port_channel(&self, switch: &str, name: &str, ports: &[&str]) -> Result<(), ExecutorError> {
|
||||
self.switch_client.configure_port_channel(switch, name, ports).await
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Now Scores that need `SwitchClient` can run on `MyTopology`.
|
||||
|
||||
## Capability Composition
|
||||
|
||||
Topologies often compose multiple Capabilities to support complex Scores:
|
||||
|
||||
```rust
|
||||
pub struct HAClusterTopology {
|
||||
pub kubeconfig: Option<String>,
|
||||
pub router: Arc<dyn Router>,
|
||||
pub load_balancer: Arc<dyn LoadBalancer>,
|
||||
pub switch_client: Arc<dyn SwitchClient>,
|
||||
pub dhcp_server: Arc<dyn DhcpServer>,
|
||||
pub dns_server: Arc<dyn DnsServer>,
|
||||
// ...
|
||||
}
|
||||
|
||||
impl K8sclient for HAClusterTopology { ... }
|
||||
impl HelmCommand for HAClusterTopology { ... }
|
||||
impl SwitchClient for HAClusterTopology { ... }
|
||||
impl DhcpServer for HAClusterTopology { ... }
|
||||
impl DnsServer for HAClusterTopology { ... }
|
||||
impl Router for HAClusterTopology { ... }
|
||||
impl LoadBalancer for HAClusterTopology { ... }
|
||||
```
|
||||
|
||||
A Score that needs all of these can run on `HAClusterTopology` because the Topology provides all of them.
|
||||
|
||||
## Best Practices
|
||||
|
||||
- **Keep Capabilities focused** — one Capability per concern (Kubernetes client, Helm, switch config)
|
||||
- **Return meaningful errors** — use specific error types so Scores can handle failures appropriately
|
||||
- **Make Capabilities optional where sensible** — not every Topology needs every Capability; use `Option<T>` or a separate trait for optional features (see the sketch after this list)
|
||||
- **Document preconditions** — if a Capability requires the infrastructure to be in a specific state, document it in the trait doc comments
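As an example of the separate-trait approach, an optional capability can be declared on its own so that only Topologies able to provide it implement it (the trait and method below are hypothetical, not part of Harmony):

```rust
use async_trait::async_trait;

/// Optional capability: only Topologies that can schedule GPUs implement it.
#[async_trait]
pub trait GpuScheduling: Send + Sync {
    async fn reserve_gpus(&self, node: &str, count: u8) -> Result<(), String>;
}
```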
40
docs/guides/developer-guide.md
Normal file
40
docs/guides/developer-guide.md
Normal file
@@ -0,0 +1,40 @@
|
||||
# Developer Guide
|
||||
|
||||
This section covers how to extend Harmony by building your own `Score`, `Topology`, and `Capability` implementations.
|
||||
|
||||
## Writing a Score
|
||||
|
||||
A `Score` is a declarative description of desired state. To create your own:
|
||||
|
||||
1. Define a struct that represents your desired state
|
||||
2. Implement the `Score<T>` trait, where `T` is your target `Topology`
|
||||
3. Implement the `Interpret<T>` trait to define how the Score translates to infrastructure actions
|
||||
|
||||
See the [Writing a Score](./writing-a-score.md) guide for a step-by-step walkthrough.
|
||||
|
||||
## Writing a Topology
|
||||
|
||||
A `Topology` models your infrastructure environment. To create your own:
|
||||
|
||||
1. Define a struct that holds your infrastructure configuration
|
||||
2. Implement the `Topology` trait
|
||||
3. Implement the `Capability` traits your Score needs
|
||||
|
||||
See the [Writing a Topology](./writing-a-topology.md) guide for details.
|
||||
|
||||
## Adding Capabilities
|
||||
|
||||
`Capabilities` are the specific APIs or features a `Topology` exposes. They are the bridge between Scores and the actual infrastructure.
|
||||
|
||||
See the [Adding Capabilities](./adding-capabilities.md) guide for details on implementing and exposing Capabilities.
|
||||
|
||||
## Core Traits Reference
|
||||
|
||||
| Trait | Purpose |
|
||||
|-------|---------|
|
||||
| `Score<T>` | Declares desired state ("what") |
|
||||
| `Topology` | Represents infrastructure ("where") |
|
||||
| `Interpret<T>` | Execution logic ("how") |
|
||||
| `Capability` | A feature exposed by a Topology |
|
||||
|
||||
See [Core Concepts](../concepts.md) for the conceptual foundation.
|
||||
@@ -1,42 +1,230 @@
|
||||
# Getting Started Guide
|
||||
|
||||
Welcome to Harmony! This guide will walk you through installing the Harmony framework, setting up a new project, and deploying your first application.
|
||||
This guide walks you through deploying your first application with Harmony — a PostgreSQL cluster on a local Kubernetes cluster (K3D). By the end, you'll understand the core workflow: compile a Score, run it through the Harmony CLI, and verify the result.
|
||||
|
||||
We will build and deploy the "Rust Web App" example, which automatically:
|
||||
## What you'll deploy
|
||||
|
||||
1. Provisions a local K3D (Kubernetes in Docker) cluster.
|
||||
2. Deploys a sample Rust web application.
|
||||
3. Sets up monitoring for the application.
|
||||
A fully functional PostgreSQL cluster running in a local K3D cluster, managed by the CloudNativePG operator. This demonstrates the full Harmony pattern:
|
||||
|
||||
1. Provision a local Kubernetes cluster (K3D)
|
||||
2. Install the required operator (CloudNativePG)
|
||||
3. Create a PostgreSQL cluster
|
||||
4. Expose it as a Kubernetes Service
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Before you begin, you'll need a few tools installed on your system:
|
||||
Before you begin, install the following tools:
|
||||
|
||||
- **Rust & Cargo:** [Install Rust](https://www.rust-lang.org/tools/install)
|
||||
- **Docker:** [Install Docker](https://docs.docker.com/get-docker/) (Required for the K3D local cluster)
|
||||
- **kubectl:** [Install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) (For inspecting the cluster)
|
||||
- **Rust & Cargo:** [Install Rust](https://rust-lang.org/tools/install) (edition 2024)
|
||||
- **Docker:** [Install Docker](https://docs.docker.com/get-docker/) (required for the local K3D cluster)
|
||||
- **kubectl:** [Install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) (optional, for inspecting the cluster)
|
||||
|
||||
## 1. Install Harmony
|
||||
|
||||
First, clone the Harmony repository and build the project. This gives you the `harmony` CLI and all the core libraries.
|
||||
## Step 1: Clone and build
|
||||
|
||||
```bash
|
||||
# Clone the main repository
|
||||
# Clone the repository
|
||||
git clone https://git.nationtech.io/nationtech/harmony
|
||||
cd harmony
|
||||
|
||||
# Build the project (this may take a few minutes)
|
||||
# Build the project (this may take a few minutes on first run)
|
||||
cargo build --release
|
||||
```
|
||||
|
||||
...
|
||||
## Step 2: Run the PostgreSQL example
|
||||
|
||||
## Next Steps
|
||||
```bash
|
||||
cargo run -p example-postgresql
|
||||
```
|
||||
|
||||
Congratulations, you've just deployed an application using true infrastructure-as-code!
|
||||
Harmony will output its progress as it:
|
||||
|
||||
From here, you can:
|
||||
1. **Creates a K3D cluster** named `harmony-postgres-example` (first run only)
|
||||
2. **Installs the CloudNativePG operator** into the cluster
|
||||
3. **Creates a PostgreSQL cluster** with 1 instance and 1 GiB of storage
|
||||
4. **Prints connection details** for your new database
|
||||
|
||||
- [Explore the Catalogs](../catalogs/README.md): See what other [Scores](../catalogs/scores.md) and [Topologies](../catalogs/topologies.md) are available.
|
||||
- [Read the Use Cases](../use-cases/README.md): Check out the [OKD on Bare Metal](./use-cases/okd-on-bare-metal.md) guide for a more advanced scenario.
|
||||
- [Write your own Score](../guides/writing-a-score.md): Dive into the [Developer Guide](./guides/developer-guide.md) to start building your own components.
|
||||
Expected output (abbreviated):
|
||||
|
||||
```
|
||||
[+] Cluster created
|
||||
[+] Installing CloudNativePG operator
|
||||
[+] Creating PostgreSQL cluster
|
||||
[+] PostgreSQL cluster is ready
|
||||
Namespace: harmony-postgres-example
|
||||
Service: harmony-postgres-example-rw
|
||||
Username: postgres
|
||||
Password: <stored in secret harmony-postgres-example-db-user>
|
||||
```
|
||||
|
||||
## Step 3: Verify the deployment
|
||||
|
||||
Check that the PostgreSQL pods are running:
|
||||
|
||||
```bash
|
||||
kubectl get pods -n harmony-postgres-example
|
||||
```
|
||||
|
||||
You should see something like:
|
||||
|
||||
```
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
harmony-postgres-example-1 1/1 Running 0 2m
|
||||
```
|
||||
|
||||
Get the database password:
|
||||
|
||||
```bash
|
||||
kubectl get secret -n harmony-postgres-example harmony-postgres-example-db-user -o jsonpath='{.data.password}' | base64 -d
|
||||
```
|
||||
|
||||
## Step 4: Connect to the database
|
||||
|
||||
Forward the PostgreSQL port to your local machine:
|
||||
|
||||
```bash
|
||||
kubectl port-forward -n harmony-postgres-example svc/harmony-postgres-example-rw 5432:5432
|
||||
```
|
||||
|
||||
In another terminal, connect with `psql`:
|
||||
|
||||
```bash
|
||||
psql -h localhost -p 5432 -U postgres
|
||||
# Enter the password from Step 3 when prompted
|
||||
```
|
||||
|
||||
Try a simple query:
|
||||
|
||||
```sql
|
||||
SELECT version();
|
||||
```
|
||||
|
||||
## Step 5: Clean up
|
||||
|
||||
To delete the PostgreSQL cluster and the local K3D cluster:
|
||||
|
||||
```bash
|
||||
k3d cluster delete harmony-postgres-example
|
||||
```
|
||||
|
||||
Alternatively, just delete the PostgreSQL cluster without removing K3D:
|
||||
|
||||
```bash
|
||||
kubectl delete namespace harmony-postgres-example
|
||||
```
|
||||
|
||||
## How it works
|
||||
|
||||
The example code (`examples/postgresql/src/main.rs`) is straightforward:
|
||||
|
||||
```rust
|
||||
use harmony::{
|
||||
inventory::Inventory,
|
||||
modules::postgresql::{PostgreSQLScore, capability::PostgreSQLConfig},
|
||||
topology::K8sAnywhereTopology,
|
||||
};
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
let postgres = PostgreSQLScore {
|
||||
config: PostgreSQLConfig {
|
||||
cluster_name: "harmony-postgres-example".to_string(),
|
||||
namespace: "harmony-postgres-example".to_string(),
|
||||
..Default::default()
|
||||
},
|
||||
};
|
||||
|
||||
harmony_cli::run(
|
||||
Inventory::autoload(),
|
||||
K8sAnywhereTopology::from_env(),
|
||||
vec![Box::new(postgres)],
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
```
|
||||
|
||||
- **`Inventory::autoload()`** discovers the local environment (or uses an existing inventory)
|
||||
- **`K8sAnywhereTopology::from_env()`** connects to K3D if `HARMONY_AUTOINSTALL=true` (the default), or to any Kubernetes cluster via `KUBECONFIG`
|
||||
- **`harmony_cli::run(...)`** executes the Score against the Topology, managing the full lifecycle
|
||||
|
||||
## Connecting to an existing cluster
|
||||
|
||||
By default, Harmony provisions a local K3D cluster. To use an existing Kubernetes cluster instead:
|
||||
|
||||
```bash
|
||||
export KUBECONFIG=/path/to/your/kubeconfig
|
||||
export HARMONY_USE_LOCAL_K3D=false
|
||||
export HARMONY_AUTOINSTALL=false
|
||||
|
||||
cargo run -p example-postgresql
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Docker is not running
|
||||
|
||||
```
|
||||
Error: could not create cluster: docker is not running
|
||||
```
|
||||
|
||||
Start Docker and try again.
|
||||
|
||||
### K3D cluster creation fails
|
||||
|
||||
```
|
||||
Error: failed to create k3d cluster
|
||||
```
|
||||
|
||||
Ensure you have at least 2 CPU cores and 4 GiB of RAM available for Docker.
|
||||
|
||||
### `kubectl` cannot connect to the cluster
|
||||
|
||||
```
|
||||
error: unable to connect to a kubernetes cluster
|
||||
```
|
||||
|
||||
After Harmony creates the cluster, it writes the kubeconfig to `~/.kube/config` or to the path in `KUBECONFIG`. Verify:
|
||||
|
||||
```bash
|
||||
kubectl cluster-info --context k3d-harmony-postgres-example
|
||||
```
|
||||
|
||||
### Port forward fails
|
||||
|
||||
```
|
||||
error: unable to forward port
|
||||
```
|
||||
|
||||
Make sure no other process is using port 5432, or use a different local port:
|
||||
|
||||
```bash
|
||||
kubectl port-forward -n harmony-postgres-example svc/harmony-postgres-example-rw 15432:5432
|
||||
psql -h localhost -p 15432 -U postgres
|
||||
```
|
||||
|
||||
## Next steps
|
||||
|
||||
- [Explore the Scores Catalog](../catalogs/scores.md): See what other Scores are available
|
||||
- [Explore the Topologies Catalog](../catalogs/topologies.md): See what infrastructure Topologies are supported
|
||||
- [Read the Core Concepts](../concepts.md): Understand the Score / Topology / Interpret pattern in depth
|
||||
- [OKD on Bare Metal](../use-cases/okd-on-bare-metal.md): See a complete bare-metal deployment example
|
||||
|
||||
## Advanced examples
|
||||
|
||||
Once you're comfortable with the basics, these examples demonstrate more advanced use cases. Note that some require specific infrastructure (existing Kubernetes clusters, bare-metal hardware, or multi-cluster environments):
|
||||
|
||||
| Example | Description | Prerequisites |
|
||||
|---------|-------------|---------------|
|
||||
| `monitoring` | Deploy Prometheus alerting with Discord webhooks | Existing K8s cluster |
|
||||
| `ntfy` | Deploy ntfy notification server | Existing K8s cluster |
|
||||
| `tenant` | Create a multi-tenant namespace with quotas | Existing K8s cluster |
|
||||
| `cert_manager` | Provision TLS certificates | Existing K8s cluster |
|
||||
| `validate_ceph_cluster_health` | Check Ceph cluster health | Existing Rook/Ceph cluster |
|
||||
| `okd_pxe` / `okd_installation` | Provision OKD on bare metal | HAClusterTopology, bare-metal hardware |
|
||||
|
||||
To run any example:
|
||||
|
||||
```bash
|
||||
cargo run -p example-<example_name>
|
||||
```
|
||||
|
||||
158
docs/guides/kubernetes-ingress.md
Normal file
158
docs/guides/kubernetes-ingress.md
Normal file
@@ -0,0 +1,158 @@
|
||||
# Ingress Resources in Harmony
|
||||
|
||||
Harmony generates standard Kubernetes `networking.k8s.io/v1` Ingress resources. This ensures your deployments are portable across any Kubernetes distribution (vanilla K8s, OKD/OpenShift, K3s, etc.) without requiring vendor-specific configurations.
|
||||
|
||||
By default, Harmony does **not** set `spec.ingressClassName`. This allows the cluster's default ingress controller to automatically claim the resource, which is the correct approach for most single-controller clusters.
|
||||
|
||||
---
|
||||
|
||||
## TLS Configurations
|
||||
|
||||
There are two portable TLS modes for Ingress resources. Use only these in your Harmony deployments.
|
||||
|
||||
### 1. Plain HTTP (No TLS)
|
||||
|
||||
Omit the `tls` block entirely. The Ingress serves traffic over plain HTTP. Use this for local development or when TLS is terminated elsewhere (e.g., by a service mesh or external load balancer).
|
||||
|
||||
```yaml
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: my-app
|
||||
namespace: my-ns
|
||||
spec:
|
||||
rules:
|
||||
- host: app.example.com
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: my-app
|
||||
port:
|
||||
number: 8080
|
||||
```
|
||||
|
||||
### 2. HTTPS with a Named TLS Secret
|
||||
|
||||
Provide a `tls` block with both `hosts` and a `secretName`. The ingress controller will use that Secret for TLS termination. The Secret must be a `kubernetes.io/tls` type in the same namespace as the Ingress.
|
||||
|
||||
There are two ways to provide this Secret.
|
||||
|
||||
#### Option A: Manual Secret
|
||||
|
||||
Create the TLS Secret yourself before deploying the Ingress. This is suitable when certificates are issued outside the cluster or managed by another system.
|
||||
|
||||
```yaml
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: my-app
|
||||
namespace: my-ns
|
||||
spec:
|
||||
rules:
|
||||
- host: app.example.com
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: my-app
|
||||
port:
|
||||
number: 8080
|
||||
tls:
|
||||
- hosts:
|
||||
- app.example.com
|
||||
secretName: app-example-com-tls
|
||||
```
|
||||
|
||||
#### Option B: Automated via cert-manager (Recommended)
|
||||
|
||||
Add the `cert-manager.io/cluster-issuer` annotation to the Ingress. cert-manager will automatically perform the ACME challenge, generate the certificate, store it in the named Secret, and handle renewal. You do not create the Secret yourself.
|
||||
|
||||
```yaml
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: my-app
|
||||
namespace: my-ns
|
||||
annotations:
|
||||
cert-manager.io/cluster-issuer: letsencrypt-prod
|
||||
spec:
|
||||
rules:
|
||||
- host: app.example.com
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: my-app
|
||||
port:
|
||||
number: 8080
|
||||
tls:
|
||||
- hosts:
|
||||
- app.example.com
|
||||
secretName: app-example-com-tls
|
||||
```
|
||||
|
||||
If you use a namespace-scoped `Issuer` instead of a `ClusterIssuer`, replace the annotation with `cert-manager.io/issuer: <name>`.
|
||||
|
||||
---
|
||||
|
||||
## Do Not Use: TLS Without `secretName`
|
||||
|
||||
Avoid TLS entries that omit `secretName`:
|
||||
|
||||
```yaml
|
||||
# ⚠️ Non-portable — do not use
|
||||
tls:
|
||||
- hosts:
|
||||
- app.example.com
|
||||
```
|
||||
|
||||
Behavior for this pattern is **controller-specific and not portable**. On OKD/OpenShift, the ingress-to-route translation rejects it as incomplete. On other controllers, it may silently serve a self-signed fallback or fail in unpredictable ways. Harmony does not support this pattern.
|
||||
|
||||
---
|
||||
|
||||
## Prerequisites for cert-manager
|
||||
|
||||
To use automated certificates (Option B above):
|
||||
|
||||
1. **cert-manager** must be installed on the cluster.
|
||||
2. A `ClusterIssuer` or `Issuer` must exist. A typical Let's Encrypt production issuer:
|
||||
|
||||
```yaml
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: ClusterIssuer
|
||||
metadata:
|
||||
name: letsencrypt-prod
|
||||
spec:
|
||||
acme:
|
||||
server: https://acme-v02.api.letsencrypt.org/directory
|
||||
email: team@example.com
|
||||
privateKeySecretRef:
|
||||
name: letsencrypt-prod-account-key
|
||||
solvers:
|
||||
- http01:
|
||||
ingress: {}
|
||||
```
|
||||
|
||||
3. **DNS must already resolve** to the cluster's ingress endpoint before the Ingress is created. The HTTP01 challenge requires this routing to be active.
|
||||
|
||||
For wildcard certificates (e.g. `*.example.com`), HTTP01 cannot be used — configure a DNS01 solver with credentials for your DNS provider instead.
|
||||
|
||||
---
|
||||
|
||||
## OKD / OpenShift Notes
|
||||
|
||||
On OKD, standard Ingress resources are automatically translated into OpenShift `Route` objects. The default TLS termination mode is `edge`, which is correct for most HTTP applications. To control this explicitly, add:
|
||||
|
||||
```yaml
|
||||
annotations:
|
||||
route.openshift.io/termination: edge # or passthrough / reencrypt
|
||||
```
|
||||
|
||||
This annotation is ignored on non-OpenShift clusters and is safe to include unconditionally.
|
||||
164
docs/guides/writing-a-score.md
Normal file
164
docs/guides/writing-a-score.md
Normal file
@@ -0,0 +1,164 @@
|
||||
# Writing a Score
|
||||
|
||||
A `Score` declares _what_ you want to achieve. It is decoupled from _how_ it is achieved — that logic lives in an `Interpret`.
|
||||
|
||||
## The Pattern
|
||||
|
||||
A Score consists of two parts:
|
||||
|
||||
1. **A struct** — holds the configuration for your desired state
|
||||
2. **A `Score<T>` implementation** — returns an `Interpret` that knows how to execute
|
||||
|
||||
An `Interpret` contains the actual execution logic and connects your Score to the capabilities exposed by a `Topology`.
|
||||
|
||||
## Example: A Simple Score
|
||||
|
||||
Here's a simplified version of `NtfyScore` from the `ntfy` module:
|
||||
|
||||
```rust
|
||||
use async_trait::async_trait;
|
||||
use harmony::{
|
||||
interpret::{Interpret, InterpretError, Outcome},
|
||||
inventory::Inventory,
|
||||
score::Score,
|
||||
topology::{HelmCommand, K8sclient, Topology},
|
||||
};
|
||||
|
||||
/// MyScore declares "I want to install the ntfy server"
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct MyScore {
|
||||
pub namespace: String,
|
||||
pub host: String,
|
||||
}
|
||||
|
||||
impl<T: Topology + HelmCommand + K8sclient> Score<T> for MyScore {
|
||||
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
||||
Box::new(MyInterpret { score: self.clone() })
|
||||
}
|
||||
|
||||
fn name(&self) -> String {
|
||||
"ntfy [MyScore]".into()
|
||||
}
|
||||
}
|
||||
|
||||
/// MyInterpret knows _how_ to install ntfy using the Topology's capabilities
|
||||
#[derive(Debug)]
|
||||
pub struct MyInterpret {
|
||||
pub score: MyScore,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<T: Topology + HelmCommand + K8sclient> Interpret<T> for MyInterpret {
|
||||
async fn execute(
|
||||
&self,
|
||||
inventory: &Inventory,
|
||||
topology: &T,
|
||||
) -> Result<Outcome, InterpretError> {
|
||||
// 1. Get a Kubernetes client from the Topology
|
||||
let client = topology.k8s_client().await?;
|
||||
|
||||
// 2. Use Helm to install the ntfy chart
|
||||
// (via topology's HelmCommand capability)
|
||||
|
||||
// 3. Wait for the deployment to be ready
|
||||
client
|
||||
.wait_until_deployment_ready("ntfy", Some(&self.score.namespace), None)
|
||||
.await?;
|
||||
|
||||
Ok(Outcome::success("ntfy installed".to_string()))
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## The Compile-Time Safety Check
|
||||
|
||||
The generic `Score<T>` trait is bounded by `T: Topology`. This means the compiler enforces that your Score only runs on Topologies that expose the capabilities your Interpret needs:
|
||||
|
||||
```rust
|
||||
// This only compiles if K8sAnywhereTopology (or any T)
|
||||
// implements HelmCommand and K8sclient
|
||||
impl<T: Topology + HelmCommand + K8sclient> Score<T> for MyScore { ... }
|
||||
```
|
||||
|
||||
If you try to run this Score against a Topology that doesn't expose `HelmCommand`, you get a compile error — before any code runs.
|
||||
|
||||
## Using Your Score
|
||||
|
||||
Once defined, your Score integrates with the Harmony CLI:
|
||||
|
||||
```rust
|
||||
use harmony::{
|
||||
inventory::Inventory,
|
||||
topology::K8sAnywhereTopology,
|
||||
};
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
let my_score = MyScore {
|
||||
namespace: "monitoring".to_string(),
|
||||
host: "ntfy.example.com".to_string(),
|
||||
};
|
||||
|
||||
harmony_cli::run(
|
||||
Inventory::autoload(),
|
||||
K8sAnywhereTopology::from_env(),
|
||||
vec![Box::new(my_score)],
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
```
|
||||
|
||||
## Key Patterns
|
||||
|
||||
### Composing Scores
|
||||
|
||||
Scores can include other Scores via features:
|
||||
|
||||
```rust
|
||||
// Define the application first, then reference it from each feature.
let application = MyApplication::new();

let app = ApplicationScore {
    features: vec![
        Box::new(PackagingDeployment { application: application.clone() }),
        Box::new(Monitoring { application: application.clone(), alert_receiver: vec![] }),
    ],
    application,
};
|
||||
```
|
||||
|
||||
### Reusing Interpret Logic
|
||||
|
||||
Many Scores delegate to shared `Interpret` implementations. For example, `HelmChartScore` provides a reusable Interpret for any Helm-based deployment. Your Score can wrap it:
|
||||
|
||||
```rust
|
||||
impl<T: Topology + HelmCommand> Score<T> for MyScore {
|
||||
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
||||
Box::new(HelmChartInterpret { /* your config */ })
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Accessing Topology Capabilities
|
||||
|
||||
Your Interpret accesses infrastructure through Capabilities exposed by the Topology:
|
||||
|
||||
```rust
|
||||
// Via the Topology trait directly
|
||||
let k8s_client = topology.k8s_client().await?;
|
||||
let helm = topology.get_helm_command();
|
||||
|
||||
// Or via Capability traits
|
||||
impl<T: Topology + K8sclient> Interpret<T> for MyInterpret {
|
||||
async fn execute(...) {
|
||||
let client = topology.k8s_client().await?;
|
||||
// use client...
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
- **Keep Scores focused** — one Score per concern (deployment, monitoring, networking)
|
||||
- **Use `..Default::default()`** for optional fields so callers only need to specify what they care about
|
||||
- **Return `Outcome`** — use `Outcome::success`, `Outcome::failure`, or `Outcome::success_with_details` to communicate results clearly
|
||||
- **Handle errors gracefully** — return meaningful `InterpretError` messages that help operators debug issues
|
||||
176
docs/guides/writing-a-topology.md
Normal file
176
docs/guides/writing-a-topology.md
Normal file
@@ -0,0 +1,176 @@
|
||||
# Writing a Topology
|
||||
|
||||
A `Topology` models your infrastructure environment and exposes `Capability` traits that Scores use to interact with it. Where a Score declares _what_ you want, a Topology exposes _what_ it can do.
|
||||
|
||||
## The Minimum Implementation
|
||||
|
||||
At minimum, a Topology needs:
|
||||
|
||||
```rust
|
||||
use async_trait::async_trait;
|
||||
use harmony::{
|
||||
topology::{PreparationError, PreparationOutcome, Topology},
|
||||
};
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct MyTopology {
|
||||
pub name: String,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Topology for MyTopology {
|
||||
fn name(&self) -> &str {
|
||||
"MyTopology"
|
||||
}
|
||||
|
||||
async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
|
||||
// Verify the infrastructure is accessible and ready
|
||||
Ok(PreparationOutcome::Success { details: "ready".to_string() })
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Implementing Capabilities
|
||||
|
||||
Scores express dependencies on Capabilities through trait bounds. For example, if your Topology should support Scores that deploy Helm charts, implement `HelmCommand`:
|
||||
|
||||
```rust
|
||||
use std::process::Command;
|
||||
use harmony::topology::HelmCommand;
|
||||
|
||||
impl HelmCommand for MyTopology {
|
||||
fn get_helm_command(&self) -> Command {
|
||||
let mut cmd = Command::new("helm");
|
||||
if let Some(kubeconfig) = &self.kubeconfig {
|
||||
cmd.arg("--kubeconfig").arg(kubeconfig);
|
||||
}
|
||||
cmd
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
For Scores that need a Kubernetes client, implement `K8sclient`:
|
||||
|
||||
```rust
|
||||
use std::sync::Arc;
|
||||
use harmony_k8s::K8sClient;
|
||||
use harmony::topology::K8sclient;
|
||||
|
||||
#[async_trait]
|
||||
impl K8sclient for MyTopology {
|
||||
async fn k8s_client(&self) -> Result<Arc<K8sClient>, String> {
|
||||
let client = if let Some(kubeconfig) = &self.kubeconfig {
|
||||
K8sClient::from_kubeconfig(kubeconfig).await?
|
||||
} else {
|
||||
K8sClient::try_default().await?
|
||||
};
|
||||
Ok(Arc::new(client))
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Loading Topology from Environment
|
||||
|
||||
For flexibility, implement `from_env()` to read configuration from environment variables:
|
||||
|
||||
```rust
|
||||
impl MyTopology {
|
||||
pub fn from_env() -> Self {
|
||||
Self {
|
||||
name: std::env::var("MY_TOPOLOGY_NAME")
|
||||
.unwrap_or_else(|_| "default".to_string()),
|
||||
kubeconfig: std::env::var("KUBECONFIG").ok(),
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
This pattern lets operators switch between environments without recompiling:
|
||||
|
||||
```bash
|
||||
export KUBECONFIG=/path/to/prod-cluster.kubeconfig
|
||||
cargo run --example my_example
|
||||
```
|
||||
|
||||
## Complete Example: K8sAnywhereTopology
|
||||
|
||||
The `K8sAnywhereTopology` is the most commonly used Topology and handles both local (K3D) and remote Kubernetes clusters:
|
||||
|
||||
```rust
|
||||
pub struct K8sAnywhereTopology {
|
||||
pub k8s_state: Arc<OnceCell<K8sState>>,
|
||||
pub tenant_manager: Arc<OnceCell<TenantManager>>,
|
||||
pub config: Arc<K8sAnywhereConfig>,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Topology for K8sAnywhereTopology {
|
||||
fn name(&self) -> &str {
|
||||
"K8sAnywhereTopology"
|
||||
}
|
||||
|
||||
async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
|
||||
// 1. If autoinstall is enabled and no cluster exists, provision K3D
|
||||
// 2. Verify kubectl connectivity
|
||||
// 3. Optionally wait for cluster operators to be ready
|
||||
Ok(PreparationOutcome::Success { details: "cluster ready".to_string() })
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Key Patterns
|
||||
|
||||
### Lazy Initialization
|
||||
|
||||
Use `OnceCell` for expensive resources like Kubernetes clients:
|
||||
|
||||
```rust
|
||||
pub struct K8sAnywhereTopology {
|
||||
k8s_state: Arc<OnceCell<K8sState>>,
|
||||
}
|
||||
```
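A minimal sketch of how the cell is typically filled (assuming tokio's async `OnceCell`; `K8sState::detect` is a hypothetical constructor):

```rust
impl K8sAnywhereTopology {
    // Built once on first use, then shared by every later call.
    async fn state(&self) -> Result<&K8sState, String> {
        self.k8s_state
            .get_or_try_init(|| async { K8sState::detect().await })
            .await
    }
}
```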
### Multi-Target Topologies
|
||||
|
||||
For Scores that span multiple clusters (like NATS supercluster), implement `MultiTargetTopology`:
|
||||
|
||||
```rust
|
||||
pub trait MultiTargetTopology: Topology {
|
||||
fn current_target(&self) -> &str;
|
||||
fn set_target(&mut self, target: &str);
|
||||
}
|
||||
```
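Implementing it then amounts to tracking which cluster is currently active (the `target` field is illustrative):

```rust
impl MultiTargetTopology for MyTopology {
    fn current_target(&self) -> &str {
        &self.target
    }

    fn set_target(&mut self, target: &str) {
        self.target = target.to_string();
    }
}
```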
### Composing Topologies
|
||||
|
||||
Complex topologies combine multiple infrastructure components:
|
||||
|
||||
```rust
|
||||
pub struct HAClusterTopology {
|
||||
pub router: Arc<dyn Router>,
|
||||
pub load_balancer: Arc<dyn LoadBalancer>,
|
||||
pub firewall: Arc<dyn Firewall>,
|
||||
pub dhcp_server: Arc<dyn DhcpServer>,
|
||||
pub dns_server: Arc<dyn DnsServer>,
|
||||
pub kubeconfig: Option<String>,
|
||||
// ...
|
||||
}
|
||||
```
|
||||
|
||||
## Testing Your Topology
|
||||
|
||||
Test Topologies in isolation by implementing them against mock infrastructure:
|
||||
|
||||
```rust
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_topology_ensure_ready() {
|
||||
let topo = MyTopology::from_env();
|
||||
let result = topo.ensure_ready().await;
|
||||
assert!(result.is_ok());
|
||||
}
|
||||
}
|
||||
```
|
||||
@@ -1,443 +0,0 @@
|
||||
# Monitoring and Alerting in Harmony
|
||||
|
||||
Harmony provides a unified, type-safe approach to monitoring and alerting across Kubernetes, OpenShift, and bare-metal infrastructure. This guide explains the architecture and how to use it at different levels of abstraction.
|
||||
|
||||
## Overview
|
||||
|
||||
Harmony's monitoring module supports three distinct use cases:
|
||||
|
||||
| Level | Who Uses It | What It Provides |
|
||||
|-------|-------------|------------------|
|
||||
| **Cluster** | Cluster administrators | Full control over monitoring stack, cluster-wide alerts, external scrape targets |
|
||||
| **Tenant** | Platform teams | Namespace-scoped monitoring in multi-tenant environments |
|
||||
| **Application** | Application developers | Zero-config monitoring that "just works" |
|
||||
|
||||
Each level builds on the same underlying abstractions, ensuring consistency while providing appropriate complexity for each audience.
|
||||
|
||||
## Core Concepts
|
||||
|
||||
### AlertSender
|
||||
|
||||
An `AlertSender` represents the system that evaluates alert rules and sends notifications. Harmony supports multiple monitoring stacks:
|
||||
|
||||
| Sender | Description | Use When |
|
||||
|--------|-------------|----------|
|
||||
| `OpenshiftClusterAlertSender` | OKD/OpenShift built-in monitoring | Running on OKD/OpenShift |
|
||||
| `KubePrometheus` | kube-prometheus-stack via Helm | Standard Kubernetes, need full stack |
|
||||
| `Prometheus` | Standalone Prometheus | Custom Prometheus deployment |
|
||||
| `RedHatClusterObservability` | RHOB operator | Red Hat managed clusters |
|
||||
| `Grafana` | Grafana-managed alerting | Grafana as primary alerting layer |
|
||||
|
||||
### AlertReceiver
|
||||
|
||||
An `AlertReceiver` defines where alerts are sent (Discord, Slack, email, webhook, etc.). Receivers are parameterized by sender type because each monitoring stack has different configuration formats.
|
||||
|
||||
```rust
|
||||
pub trait AlertReceiver<S: AlertSender> {
|
||||
fn build(&self) -> Result<ReceiverInstallPlan, InterpretError>;
|
||||
fn name(&self) -> String;
|
||||
}
|
||||
```
|
||||
|
||||
Built-in receivers:
|
||||
- `DiscordReceiver` - Discord webhooks
|
||||
- `WebhookReceiver` - Generic HTTP webhooks
|
||||
|
||||
### AlertRule
|
||||
|
||||
An `AlertRule` defines a Prometheus alert expression. Rules are also parameterized by sender to handle different CRD formats.
|
||||
|
||||
```rust
|
||||
pub trait AlertRule<S: AlertSender> {
|
||||
fn build_rule(&self) -> Result<serde_json::Value, InterpretError>;
|
||||
fn name(&self) -> String;
|
||||
}
|
||||
```
|
||||
|
||||
### Observability Capability
|
||||
|
||||
Topologies implement `Observability<S>` to indicate they support a specific alert sender:
|
||||
|
||||
```rust
|
||||
impl Observability<OpenshiftClusterAlertSender> for K8sAnywhereTopology {
|
||||
async fn install_receivers(&self, sender, inventory, receivers) { ... }
|
||||
async fn install_rules(&self, sender, inventory, rules) { ... }
|
||||
// ...
|
||||
}
|
||||
```
|
||||
|
||||
This provides **compile-time verification**: if you try to use `OpenshiftClusterAlertScore` with a topology that doesn't implement `Observability<OpenshiftClusterAlertSender>`, the code won't compile.
|
||||
|
||||
---
|
||||
|
||||
## Level 1: Cluster Monitoring
|
||||
|
||||
Cluster monitoring is for administrators who need full control over the monitoring infrastructure. This includes:
|
||||
- Installing/managing the monitoring stack
|
||||
- Configuring cluster-wide alert receivers
|
||||
- Defining cluster-level alert rules
|
||||
- Adding external scrape targets (e.g., bare-metal servers, firewalls)
|
||||
|
||||
### Example: OKD Cluster Alerts
|
||||
|
||||
```rust
|
||||
use harmony::{
|
||||
modules::monitoring::{
|
||||
alert_channel::discord_alert_channel::DiscordReceiver,
|
||||
alert_rule::{alerts::k8s::pvc::high_pvc_fill_rate_over_two_days, prometheus_alert_rule::AlertManagerRuleGroup},
|
||||
okd::openshift_cluster_alerting_score::OpenshiftClusterAlertScore,
|
||||
scrape_target::prometheus_node_exporter::PrometheusNodeExporter,
|
||||
},
|
||||
topology::{K8sAnywhereTopology, monitoring::{AlertMatcher, AlertRoute, MatchOp}},
|
||||
};
|
||||
|
||||
let severity_matcher = AlertMatcher {
|
||||
label: "severity".to_string(),
|
||||
operator: MatchOp::Eq,
|
||||
value: "critical".to_string(),
|
||||
};
|
||||
|
||||
let rule_group = AlertManagerRuleGroup::new(
|
||||
"cluster-rules",
|
||||
vec![high_pvc_fill_rate_over_two_days()],
|
||||
);
|
||||
|
||||
let external_exporter = PrometheusNodeExporter {
|
||||
job_name: "firewall".to_string(),
|
||||
metrics_path: "/metrics".to_string(),
|
||||
listen_address: ip!("192.168.1.1"),
|
||||
port: 9100,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
harmony_cli::run(
|
||||
Inventory::autoload(),
|
||||
K8sAnywhereTopology::from_env(),
|
||||
vec![Box::new(OpenshiftClusterAlertScore {
|
||||
sender: OpenshiftClusterAlertSender,
|
||||
receivers: vec![Box::new(DiscordReceiver {
|
||||
name: "critical-alerts".to_string(),
|
||||
url: hurl!("https://discord.com/api/webhooks/..."),
|
||||
route: AlertRoute {
|
||||
matchers: vec![severity_matcher],
|
||||
..AlertRoute::default("critical-alerts".to_string())
|
||||
},
|
||||
})],
|
||||
rules: vec![Box::new(rule_group)],
|
||||
scrape_targets: Some(vec![Box::new(external_exporter)]),
|
||||
})],
|
||||
None,
|
||||
).await?;
|
||||
```
|
||||
|
||||
### What This Does
|
||||
|
||||
1. **Enables cluster monitoring** - Activates OKD's built-in Prometheus
|
||||
2. **Enables user workload monitoring** - Allows namespace-scoped rules
|
||||
3. **Configures Alertmanager** - Adds Discord receiver with route matching
|
||||
4. **Deploys alert rules** - Creates `AlertingRule` CRD with PVC fill rate alert
|
||||
5. **Adds external scrape target** - Configures Prometheus to scrape the firewall
|
||||
|
||||
### Compile-Time Safety
|
||||
|
||||
The `OpenshiftClusterAlertScore` requires:
|
||||
|
||||
```rust
|
||||
impl<T: Topology + Observability<OpenshiftClusterAlertSender>> Score<T>
|
||||
for OpenshiftClusterAlertScore
|
||||
```
|
||||
|
||||
If `K8sAnywhereTopology` didn't implement `Observability<OpenshiftClusterAlertSender>`, this code would fail to compile. You cannot accidentally deploy OKD alerts to a cluster that doesn't support them.
|
||||
|
||||
---
|
||||
|
||||
## Level 2: Tenant Monitoring
|
||||
|
||||
In multi-tenant clusters, teams are often confined to specific namespaces. Tenant monitoring adapts to this constraint:
|
||||
|
||||
- Resources are deployed in the tenant's namespace
|
||||
- Cannot modify cluster-level monitoring configuration
|
||||
- The topology determines namespace context at runtime
|
||||
|
||||
### How It Works
|
||||
|
||||
The topology's `Observability` implementation handles tenant scoping:
|
||||
|
||||
```rust
|
||||
impl Observability<KubePrometheus> for K8sAnywhereTopology {
|
||||
async fn install_rules(&self, sender, inventory, rules) {
|
||||
// Topology knows if it's tenant-scoped
|
||||
let namespace = self.get_tenant_config().await
|
||||
.map(|t| t.name)
|
||||
.unwrap_or_else(|| "monitoring".to_string());
|
||||
|
||||
// Rules are installed in the appropriate namespace
|
||||
for rule in rules.unwrap_or_default() {
|
||||
let score = KubePrometheusRuleScore {
|
||||
sender: sender.clone(),
|
||||
rule,
|
||||
namespace: namespace.clone(), // Tenant namespace
|
||||
};
|
||||
score.create_interpret().execute(inventory, self).await?;
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Tenant vs Cluster Resources
|
||||
|
||||
| Resource | Cluster-Level | Tenant-Level |
|
||||
|----------|---------------|--------------|
|
||||
| Alertmanager config | Global receivers | Namespaced receivers (where supported) |
|
||||
| PrometheusRules | Cluster-wide alerts | Namespace alerts only |
|
||||
| ServiceMonitors | Any namespace | Own namespace only |
|
||||
| External scrape targets | Can add | Cannot add (cluster config) |
|
||||
|
||||
### Runtime Validation
|
||||
|
||||
Tenant constraints are validated at runtime via Kubernetes RBAC. If a tenant-scoped deployment attempts cluster-level operations, it fails with a clear permission error from the Kubernetes API.
|
||||
|
||||
This cannot be fully compile-time because tenant context is determined by who's running the code and what permissions they have—information only available at runtime.
|
||||
|
||||
---
|
||||
|
||||
## Level 3: Application Monitoring
|
||||
|
||||
Application monitoring provides zero-config, opinionated monitoring for developers. Just add the `Monitoring` feature to your application and it works.
|
||||
|
||||
### Example
|
||||
|
||||
```rust
|
||||
use harmony::modules::{
|
||||
application::{Application, ApplicationFeature},
|
||||
monitoring::alert_channel::webhook_receiver::WebhookReceiver,
|
||||
};
|
||||
|
||||
// Define your application
|
||||
let my_app = MyApplication::new();
|
||||
|
||||
// Add monitoring as a feature
|
||||
let monitoring = Monitoring {
|
||||
application: Arc::new(my_app),
|
||||
alert_receiver: vec![], // Uses defaults
|
||||
};
|
||||
|
||||
// Install with the application
|
||||
my_app.add_feature(monitoring);
|
||||
```
|
||||
|
||||
### What Application Monitoring Provides
|
||||
|
||||
1. **Automatic ServiceMonitor** - Creates a ServiceMonitor for your application's pods
|
||||
2. **Ntfy Notification Channel** - Auto-installs and configures Ntfy for push notifications
|
||||
3. **Tenant Awareness** - Automatically scopes to the correct namespace
|
||||
4. **Sensible Defaults** - Pre-configured alert routes and receivers
|
||||
|
||||
### Under the Hood
|
||||
|
||||
```rust
|
||||
impl<T: Topology + Observability<Prometheus> + TenantManager>
|
||||
ApplicationFeature<T> for Monitoring
|
||||
{
|
||||
async fn ensure_installed(&self, topology: &T) -> Result<...> {
|
||||
// 1. Get tenant namespace (or use app name)
|
||||
let namespace = topology.get_tenant_config().await
|
||||
.map(|ns| ns.name.clone())
|
||||
.unwrap_or_else(|| self.application.name());
|
||||
|
||||
// 2. Create ServiceMonitor for the app
|
||||
let app_service_monitor = ServiceMonitor {
|
||||
metadata: ObjectMeta {
|
||||
name: Some(self.application.name()),
|
||||
namespace: Some(namespace.clone()),
|
||||
..Default::default()
|
||||
},
|
||||
spec: ServiceMonitorSpec::default(),
|
||||
};
|
||||
|
||||
// 3. Install Ntfy for notifications
|
||||
let ntfy = NtfyScore { namespace, host };
|
||||
ntfy.interpret(&Inventory::empty(), topology).await?;
|
||||
|
||||
// 4. Wire up webhook receiver to Ntfy
|
||||
let ntfy_receiver = WebhookReceiver { ... };
|
||||
|
||||
// 5. Execute monitoring score
|
||||
alerting_score.interpret(&Inventory::empty(), topology).await?;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Pre-Built Alert Rules
|
||||
|
||||
Harmony provides a library of common alert rules in `modules/monitoring/alert_rule/alerts/`:
|
||||
|
||||
### Kubernetes Alerts (`alerts/k8s/`)
|
||||
|
||||
```rust
|
||||
use harmony::modules::monitoring::alert_rule::alerts::k8s::{
|
||||
pod::pod_failed,
|
||||
pvc::high_pvc_fill_rate_over_two_days,
|
||||
memory_usage::alert_high_memory_usage,
|
||||
};
|
||||
|
||||
let rules = AlertManagerRuleGroup::new("k8s-rules", vec![
|
||||
pod_failed(),
|
||||
high_pvc_fill_rate_over_two_days(),
|
||||
alert_high_memory_usage(),
|
||||
]);
|
||||
```
|
||||
|
||||
Available rules:
|
||||
- `pod_failed()` - Pod in failed state
|
||||
- `alert_container_restarting()` - Container restart loop
|
||||
- `alert_pod_not_ready()` - Pod not ready for extended period
|
||||
- `high_pvc_fill_rate_over_two_days()` - PVC will fill within 2 days
|
||||
- `alert_high_memory_usage()` - Memory usage above threshold
|
||||
- `alert_high_cpu_usage()` - CPU usage above threshold
|
||||
|
||||
### Infrastructure Alerts (`alerts/infra/`)
|
||||
|
||||
```rust
|
||||
use harmony::modules::monitoring::alert_rule::alerts::infra::opnsense::high_http_error_rate;
|
||||
|
||||
let rules = AlertManagerRuleGroup::new("infra-rules", vec![
|
||||
high_http_error_rate(),
|
||||
]);
|
||||
```
|
||||
|
||||
### Creating Custom Rules
|
||||
|
||||
```rust
|
||||
use harmony::modules::monitoring::alert_rule::prometheus_alert_rule::PrometheusAlertRule;
|
||||
|
||||
pub fn my_custom_alert() -> PrometheusAlertRule {
|
||||
PrometheusAlertRule::new("MyServiceDown", "up{job=\"my-service\"} == 0")
|
||||
.for_duration("5m")
|
||||
.label("severity", "critical")
|
||||
.annotation("summary", "My service is down")
|
||||
.annotation("description", "The my-service job has been down for more than 5 minutes")
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Alert Receivers
|
||||
|
||||
### Discord Webhook
|
||||
|
||||
```rust
|
||||
use harmony::modules::monitoring::alert_channel::discord_alert_channel::DiscordReceiver;
|
||||
use harmony::topology::monitoring::{AlertRoute, AlertMatcher, MatchOp};
|
||||
|
||||
let discord = DiscordReceiver {
|
||||
name: "ops-alerts".to_string(),
|
||||
url: hurl!("https://discord.com/api/webhooks/123456/abcdef"),
|
||||
route: AlertRoute {
|
||||
receiver: "ops-alerts".to_string(),
|
||||
matchers: vec![AlertMatcher {
|
||||
label: "severity".to_string(),
|
||||
operator: MatchOp::Eq,
|
||||
value: "critical".to_string(),
|
||||
}],
|
||||
group_by: vec!["alertname".to_string()],
|
||||
repeat_interval: Some("30m".to_string()),
|
||||
continue_matching: false,
|
||||
children: vec![],
|
||||
},
|
||||
};
|
||||
```
|
||||
|
||||
### Generic Webhook
|
||||
|
||||
```rust
|
||||
use harmony::modules::monitoring::alert_channel::webhook_receiver::WebhookReceiver;
|
||||
|
||||
let webhook = WebhookReceiver {
|
||||
name: "custom-webhook".to_string(),
|
||||
url: hurl!("https://api.example.com/alerts"),
|
||||
route: AlertRoute::default("custom-webhook".to_string()),
|
||||
};
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Adding a New Monitoring Stack
|
||||
|
||||
To add support for a new monitoring stack:
|
||||
|
||||
1. **Create the sender type** in `modules/monitoring/my_sender/mod.rs`:
|
||||
```rust
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct MySender;
|
||||
|
||||
impl AlertSender for MySender {
|
||||
fn name(&self) -> String { "MySender".to_string() }
|
||||
}
|
||||
```
|
||||
|
||||
2. **Define CRD types** in `modules/monitoring/my_sender/crd/`:
|
||||
```rust
|
||||
#[derive(CustomResource, Debug, Serialize, Deserialize, Clone)]
|
||||
#[kube(group = "monitoring.example.com", version = "v1", kind = "MyAlertRule")]
|
||||
pub struct MyAlertRuleSpec { ... }
|
||||
```
|
||||
|
||||
3. **Implement Observability** in `domain/topology/k8s_anywhere/observability/my_sender.rs`:
|
||||
```rust
|
||||
impl Observability<MySender> for K8sAnywhereTopology {
|
||||
async fn install_receivers(&self, sender, inventory, receivers) { ... }
|
||||
async fn install_rules(&self, sender, inventory, rules) { ... }
|
||||
// ...
|
||||
}
|
||||
```
|
||||
|
||||
4. **Implement receiver conversions** for existing receivers:
|
||||
```rust
|
||||
impl AlertReceiver<MySender> for DiscordReceiver {
|
||||
fn build(&self) -> Result<ReceiverInstallPlan, InterpretError> {
|
||||
// Convert DiscordReceiver to MySender's format
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
5. **Create score types**:
|
||||
```rust
|
||||
pub struct MySenderAlertScore {
|
||||
pub sender: MySender,
|
||||
pub receivers: Vec<Box<dyn AlertReceiver<MySender>>>,
|
||||
pub rules: Vec<Box<dyn AlertRule<MySender>>>,
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Architecture Principles
|
||||
|
||||
### Type Safety Over Flexibility
|
||||
|
||||
Each monitoring stack has distinct CRDs and configuration formats. Rather than a unified "MonitoringStack" type that loses stack-specific features, we use generic traits that provide type safety while allowing each stack to express its unique configuration.
|
||||
|
||||
### Compile-Time Capability Verification
|
||||
|
||||
The `Observability<S>` bound ensures you can't deploy OKD alerts to a KubePrometheus cluster. The compiler catches platform mismatches before deployment.
|
||||
|
||||
### Explicit Over Implicit
|
||||
|
||||
Monitoring stacks are chosen explicitly (`OpenshiftClusterAlertSender` vs `KubePrometheus`). There's no "auto-detection" that could lead to surprising behavior.
|
||||
|
||||
### Three Levels, One Foundation
|
||||
|
||||
Cluster, tenant, and application monitoring all use the same traits (`AlertSender`, `AlertReceiver`, `AlertRule`). The difference is in how scores are constructed and how topologies interpret them.
|
||||
|
||||
---
|
||||
|
||||
## Related Documentation
|
||||
|
||||
- [ADR-020: Monitoring and Alerting Architecture](../adr/020-monitoring-alerting-architecture.md)
|
||||
- [ADR-013: Monitoring Notifications (ntfy)](../adr/013-monitoring-notifications.md)
|
||||
- [ADR-011: Multi-Tenant Cluster Architecture](../adr/011-multi-tenant-cluster.md)
|
||||
- [Coding Guide](coding-guide.md)
|
||||
- [Core Concepts](concepts.md)
|
||||
16
docs/one_liners.md
Normal file
16
docs/one_liners.md
Normal file
@@ -0,0 +1,16 @@
|
||||
# Handy one-liners for infrastructure management
|
||||
|
||||
### Delete all evicted pods from a cluster
|
||||
|
||||
```sh
|
||||
kubectl get po -A | grep Evic | awk '{ print "-n " $1 " " $2 }' | xargs -L 1 kubectl delete po
|
||||
```
|
||||
> Pods are evicted when the node they are running on lacks the resources to keep them going. The most common case is when ephemeral storage becomes too full because of something like a log file growing too large.
>
> It can also happen because of memory or CPU pressure due to unpredictable workloads.
>
> This means it is generally OK to delete them.
>
> However, in a perfectly configured deployment and cluster, pods should rarely, if ever, get evicted. For example, logging should be configured so files do not grow unbounded, or the deployment should reserve the correct amount of ephemeral storage.
>
> Note that deleting evicted pods does not solve the underlying issue; make sure to understand why the pod was evicted in the first place and put the proper fix in place.
|
||||
17
docs/use-cases/README.md
Normal file
17
docs/use-cases/README.md
Normal file
@@ -0,0 +1,17 @@
# Use Cases

Real-world scenarios demonstrating Harmony in action.

## Available Use Cases

### [PostgreSQL on Local K3D](./postgresql-on-local-k3d.md)

Deploy a fully functional PostgreSQL cluster on a local K3D cluster in under 10 minutes. The quickest way to see Harmony in action.

### [OKD on Bare Metal](./okd-on-bare-metal.md)

A complete walkthrough of bootstrapping a high-availability OKD cluster from physical hardware. Covers inventory discovery, bootstrap, control plane, and worker provisioning.

---

_These use cases are community-tested scenarios. For questions or contributions, open an issue on the [Harmony repository](https://git.nationtech.io/NationTech/harmony/issues)._
159
docs/use-cases/okd-on-bare-metal.md
Normal file
159
docs/use-cases/okd-on-bare-metal.md
Normal file
@@ -0,0 +1,159 @@
# Use Case: OKD on Bare Metal

Provision a production-grade OKD (OpenShift Kubernetes Distribution) cluster from physical hardware using Harmony. This use case covers the full lifecycle: hardware discovery, bootstrap, control plane, workers, and post-install validation.

## What you'll have at the end

A highly available OKD cluster with:
- 3 control plane nodes
- 2+ worker nodes
- Network bonding configured on nodes and switches
- Load balancer routing API and ingress traffic
- DNS and DHCP services for the cluster
- Post-install health validation

## Target hardware model

This setup assumes a typical lab environment:

```
┌─────────────────────────────────────────────────────────┐
│  Network 192.168.x.0/24 (flat, DHCP + PXE capable)      │
│                                                         │
│  ┌──────────┐  ┌──────────┐  ┌──────────┐               │
│  │   cp0    │  │   cp1    │  │   cp2    │   (control)   │
│  └──────────┘  └──────────┘  └──────────┘               │
│  ┌──────────┐  ┌──────────┐                             │
│  │   wk0    │  │   wk1    │   ...        (workers)      │
│  └──────────┘  └──────────┘                             │
│  ┌──────────┐                                           │
│  │ bootstrap│   (temporary, can be repurposed)          │
│  └──────────┘                                           │
│                                                         │
│  ┌──────────┐  ┌──────────┐                             │
│  │ firewall │  │  switch  │   (OPNsense + Brocade)      │
│  └──────────┘  └──────────┘                             │
└─────────────────────────────────────────────────────────┘
```

## Required infrastructure

Harmony models this as an `HAClusterTopology`, which requires these capabilities:

| Capability | Implementation |
|------------|---------------|
| **Router** | OPNsense firewall |
| **Load Balancer** | OPNsense HAProxy |
| **Firewall** | OPNsense |
| **DHCP Server** | OPNsense |
| **TFTP Server** | OPNsense |
| **HTTP Server** | OPNsense |
| **DNS Server** | OPNsense |
| **Node Exporter** | Prometheus node_exporter on OPNsense |
| **Switch Client** | Brocade SNMP |

See `examples/okd_installation/` for a reference topology implementation.

## The Provisioning Pipeline

Harmony orchestrates OKD installation in ordered stages:

### Stage 1: Inventory Discovery (`OKDSetup01InventoryScore`)

Harmony boots all nodes via PXE into a CentOS Stream live environment, runs an inventory agent on each, and collects:
- MAC addresses and NIC details
- IP addresses assigned by DHCP
- Hardware profile (CPU, RAM, storage)

This is the "discovery-first" approach: no pre-configuration required on nodes.

### Stage 2: Bootstrap Node (`OKDSetup02BootstrapScore`)

The user selects one discovered node to serve as the bootstrap node. Harmony:
- Renders per-MAC iPXE boot configuration with OKD 4.19 SCOS live assets + ignition
- Reboots the bootstrap node via SSH
- Waits for the bootstrap process to complete (API server becomes available)

### Stage 3: Control Plane (`OKDSetup03ControlPlaneScore`)

With bootstrap complete, Harmony provisions the control plane nodes:
- Renders per-MAC iPXE for each control plane node
- Reboots via SSH and waits for the node to join the cluster
- Applies network bond configuration via NMState MachineConfig where relevant

### Stage 4: Network Bonding (`OKDSetupPersistNetworkBondScore`)

Configures LACP bonds on nodes and corresponding port-channels on the switch stack for high availability.

### Stage 5: Worker Nodes (`OKDSetup04WorkersScore`)

Provisions worker nodes similarly to the control plane, joining them to the cluster.

### Stage 6: Sanity Check (`OKDSetup05SanityCheckScore`)

Validates:
- API server is reachable
- Ingress controller is operational
- Cluster operators are healthy
- SDN (software-defined networking) is functional

### Stage 7: Installation Report (`OKDSetup06InstallationReportScore`)

Produces a machine-readable JSON report and human-readable summary of the installation.
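
In code, the whole pipeline is obtained as an ordered list of Scores and handed to the Harmony CLI runner. A minimal sketch, adapted from `examples/okd_installation` (inventory and `HAClusterTopology` construction are elided here):

```rust
// Sketch only: `inventory` and `topology` come from your HAClusterTopology setup
// (OPNsense, Brocade switch, node details), elided for brevity.
let mut scores = OKDInstallationPipeline::get_all_scores(HarmonyDiscoveryStrategy::MDNS).await;

// Optional extra scores can be appended, e.g. the load balancer configuration:
scores.push(Box::new(OKDLoadBalancerScore::new(&topology)));

harmony_cli::run(inventory, topology, scores, None)
    .await
    .unwrap();
```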
## Network notes

**During discovery:** Ports must be in access mode (no LACP). DHCP succeeds; iPXE loads CentOS Stream live with Kickstart and starts the inventory endpoint.

**During provisioning:** After SCOS is on disk and Ignition/MachineConfig can be applied, bonds are set persistently. This avoids the PXE/DHCP recovery race condition that occurs if bonding is configured too early.

**PXE limitation:** The generic discovery path cannot use bonded networks for PXE boot because the DHCP recovery process conflicts with bond formation.

## Configuration knobs

When using `OKDInstallationPipeline`, configure these domains:

| Parameter | Example | Description |
|-----------|---------|-------------|
| `public_domain` | `apps.example.com` | Wildcard domain for application ingress |
| `internal_domain` | `cluster.local` | Internal cluster DNS domain |

## Running the example

See `examples/okd_installation/` for a complete reference. The topology must be configured with your infrastructure details:

```bash
# Configure the example with your hardware/network specifics
# See examples/okd_installation/src/topology.rs

cargo run -p example-okd_installation
```

This example requires:
- Physical hardware configured as described above
- OPNsense firewall with SSH access
- Brocade switch with SNMP access
- All nodes connected to the same Layer 2 network

## Post-install

After the cluster is bootstrapped, `~/.kube/config` is updated with the cluster credentials. Verify:

```bash
kubectl get nodes
kubectl get pods -n openshift-monitoring
oc get routes -n openshift-console
```

## Next steps

- Enable monitoring with `PrometheusAlertScore` or `OpenshiftClusterAlertScore`
- Configure TLS certificates with `CertManagerHelmScore`
- Add storage with Rook Ceph
- Scale workers with `OKDSetup04WorkersScore`

## Further reading

- [OKD Installation Module](../../harmony/src/modules/okd/installation.rs) — source of truth for pipeline stages
- [HAClusterTopology](../../harmony/src/domain/topology/ha_cluster.rs) — infrastructure capability model
- [Scores Catalog](../catalogs/scores.md) — all available Scores including OKD-specific ones
115
docs/use-cases/postgresql-on-local-k3d.md
Normal file
115
docs/use-cases/postgresql-on-local-k3d.md
Normal file
@@ -0,0 +1,115 @@
# Use Case: PostgreSQL on Local K3D

Deploy a production-grade PostgreSQL cluster on a local Kubernetes cluster (K3D) using Harmony. This is the fastest way to get started with Harmony and requires no external infrastructure.

## What you'll have at the end

A fully operational PostgreSQL cluster with:
- 1 primary instance with 1 GiB of storage
- CloudNativePG operator managing the cluster lifecycle
- Automatic failover support (foundation for high availability)
- Exposed as a Kubernetes Service for easy connection

## Prerequisites

- Rust 2024 edition
- Docker running locally
- ~5 minutes

## The Score

The entire deployment is expressed in ~20 lines of Rust:

```rust
use harmony::{
    inventory::Inventory,
    modules::postgresql::{PostgreSQLScore, capability::PostgreSQLConfig},
    topology::K8sAnywhereTopology,
};

#[tokio::main]
async fn main() {
    let postgres = PostgreSQLScore {
        config: PostgreSQLConfig {
            cluster_name: "harmony-postgres-example".to_string(),
            namespace: "harmony-postgres-example".to_string(),
            ..Default::default()
        },
    };

    harmony_cli::run(
        Inventory::autoload(),
        K8sAnywhereTopology::from_env(),
        vec![Box::new(postgres)],
        None,
    )
    .await
    .unwrap();
}
```

## What Harmony does

When you run this, Harmony:

1. **Connects to K8sAnywhereTopology** — this auto-provisions a K3D cluster if none exists
2. **Installs the CloudNativePG operator** — one-time setup that enables PostgreSQL cluster management in Kubernetes
3. **Creates a PostgreSQL cluster** — Harmony translates the Score into a `Cluster` CRD and applies it
4. **Exposes the database** — creates a Kubernetes Service for the PostgreSQL primary

## Running it

```bash
cargo run -p example-postgresql
```

## Verifying the deployment

```bash
# Check pods
kubectl get pods -n harmony-postgres-example

# Get the password
PASSWORD=$(kubectl get secret -n harmony-postgres-example \
  harmony-postgres-example-db-user \
  -o jsonpath='{.data.password}' | base64 -d)

# Connect via port-forward
kubectl port-forward -n harmony-postgres-example svc/harmony-postgres-example-rw 5432:5432
PGPASSWORD="$PASSWORD" psql -h localhost -p 5432 -U postgres
```

## Customizing the deployment

The `PostgreSQLConfig` struct supports:

| Field | Default | Description |
|-------|---------|-------------|
| `cluster_name` | — | Name of the PostgreSQL cluster |
| `namespace` | — | Kubernetes namespace to deploy to |
| `instances` | `1` | Number of instances |
| `storage_size` | `1Gi` | Persistent storage size per instance |

Example with custom settings:

```rust
let postgres = PostgreSQLScore {
    config: PostgreSQLConfig {
        cluster_name: "my-prod-db".to_string(),
        namespace: "database".to_string(),
        instances: 3,
        storage_size: "10Gi".to_string().into(),
        ..Default::default()
    },
};
```

## Extending the pattern

This pattern extends to any Kubernetes-native workload:

- Add **monitoring** by including a `Monitoring` feature alongside your Score
- Add **TLS certificates** by including a `CertificateScore`
- Add **tenant isolation** by wrapping in a `TenantScore`

See [Scores Catalog](../catalogs/scores.md) for the full list.
127
examples/README.md
Normal file
127
examples/README.md
Normal file
@@ -0,0 +1,127 @@
# Examples

This directory contains runnable examples demonstrating Harmony's capabilities. Each example is a self-contained program that can be run with `cargo run -p example-<name>`.

## Quick Reference

| Example | Description | Local K3D | Existing Cluster | Hardware Needed |
|---------|-------------|:---------:|:----------------:|:---------------:|
| `postgresql` | Deploy a PostgreSQL cluster | ✅ | ✅ | — |
| `ntfy` | Deploy ntfy notification server | ✅ | ✅ | — |
| `tenant` | Create a multi-tenant namespace | ✅ | ✅ | — |
| `cert_manager` | Provision TLS certificates | ✅ | ✅ | — |
| `node_health` | Check Kubernetes node health | ✅ | ✅ | — |
| `monitoring` | Deploy Prometheus alerting | ✅ | ✅ | — |
| `monitoring_with_tenant` | Monitoring + tenant isolation | ✅ | ✅ | — |
| `operatorhub_catalog` | Install OperatorHub catalog | ✅ | ✅ | — |
| `validate_ceph_cluster_health` | Verify Ceph cluster health | — | ✅ | Rook/Ceph |
| `remove_rook_osd` | Remove a Rook OSD | — | ✅ | Rook/Ceph |
| `brocade_snmp_server` | Configure Brocade switch SNMP | — | ✅ | Brocade switch |
| `opnsense_node_exporter` | Node exporter on OPNsense | — | ✅ | OPNsense firewall |
| `okd_pxe` | PXE boot configuration for OKD | — | — | ✅ |
| `okd_installation` | Full OKD bare-metal install | — | — | ✅ |
| `okd_cluster_alerts` | OKD cluster monitoring alerts | — | ✅ | OKD cluster |
| `multisite_postgres` | Multi-site PostgreSQL failover | — | ✅ | Multi-cluster |
| `nats` | Deploy NATS messaging | — | ✅ | Multi-cluster |
| `nats-supercluster` | NATS supercluster across sites | — | ✅ | Multi-cluster |
| `lamp` | LAMP stack deployment | ✅ | ✅ | — |
| `openbao` | Deploy OpenBao vault | ✅ | ✅ | — |
| `zitadel` | Deploy Zitadel identity provider | ✅ | ✅ | — |
| `try_rust_webapp` | Rust webapp with packaging | ✅ | ✅ | Submodule |
| `rust` | Rust webapp with full monitoring | ✅ | ✅ | — |
| `rhob_application_monitoring` | RHOB monitoring setup | ✅ | ✅ | — |
| `sttest` | Full OKD stack test | — | — | ✅ |
| `application_monitoring_with_tenant` | App monitoring + tenant | — | ✅ | OKD cluster |
| `kube-rs` | Direct kube-rs client usage | ✅ | ✅ | — |
| `k8s_drain_node` | Drain a Kubernetes node | ✅ | ✅ | — |
| `k8s_write_file_on_node` | Write files to K8s nodes | ✅ | ✅ | — |
| `harmony_inventory_builder` | Discover hosts via subnet scan | ✅ | — | — |
| `cli` | CLI tool with inventory discovery | ✅ | — | — |
| `tui` | Terminal UI demonstration | ✅ | — | — |

## Status Legend

| Symbol | Meaning |
|--------|---------|
| ✅ | Works out-of-the-box |
| — | Not applicable or requires specific setup |

## By Category

### Data Services
- **`postgresql`** — Deploy a PostgreSQL cluster via CloudNativePG
- **`multisite_postgres`** — Multi-site PostgreSQL with failover
- **`public_postgres`** — Public-facing PostgreSQL (⚠️ uses NationTech DNS)

### Kubernetes Utilities
- **`node_health`** — Check node health in a cluster
- **`k8s_drain_node`** — Drain and reboot a node
- **`k8s_write_file_on_node`** — Write files to nodes
- **`validate_ceph_cluster_health`** — Verify Ceph/Rook cluster health
- **`remove_rook_osd`** — Remove an OSD from Rook/Ceph
- **`kube-rs`** — Direct Kubernetes client usage demo

### Monitoring & Alerting
- **`monitoring`** — Deploy Prometheus alerting with Discord webhooks
- **`monitoring_with_tenant`** — Monitoring with tenant isolation
- **`ntfy`** — Deploy ntfy notification server
- **`okd_cluster_alerts`** — OKD-specific cluster alerts

### Application Deployment
- **`try_rust_webapp`** — Deploy a Rust webapp with packaging (⚠️ requires `tryrust.org` submodule)
- **`rust`** — Rust webapp with full monitoring features
- **`rhob_application_monitoring`** — Red Hat Observability Stack monitoring
- **`lamp`** — LAMP stack deployment (⚠️ uses NationTech DNS)
- **`application_monitoring_with_tenant`** — App monitoring with tenant isolation

### Infrastructure & Bare Metal
- **`okd_installation`** — Full OKD cluster from scratch
- **`okd_pxe`** — PXE boot configuration for OKD
- **`sttest`** — Full OKD stack test with specific hardware
- **`brocade_snmp_server`** — Configure Brocade switch via SNMP
- **`opnsense_node_exporter`** — Node exporter on OPNsense firewall

### Multi-Cluster
- **`nats`** — NATS deployment on a cluster
- **`nats-supercluster`** — NATS supercluster across multiple sites
- **`multisite_postgres`** — PostgreSQL with multi-site failover

### Identity & Secrets
- **`openbao`** — Deploy OpenBao vault (⚠️ uses NationTech DNS)
- **`zitadel`** — Deploy Zitadel identity provider (⚠️ uses NationTech DNS)

### Cluster Services
- **`cert_manager`** — Provision TLS certificates
- **`tenant`** — Create a multi-tenant namespace
- **`operatorhub_catalog`** — Install OperatorHub catalog sources

### Development & Testing
- **`cli`** — CLI tool with inventory discovery
- **`tui`** — Terminal UI demonstration
- **`harmony_inventory_builder`** — Host discovery via subnet scan

## Running Examples

```bash
# Build first
cargo build --release

# Run any example
cargo run -p example-postgresql
cargo run -p example-ntfy
cargo run -p example-tenant
```

For examples that need an existing Kubernetes cluster:

```bash
export KUBECONFIG=/path/to/your/kubeconfig
export HARMONY_USE_LOCAL_K3D=false
export HARMONY_AUTOINSTALL=false

cargo run -p example-monitoring
```

## Notes on Private Infrastructure

Some examples use NationTech-hosted infrastructure by default (DNS domains like `*.nationtech.io`, `*.harmony.mcd`). These are not suitable for public use without modification. See the [Getting Started Guide](../docs/guides/getting-started.md) for the recommended public examples.
@@ -7,7 +7,7 @@ use harmony::{
|
||||
monitoring::alert_channel::webhook_receiver::WebhookReceiver,
|
||||
tenant::TenantScore,
|
||||
},
|
||||
topology::{K8sAnywhereTopology, monitoring::AlertRoute, tenant::TenantConfig},
|
||||
topology::{K8sAnywhereTopology, tenant::TenantConfig},
|
||||
};
|
||||
use harmony_types::id::Id;
|
||||
use harmony_types::net::Url;
|
||||
@@ -33,14 +33,9 @@ async fn main() {
|
||||
service_port: 3000,
|
||||
});
|
||||
|
||||
let receiver_name = "sample-webhook-receiver".to_string();
|
||||
|
||||
let webhook_receiver = WebhookReceiver {
|
||||
name: receiver_name.clone(),
|
||||
name: "sample-webhook-receiver".to_string(),
|
||||
url: Url::Url(url::Url::parse("https://webhook-doesnt-exist.com").unwrap()),
|
||||
route: AlertRoute {
|
||||
..AlertRoute::default(receiver_name)
|
||||
},
|
||||
};
|
||||
|
||||
let app = ApplicationScore {
|
||||
|
||||
15
examples/example_linux_vm/Cargo.toml
Normal file
15
examples/example_linux_vm/Cargo.toml
Normal file
@@ -0,0 +1,15 @@
[package]
name = "example_linux_vm"
version.workspace = true
edition = "2024"
license.workspace = true

[[bin]]
name = "example_linux_vm"
path = "src/main.rs"

[dependencies]
harmony = { path = "../../harmony" }
tokio.workspace = true
log.workspace = true
env_logger.workspace = true
43
examples/example_linux_vm/README.md
Normal file
43
examples/example_linux_vm/README.md
Normal file
@@ -0,0 +1,43 @@
# Example: Linux VM from ISO

This example deploys a simple Linux virtual machine from an ISO URL.

## What it creates

- One isolated virtual network (`linuxvm-net`, 192.168.101.0/24)
- One Ubuntu Server VM with the ISO attached as a CD-ROM
- The VM is configured to boot from the CD-ROM first, allowing installation
- After installation, the VM can be rebooted to boot from disk

## Prerequisites

- A running KVM hypervisor (local or remote)
- `HARMONY_KVM_URI` environment variable pointing to the hypervisor (defaults to `qemu:///system`)
- `HARMONY_KVM_IMAGE_DIR` environment variable for storing VM images (defaults to harmony data dir)
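
For a remote hypervisor, the libvirt URI can point over SSH, following the same convention as the KVM OKD example:

```bash
# Example only: target a remote KVM host over SSH instead of the local hypervisor.
export HARMONY_KVM_URI="qemu+ssh://user@myhost/system"
```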
## Usage

```bash
cargo run -p example_linux_vm
```

## After deployment

Once the VM is running, you can connect to its console:

```bash
virsh -c qemu:///system console linux-vm
```

To access the VM via SSH after installation, you'll need to configure a bridged network or port forwarding.

## Clean up

To remove the VM and network:

```bash
virsh -c qemu:///system destroy linux-vm
virsh -c qemu:///system undefine linux-vm
virsh -c qemu:///system net-destroy linuxvm-net
virsh -c qemu:///system net-undefine linuxvm-net
```
63
examples/example_linux_vm/src/main.rs
Normal file
63
examples/example_linux_vm/src/main.rs
Normal file
@@ -0,0 +1,63 @@
use harmony::modules::kvm::config::init_executor;
use harmony::modules::kvm::{BootDevice, NetworkConfig, NetworkRef, VmConfig};
use log::info;

const NETWORK_NAME: &str = "linuxvm-net";
const NETWORK_GATEWAY: &str = "192.168.101.1";
const NETWORK_PREFIX: u8 = 24;

const UBUNTU_ISO_URL: &str =
    "https://releases.ubuntu.com/24.04/ubuntu-24.04.3-live-server-amd64.iso";

pub async fn deploy_linux_vm() -> Result<(), String> {
    let executor = init_executor().map_err(|e| format!("KVM initialization failed: {e}"))?;

    let network = NetworkConfig::builder(NETWORK_NAME)
        .bridge("virbr101")
        .subnet(NETWORK_GATEWAY, NETWORK_PREFIX)
        .build();

    info!("Ensuring network '{NETWORK_NAME}' ({NETWORK_GATEWAY}/{NETWORK_PREFIX}) exists");
    executor
        .ensure_network(network)
        .await
        .map_err(|e| format!("Network setup failed: {e}"))?;

    let vm = linux_vm();
    info!("Defining Linux VM '{}'", vm.name);
    executor
        .ensure_vm(vm.clone())
        .await
        .map_err(|e| format!("Linux VM setup failed: {e}"))?;

    info!("Starting VM '{}'", vm.name);
    executor
        .start_vm(&vm.name)
        .await
        .map_err(|e| format!("Failed to start VM: {e}"))?;

    info!(
        "Linux VM '{}' is running. \
         Connect to the console using: virsh -c qemu:///system console {}",
        vm.name, vm.name
    );

    Ok(())
}

fn linux_vm() -> VmConfig {
    VmConfig::builder("linux-vm")
        .vcpus(2)
        .memory_gb(4)
        .disk(20)
        .network(NetworkRef::named(NETWORK_NAME))
        .cdrom(UBUNTU_ISO_URL)
        .boot_order([BootDevice::Cdrom, BootDevice::Disk])
        .build()
}

#[tokio::main]
async fn main() -> Result<(), String> {
    env_logger::init();
    deploy_linux_vm().await
}
15
examples/kvm_okd_ha_cluster/Cargo.toml
Normal file
15
examples/kvm_okd_ha_cluster/Cargo.toml
Normal file
@@ -0,0 +1,15 @@
[package]
name = "example-kvm-okd-ha-cluster"
version.workspace = true
edition = "2024"
license.workspace = true

[[bin]]
name = "kvm_okd_ha_cluster"
path = "src/main.rs"

[dependencies]
harmony = { path = "../../harmony" }
tokio.workspace = true
log.workspace = true
env_logger.workspace = true
100
examples/kvm_okd_ha_cluster/README.md
Normal file
100
examples/kvm_okd_ha_cluster/README.md
Normal file
@@ -0,0 +1,100 @@
# OKD HA Cluster on KVM

Deploys a complete OKD high-availability cluster on a KVM hypervisor using
Harmony's KVM module. All infrastructure is defined in Rust — no YAML, no
shell scripts, no hand-crafted XML.

## What it creates

| Resource | Details |
|-------------------|------------------------------------------|
| Virtual network | `harmonylan` — 192.168.100.0/24, NAT |
| OPNsense VM | 2 vCPU / 4 GiB RAM — gateway + PXE |
| Control plane ×3 | 4 vCPU / 16 GiB RAM — `cp0` … `cp2` |
| Worker ×3 | 8 vCPU / 32 GiB RAM — `worker0` … `worker2` |

## Architecture

All VMs share the same `harmonylan` virtual network. OPNsense sits on both
that network and the host bridge, acting as the gateway and PXE server.

```
                Host network (bridge)
                          │
                 ┌────────┴─────────┐
                 │     OPNsense     │  192.168.100.1
                 │   gateway + PXE  │
                 └────────┬─────────┘
                          │
                          │  harmonylan (192.168.100.0/24)
     ┌─────────────┬──────┴──────┬─────────────┐
     │             │             │             │
┌────┴─────┐  ┌────┴─────┐  ┌────┴─────┐  ┌────┴─────┐
│   cp0    │  │   cp1    │  │   cp2    │  │ worker0  │
│   .10    │  │   .11    │  │   .12    │  │   .20    │
└──────────┘  └──────────┘  └──────────┘  └────┬─────┘
                                               │
                                          ┌────┴─────┐
                                          │ worker1  │
                                          │   .21    │
                                          └────┬─────┘
                                               │
                                          ┌────┴─────┐
                                          │ worker2  │
                                          │   .22    │
                                          └──────────┘
```

All nodes PXE boot from the network interface. OPNsense serves the OKD
bootstrap images via TFTP/iPXE and handles DHCP for the whole subnet.

## Prerequisites

- Linux host with KVM/QEMU and libvirt installed
- `libvirt-dev` headers (for building the `virt` crate)
- A `default` storage pool configured in libvirt
- Sufficient disk space (~550 GiB for all VM images)

## Running

```bash
cargo run --bin kvm_okd_ha_cluster
```

Set `RUST_LOG=info` (or `debug`) to control verbosity.

## Configuration

| Environment variable | Default | Description |
|-------------------------|--------------------|-------------------------------------|
| `HARMONY_KVM_URI` | `qemu:///system` | Libvirt connection URI |
| `HARMONY_KVM_IMAGE_DIR` | harmony data dir | Directory for qcow2 disk images |

For a remote KVM host over SSH:

```bash
export HARMONY_KVM_URI="qemu+ssh://user@myhost/system"
```

## What happens after `cargo run`

The program defines all resources in libvirt but does not start any VMs.
Next steps:

1. Start OPNsense: `virsh start opnsense-harmony`
2. Connect to the OPNsense web UI at `https://192.168.100.1`
3. Configure DHCP, TFTP, and the iPXE menu for OKD
4. Start the control plane and worker nodes — they will PXE boot and begin
   the OKD installation automatically (see the start loop below)
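
Once OPNsense is serving DHCP/TFTP/iPXE, the nodes can be started in one pass. The VM names match the cleanup loop below; adjust them if you renamed the VMs:

```bash
# Start every OKD node defined by this example.
for vm in cp0-harmony cp1-harmony cp2-harmony \
          worker0-harmony worker1-harmony worker2-harmony; do
  virsh start "$vm"
done
```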
## Cleanup

```bash
for vm in opnsense-harmony cp0-harmony cp1-harmony cp2-harmony \
          worker0-harmony worker1-harmony worker2-harmony; do
  virsh destroy "$vm" 2>/dev/null || true
  virsh undefine "$vm" --remove-all-storage 2>/dev/null || true
done
virsh net-destroy harmonylan 2>/dev/null || true
virsh net-undefine harmonylan 2>/dev/null || true
```
132
examples/kvm_okd_ha_cluster/src/lib.rs
Normal file
132
examples/kvm_okd_ha_cluster/src/lib.rs
Normal file
@@ -0,0 +1,132 @@
use harmony::modules::kvm::{
    BootDevice, NetworkConfig, NetworkRef, VmConfig, config::init_executor,
};
use log::info;

const NETWORK_NAME: &str = "harmonylan";
const NETWORK_GATEWAY: &str = "192.168.100.1";
const NETWORK_PREFIX: u8 = 24;

const OPNSENSE_IP: &str = "192.168.100.1";

/// Deploys a full OKD HA cluster on a local or remote KVM hypervisor.
///
/// # What it creates
///
/// - One isolated virtual network (`harmonylan`, 192.168.100.0/24)
/// - One OPNsense VM acting as the cluster gateway and PXE server
/// - Three OKD control-plane nodes
/// - Three OKD worker nodes
///
/// All nodes are configured to PXE boot from the network so that OPNsense
/// can drive unattended OKD installation via TFTP/iPXE.
///
/// # Configuration
///
/// | Environment variable      | Default               | Description                       |
/// |---------------------------|-----------------------|-----------------------------------|
/// | `HARMONY_KVM_URI`         | `qemu:///system`      | Libvirt connection URI            |
/// | `HARMONY_KVM_IMAGE_DIR`   | harmony data dir      | Directory for qcow2 disk images   |
pub async fn deploy_okd_ha_cluster() -> Result<(), String> {
    let executor = init_executor().map_err(|e| format!("KVM initialisation failed: {e}"))?;

    // -------------------------------------------------------------------------
    // Network
    // -------------------------------------------------------------------------
    let network = NetworkConfig::builder(NETWORK_NAME)
        .bridge("virbr100")
        .subnet(NETWORK_GATEWAY, NETWORK_PREFIX)
        .build();

    info!("Ensuring network '{NETWORK_NAME}' ({NETWORK_GATEWAY}/{NETWORK_PREFIX}) exists");
    executor
        .ensure_network(network)
        .await
        .map_err(|e| format!("Network setup failed: {e}"))?;

    // -------------------------------------------------------------------------
    // OPNsense gateway / PXE server
    // -------------------------------------------------------------------------
    let opnsense = opnsense_vm();
    info!("Defining OPNsense VM '{}'", opnsense.name);
    executor
        .ensure_vm(opnsense)
        .await
        .map_err(|e| format!("OPNsense VM setup failed: {e}"))?;

    // -------------------------------------------------------------------------
    // Control plane nodes
    // -------------------------------------------------------------------------
    for i in 0u8..3 {
        let vm = control_plane_vm(i);
        info!("Defining control plane VM '{}'", vm.name);
        executor
            .ensure_vm(vm)
            .await
            .map_err(|e| format!("Control plane VM setup failed: {e}"))?;
    }

    // -------------------------------------------------------------------------
    // Worker nodes
    // -------------------------------------------------------------------------
    for i in 0u8..3 {
        let vm = worker_vm(i);
        info!("Defining worker VM '{}'", vm.name);
        executor
            .ensure_vm(vm)
            .await
            .map_err(|e| format!("Worker VM setup failed: {e}"))?;
    }

    info!(
        "OKD HA cluster infrastructure ready. \
         Connect OPNsense at https://{OPNSENSE_IP} to configure DHCP, TFTP, and PXE \
         before starting the nodes."
    );
    Ok(())
}

// -----------------------------------------------------------------------------
// VM definitions
// -----------------------------------------------------------------------------

/// OPNsense firewall — gateway and PXE server for the cluster.
///
/// Connected to both the host bridge (WAN) and `harmonylan` (LAN). It manages
/// DHCP, TFTP, and the PXE menu that drives OKD installation on all other VMs.
fn opnsense_vm() -> VmConfig {
    VmConfig::builder("opnsense-harmony")
        .vcpus(2)
        .memory_gb(4)
        .disk(20) // OS disk: vda
        .network(NetworkRef::named(NETWORK_NAME))
        .boot_order([BootDevice::Cdrom, BootDevice::Disk])
        .build()
}

/// One OKD control-plane node. Indexed 0..2 → `cp0-harmony` … `cp2-harmony`.
///
/// Boots from network so OPNsense can serve the OKD bootstrap image via PXE.
fn control_plane_vm(index: u8) -> VmConfig {
    VmConfig::builder(format!("cp{index}-harmony"))
        .vcpus(4)
        .memory_gb(16)
        .disk(120) // OS + etcd: vda
        .network(NetworkRef::named(NETWORK_NAME))
        .boot_order([BootDevice::Network, BootDevice::Disk])
        .build()
}

/// One OKD worker node. Indexed 0..2 → `worker0-harmony` … `worker2-harmony`.
///
/// Boots from network for automated OKD installation.
fn worker_vm(index: u8) -> VmConfig {
    VmConfig::builder(format!("worker{index}-harmony"))
        .vcpus(8)
        .memory_gb(32)
        .disk(120) // OS: vda
        .disk(200) // Persistent storage (ODF/Rook): vdb
        .network(NetworkRef::named(NETWORK_NAME))
        .boot_order([BootDevice::Network, BootDevice::Disk])
        .build()
}
7
examples/kvm_okd_ha_cluster/src/main.rs
Normal file
7
examples/kvm_okd_ha_cluster/src/main.rs
Normal file
@@ -0,0 +1,7 @@
use example_kvm_okd_ha_cluster::deploy_okd_ha_cluster;

#[tokio::main]
async fn main() -> Result<(), String> {
    env_logger::init();
    deploy_okd_ha_cluster().await
}
@@ -1,45 +1,37 @@
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
sync::{Arc, Mutex},
|
||||
};
|
||||
use std::collections::HashMap;
|
||||
|
||||
use harmony::{
|
||||
inventory::Inventory,
|
||||
modules::monitoring::{
|
||||
alert_channel::discord_alert_channel::DiscordReceiver,
|
||||
alert_rule::{
|
||||
alerts::{
|
||||
infra::dell_server::{
|
||||
alert_global_storage_status_critical,
|
||||
alert_global_storage_status_non_recoverable,
|
||||
global_storage_status_degraded_non_critical,
|
||||
modules::{
|
||||
monitoring::{
|
||||
alert_channel::discord_alert_channel::DiscordWebhook,
|
||||
alert_rule::prometheus_alert_rule::AlertManagerRuleGroup,
|
||||
kube_prometheus::{
|
||||
helm_prometheus_alert_score::HelmPrometheusAlertingScore,
|
||||
types::{
|
||||
HTTPScheme, MatchExpression, Operator, Selector, ServiceMonitor,
|
||||
ServiceMonitorEndpoint,
|
||||
},
|
||||
k8s::pvc::high_pvc_fill_rate_over_two_days,
|
||||
},
|
||||
prometheus_alert_rule::AlertManagerRuleGroup,
|
||||
},
|
||||
kube_prometheus::{
|
||||
helm::config::KubePrometheusConfig,
|
||||
kube_prometheus_alerting_score::KubePrometheusAlertingScore,
|
||||
types::{
|
||||
HTTPScheme, MatchExpression, Operator, Selector, ServiceMonitor,
|
||||
ServiceMonitorEndpoint,
|
||||
prometheus::alerts::{
|
||||
infra::dell_server::{
|
||||
alert_global_storage_status_critical, alert_global_storage_status_non_recoverable,
|
||||
global_storage_status_degraded_non_critical,
|
||||
},
|
||||
k8s::pvc::high_pvc_fill_rate_over_two_days,
|
||||
},
|
||||
},
|
||||
topology::{K8sAnywhereTopology, monitoring::AlertRoute},
|
||||
topology::K8sAnywhereTopology,
|
||||
};
|
||||
use harmony_types::{k8s_name::K8sName, net::Url};
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
let receiver_name = "test-discord".to_string();
|
||||
let discord_receiver = DiscordReceiver {
|
||||
name: receiver_name.clone(),
|
||||
let discord_receiver = DiscordWebhook {
|
||||
name: K8sName("test-discord".to_string()),
|
||||
url: Url::Url(url::Url::parse("https://discord.doesnt.exist.com").unwrap()),
|
||||
route: AlertRoute {
|
||||
..AlertRoute::default(receiver_name)
|
||||
},
|
||||
selectors: vec![],
|
||||
};
|
||||
|
||||
let high_pvc_fill_rate_over_two_days_alert = high_pvc_fill_rate_over_two_days();
|
||||
@@ -78,15 +70,10 @@ async fn main() {
|
||||
endpoints: vec![service_monitor_endpoint],
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let config = Arc::new(Mutex::new(KubePrometheusConfig::new()));
|
||||
|
||||
let alerting_score = KubePrometheusAlertingScore {
|
||||
let alerting_score = HelmPrometheusAlertingScore {
|
||||
receivers: vec![Box::new(discord_receiver)],
|
||||
rules: vec![Box::new(additional_rules), Box::new(additional_rules2)],
|
||||
service_monitors: vec![service_monitor],
|
||||
scrape_targets: None,
|
||||
config,
|
||||
};
|
||||
|
||||
harmony_cli::run(
|
||||
|
||||
@@ -1,32 +1,24 @@
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
str::FromStr,
|
||||
sync::{Arc, Mutex},
|
||||
};
|
||||
use std::{collections::HashMap, str::FromStr};
|
||||
|
||||
use harmony::{
|
||||
inventory::Inventory,
|
||||
modules::{
|
||||
monitoring::{
|
||||
alert_channel::discord_alert_channel::DiscordReceiver,
|
||||
alert_rule::{
|
||||
alerts::k8s::pvc::high_pvc_fill_rate_over_two_days,
|
||||
prometheus_alert_rule::AlertManagerRuleGroup,
|
||||
},
|
||||
alert_channel::discord_alert_channel::DiscordWebhook,
|
||||
alert_rule::prometheus_alert_rule::AlertManagerRuleGroup,
|
||||
kube_prometheus::{
|
||||
helm::config::KubePrometheusConfig,
|
||||
kube_prometheus_alerting_score::KubePrometheusAlertingScore,
|
||||
helm_prometheus_alert_score::HelmPrometheusAlertingScore,
|
||||
types::{
|
||||
HTTPScheme, MatchExpression, Operator, Selector, ServiceMonitor,
|
||||
ServiceMonitorEndpoint,
|
||||
},
|
||||
},
|
||||
},
|
||||
prometheus::alerts::k8s::pvc::high_pvc_fill_rate_over_two_days,
|
||||
tenant::TenantScore,
|
||||
},
|
||||
topology::{
|
||||
K8sAnywhereTopology,
|
||||
monitoring::AlertRoute,
|
||||
tenant::{ResourceLimits, TenantConfig, TenantNetworkPolicy},
|
||||
},
|
||||
};
|
||||
@@ -50,13 +42,10 @@ async fn main() {
|
||||
},
|
||||
};
|
||||
|
||||
let receiver_name = "test-discord".to_string();
|
||||
let discord_receiver = DiscordReceiver {
|
||||
name: receiver_name.clone(),
|
||||
let discord_receiver = DiscordWebhook {
|
||||
name: K8sName("test-discord".to_string()),
|
||||
url: Url::Url(url::Url::parse("https://discord.doesnt.exist.com").unwrap()),
|
||||
route: AlertRoute {
|
||||
..AlertRoute::default(receiver_name)
|
||||
},
|
||||
selectors: vec![],
|
||||
};
|
||||
|
||||
let high_pvc_fill_rate_over_two_days_alert = high_pvc_fill_rate_over_two_days();
|
||||
@@ -85,14 +74,10 @@ async fn main() {
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let config = Arc::new(Mutex::new(KubePrometheusConfig::new()));
|
||||
|
||||
let alerting_score = KubePrometheusAlertingScore {
|
||||
let alerting_score = HelmPrometheusAlertingScore {
|
||||
receivers: vec![Box::new(discord_receiver)],
|
||||
rules: vec![Box::new(additional_rules)],
|
||||
service_monitors: vec![service_monitor],
|
||||
scrape_targets: None,
|
||||
config,
|
||||
};
|
||||
|
||||
harmony_cli::run(
|
||||
|
||||
@@ -14,7 +14,6 @@ async fn main() {
|
||||
..Default::default() // Use harmony defaults, they are based on CNPG's default values :
|
||||
// "default" namespace, 1 instance, 1Gi storage
|
||||
},
|
||||
hostname: "postgrestest.sto1.nationtech.io".to_string(),
|
||||
};
|
||||
|
||||
harmony_cli::run(
|
||||
|
||||
@@ -1,64 +1,35 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use harmony::{
|
||||
inventory::Inventory,
|
||||
modules::monitoring::{
|
||||
alert_channel::discord_alert_channel::DiscordReceiver,
|
||||
alert_rule::{
|
||||
alerts::{
|
||||
infra::opnsense::high_http_error_rate, k8s::pvc::high_pvc_fill_rate_over_two_days,
|
||||
},
|
||||
prometheus_alert_rule::AlertManagerRuleGroup,
|
||||
},
|
||||
okd::openshift_cluster_alerting_score::OpenshiftClusterAlertScore,
|
||||
scrape_target::prometheus_node_exporter::PrometheusNodeExporter,
|
||||
},
|
||||
topology::{
|
||||
K8sAnywhereTopology,
|
||||
monitoring::{AlertMatcher, AlertRoute, MatchOp},
|
||||
alert_channel::discord_alert_channel::DiscordWebhook,
|
||||
okd::cluster_monitoring::OpenshiftClusterAlertScore,
|
||||
},
|
||||
topology::K8sAnywhereTopology,
|
||||
};
|
||||
|
||||
use harmony_macros::{hurl, ip};
|
||||
use harmony_macros::hurl;
|
||||
use harmony_types::k8s_name::K8sName;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
let platform_matcher = AlertMatcher {
|
||||
label: "prometheus".to_string(),
|
||||
operator: MatchOp::Eq,
|
||||
value: "openshift-monitoring/k8s".to_string(),
|
||||
};
|
||||
let severity = AlertMatcher {
|
||||
label: "severity".to_string(),
|
||||
operator: MatchOp::Eq,
|
||||
value: "critical".to_string(),
|
||||
};
|
||||
|
||||
let high_http_error_rate = high_http_error_rate();
|
||||
|
||||
let additional_rules = AlertManagerRuleGroup::new("test-rule", vec![high_http_error_rate]);
|
||||
|
||||
let scrape_target = PrometheusNodeExporter {
|
||||
job_name: "firewall".to_string(),
|
||||
metrics_path: "/metrics".to_string(),
|
||||
listen_address: ip!("192.168.1.1"),
|
||||
port: 9100,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let mut sel = HashMap::new();
|
||||
sel.insert(
|
||||
"openshift_io_alert_source".to_string(),
|
||||
"platform".to_string(),
|
||||
);
|
||||
let mut sel2 = HashMap::new();
|
||||
sel2.insert("openshift_io_alert_source".to_string(), "".to_string());
|
||||
let selectors = vec![sel, sel2];
|
||||
harmony_cli::run(
|
||||
Inventory::autoload(),
|
||||
K8sAnywhereTopology::from_env(),
|
||||
vec![Box::new(OpenshiftClusterAlertScore {
|
||||
receivers: vec![Box::new(DiscordReceiver {
|
||||
name: "crit-wills-discord-channel-example".to_string(),
|
||||
url: hurl!("https://test.io"),
|
||||
route: AlertRoute {
|
||||
matchers: vec![severity],
|
||||
..AlertRoute::default("crit-wills-discord-channel-example".to_string())
|
||||
},
|
||||
receivers: vec![Box::new(DiscordWebhook {
|
||||
name: K8sName("wills-discord-webhook-example".to_string()),
|
||||
url: hurl!("https://something.io"),
|
||||
selectors: selectors,
|
||||
})],
|
||||
sender: harmony::modules::monitoring::okd::OpenshiftClusterAlertSender,
|
||||
rules: vec![Box::new(additional_rules)],
|
||||
scrape_targets: Some(vec![Box::new(scrape_target)]),
|
||||
})],
|
||||
None,
|
||||
)
|
||||
|
||||
@@ -6,7 +6,10 @@ use harmony::{
|
||||
data::{FileContent, FilePath},
|
||||
modules::{
|
||||
inventory::HarmonyDiscoveryStrategy,
|
||||
okd::{installation::OKDInstallationPipeline, ipxe::OKDIpxeScore},
|
||||
okd::{
|
||||
installation::OKDInstallationPipeline, ipxe::OKDIpxeScore,
|
||||
load_balancer::OKDLoadBalancerScore,
|
||||
},
|
||||
},
|
||||
score::Score,
|
||||
topology::HAClusterTopology,
|
||||
@@ -32,6 +35,7 @@ async fn main() {
|
||||
scores
|
||||
.append(&mut OKDInstallationPipeline::get_all_scores(HarmonyDiscoveryStrategy::MDNS).await);
|
||||
|
||||
scores.push(Box::new(OKDLoadBalancerScore::new(&topology)));
|
||||
harmony_cli::run(inventory, topology, scores, None)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
14
examples/penpot/Cargo.toml
Normal file
14
examples/penpot/Cargo.toml
Normal file
@@ -0,0 +1,14 @@
[package]
name = "example-penpot"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true

[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_macros = { path = "../../harmony_macros" }
harmony_types = { path = "../../harmony_types" }
tokio.workspace = true
url.workspace = true
Some files were not shown because too many files have changed in this diff.