Compare commits

...

2 Commits

20 changed files with 1939 additions and 99 deletions

33
Cargo.lock generated
View File

@@ -2597,6 +2597,29 @@ dependencies = [
"url",
]
[[package]]
name = "example-harmony-sso"
version = "0.1.0"
dependencies = [
"anyhow",
"directories",
"env_logger",
"harmony",
"harmony_cli",
"harmony_config",
"harmony_macros",
"harmony_secret",
"harmony_types",
"k3d-rs",
"kube",
"log",
"reqwest 0.12.28",
"serde",
"serde_json",
"tokio",
"url",
]
[[package]]
name = "example-k8s-drain-node"
version = "0.1.0"
@@ -3768,12 +3791,14 @@ dependencies = [
"lazy_static",
"log",
"pretty_assertions",
"reqwest 0.12.28",
"schemars 0.8.22",
"serde",
"serde_json",
"tempfile",
"thiserror 2.0.18",
"tokio",
"url",
"vaultrs",
]
@@ -5246,6 +5271,14 @@ version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7c87def4c32ab89d880effc9e097653c8da5d6ef28e6b539d313baaacfbafcbe"
[[package]]
name = "opnsense-codegen"
version = "0.1.0"
dependencies = [
"serde",
"serde_json",
]
[[package]]
name = "opnsense-config"
version = "0.1.0"

View File

@@ -16,7 +16,7 @@ Make `harmony_config` production-ready with a seamless first-run experience: clo
- `SqliteSource` — **NEW**: reads/writes to SQLite database
- `PromptSource` — returns `None` / no-op on set (placeholder for TUI integration)
- `StoreSource<S: SecretStore>` — wraps any `harmony_secret::SecretStore` backend
- 24 unit tests (mock source, env, local file, sqlite, prompt, integration)
- 26 unit tests (mock source, env, local file, sqlite, prompt, integration, store graceful fallback)
- Global `CONFIG_MANAGER` static with `init()`, `get()`, `get_or_prompt()`, `set()`
- Two examples: `basic` and `prompting` in `harmony_config/examples/`
- **Zero workspace consumers** — nothing calls `harmony_config` yet
@@ -130,12 +130,461 @@ for source in &self.sources {
### 1.4 Validate Zitadel + OpenBao integration path ⏳
**Status**: Planning phase — detailed execution plan below (not yet implemented)
Remaining work:
- Validate that `ConfigManager::new(vec![EnvSource, SqliteSource, StoreSource<Openbao>])` compiles
- When OpenBao is unreachable, chain falls through to SQLite gracefully
- Document target Zitadel OIDC flow as ADR
**Background**: ADR 020-1 documents the target architecture for Zitadel OIDC + OpenBao integration. This task validates the full chain by deploying Zitadel and OpenBao on a local k3d cluster and demonstrating an end-to-end example.
**Architecture Overview**:
```
┌─────────────────────────────────────────────────────────────────────┐
│ Harmony CLI / App │
│ │
│ ConfigManager: │
│ 1. EnvSource ← HARMONY_CONFIG_* env vars (highest priority) │
│ 2. SqliteSource ← ~/.local/share/harmony/config/config.db │
│ 3. StoreSource ← OpenBao (team-scale, via Zitadel OIDC) │
│ │
│ When StoreSource fails (OpenBao unreachable): │
│ → returns Ok(None), chain falls through to SqliteSource │
└─────────────────────────────────────────────────────────────────────┘
┌──────────────────┐ ┌──────────────────┐
│ Zitadel │ │ OpenBao │
│ (IdP + OIDC) │ │ (Secret Store) │
│ │ │ │
│ Device Auth │────JWT──▶│ JWT Auth │
│ Flow (RFC 8628)│ │ Method │
└──────────────────┘ └──────────────────┘
```
**Prerequisites**:
- Docker running (for k3d)
- Rust toolchain (edition 2024)
- Network access to download Helm charts
- `kubectl` (installed automatically with k3d, or pre-installed)
**Step-by-Step Execution Plan**:
#### Step 1: Create k3d cluster for local development
When you run `cargo run -p example-zitadel` (or any example using `K8sAnywhereTopology::from_env()`), Harmony automatically provisions a k3d cluster if one does not exist. By default:
- `use_local_k3d = true` (env: `HARMONY_USE_LOCAL_K3D`, default `true`)
- `autoinstall = true` (env: `HARMONY_AUTOINSTALL`, default `true`)
- Cluster name: **`harmony`** (hardcoded in `K3DInstallationScore::default()`)
- k3d binary is downloaded to `~/.local/share/harmony/k3d/`
- Kubeconfig is merged into `~/.kube/config`, context set to `k3d-harmony`
No manual `k3d cluster create` is needed. If you want to create the cluster manually first:
```bash
# Install k3d (requires sudo or install to user path)
curl -s https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | bash
# Create the cluster with the same name Harmony expects
k3d cluster create harmony
kubectl cluster-info --context k3d-harmony
```
**Validation**: `kubectl get nodes --context k3d-harmony` shows 1 server node (k3d default)
**Note**: The existing examples use hardcoded external hostnames (e.g., `sso.sto1.nationtech.io`) for ingress. On a local k3d cluster, these hostnames are not routable. For local development you must either:
- Use `kubectl port-forward` to access services directly
- Configure `/etc/hosts` entries pointing to `127.0.0.1`
- Use a k3d loadbalancer with `--port` mappings
#### Step 2: Deploy Zitadel
Zitadel requires the topology to implement `Topology + K8sclient + HelmCommand + PostgreSQL`. The `K8sAnywhereTopology` satisfies all four.
```bash
cargo run -p example-zitadel
```
**What happens internally** (see `harmony/src/modules/zitadel/mod.rs`):
1. Creates `zitadel` namespace via `K8sResourceScore`
2. Deploys a CNPG PostgreSQL cluster:
- Name: `zitadel-pg`
- Instances: **2** (not 1)
- Storage: 10Gi
- Namespace: `zitadel`
3. Resolves the internal DB endpoint (`host:port`) from the CNPG cluster
4. Generates a 32-byte alphanumeric masterkey, stores it as Kubernetes Secret `zitadel-masterkey` (idempotent: skips if it already exists)
5. Generates a 16-char admin password (guaranteed 1+ uppercase, lowercase, digit, symbol)
6. Deploys Zitadel Helm chart (`zitadel/zitadel` from `https://charts.zitadel.com`):
- `chart_version: None` -- **uses latest chart version** (not pinned)
- No `--wait` flag -- returns before pods are ready
- Ingress annotations are **OpenShift-oriented** (`route.openshift.io/termination: edge`, `cert-manager.io/cluster-issuer: letsencrypt-prod`). On k3d these annotations are silently ignored.
- Ingress includes TLS config with `secretName: "{host}-tls"`, which requires cert-manager. Without cert-manager, TLS termination does not happen at the ingress level.
**Key Helm values set by ZitadelScore**:
- `zitadel.configmapConfig.ExternalDomain`: the `host` field (e.g., `sso.sto1.nationtech.io`)
- `zitadel.configmapConfig.ExternalSecure: true`
- `zitadel.configmapConfig.TLS.Enabled: false` (TLS at ingress, not in Zitadel)
- Admin user: `UserName: "admin"`, Email: **`admin@zitadel.example.com`** (hardcoded, not derived from host)
- Database credentials: injected via `env[].valueFrom.secretKeyRef` from secret `zitadel-pg-superuser` (both user and admin use the same superuser -- there is a TODO to fix this)
**Expected output**:
```
===== ZITADEL DEPLOYMENT COMPLETE =====
Login URL: https://sso.sto1.nationtech.io
Username: admin@zitadel.sso.sto1.nationtech.io
Password: <generated 16-char password>
```
**Note on the success message**: The printed username `admin@zitadel.{host}` does not match the actual configured email `admin@zitadel.example.com`. The actual login username in Zitadel is `admin` (the `UserName` field). This discrepancy exists in the current code.
**Validation on k3d**:
```bash
# Wait for pods to be ready (Helm returns before readiness)
kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=zitadel -n zitadel --timeout=300s
# Port-forward to access Zitadel (ingress won't work without proper DNS/TLS on k3d)
kubectl port-forward svc/zitadel -n zitadel 8080:8080
# Access at http://localhost:8080 (note: ExternalSecure=true may cause redirect issues)
```
**Known issues for k3d deployment**:
- `ExternalSecure: true` tells Zitadel to expect HTTPS, but k3d port-forward is HTTP. This may cause redirect loops. Override with: modify the example to set `ExternalSecure: false` for local dev.
- The CNPG operator must be installed on the cluster. `K8sAnywhereTopology` handles this via the `PostgreSQL` trait implementation, which deploys the operator first.
#### Step 3: Deploy OpenBao
OpenBao requires only `Topology + K8sclient + HelmCommand` (no PostgreSQL dependency).
```bash
cargo run -p example-openbao
```
**What happens internally** (see `harmony/src/modules/openbao/mod.rs`):
1. `OpenbaoScore` directly delegates to `HelmChartScore.create_interpret()` -- there is no custom `execute()` logic, no namespace creation step, no secret generation
2. Deploys OpenBao Helm chart (`openbao/openbao` from `https://openbao.github.io/openbao-helm`):
- `chart_version: None` -- **uses latest chart version** (not pinned)
- `create_namespace: true` -- the `openbao` namespace is created by Helm
- `install_only: false` -- uses `helm upgrade --install`
**Exact Helm values set by OpenbaoScore**:
```yaml
global:
openshift: true # <-- PROBLEM: hardcoded, see below
server:
standalone:
enabled: true
config: |
ui = true
listener "tcp" {
tls_disable = true
address = "[::]:8200"
cluster_address = "[::]:8201"
}
storage "file" {
path = "/openbao/data"
}
service:
enabled: true
ingress:
enabled: true
hosts:
- host: <host field> # e.g., openbao.sebastien.sto1.nationtech.io
dataStorage:
enabled: true
size: 10Gi
storageClass: null # uses cluster default
accessMode: ReadWriteOnce
auditStorage:
enabled: true
size: 10Gi
storageClass: null
accessMode: ReadWriteOnce
ui:
enabled: true
```
**Critical issue: `global.openshift: true` is hardcoded.** The OpenBao Helm chart default is `global.openshift: false`. When set to `true`, the chart adjusts security contexts and may create OpenShift Routes instead of standard Kubernetes Ingress resources. **On k3d (vanilla k8s), this will produce resources that may not work correctly.** Before deploying on k3d, this must be overridden.
**Fix required for k3d**: Either:
1. Modify `OpenbaoScore` to accept an `openshift: bool` field (preferred long-term fix)
2. Or for this example, create a custom example that passes `values_overrides` with `global.openshift=false`
**Post-deployment initialization** (manual -- the TODO in `mod.rs` acknowledges this is not automated):
OpenBao starts in a sealed state. You must initialize and unseal it manually. See https://openbao.org/docs/platform/k8s/helm/run/
```bash
# Initialize OpenBao (generates unseal keys + root token)
kubectl exec -n openbao openbao-0 -- bao operator init
# Save the output! It contains 5 unseal keys and the root token.
# Example output:
# Unseal Key 1: abc123...
# Unseal Key 2: def456...
# ...
# Initial Root Token: hvs.xxxxx
# Unseal (requires 3 of 5 keys by default)
kubectl exec -n openbao openbao-0 -- bao operator unseal <key1>
kubectl exec -n openbao openbao-0 -- bao operator unseal <key2>
kubectl exec -n openbao openbao-0 -- bao operator unseal <key3>
```
**Validation**:
```bash
kubectl exec -n openbao openbao-0 -- bao status
# Should show "Sealed: false"
```
**Note**: The ingress has **no TLS configuration** (unlike Zitadel's ingress). Access is HTTP-only unless you configure TLS separately.
#### Step 4: Configure OpenBao for Harmony
Two paths are available depending on the authentication method:
##### Path A: Userpass auth (simpler, for local dev)
The current `OpenbaoSecretStore` supports **token** and **userpass** authentication. It does NOT yet implement the JWT/OIDC device flow described in ADR 020-1.
```bash
# Port-forward to access OpenBao API
kubectl port-forward svc/openbao -n openbao 8200:8200 &
export BAO_ADDR="http://127.0.0.1:8200"
export BAO_TOKEN="<root token from init>"
# Enable KV v2 secrets engine (default mount "secret")
bao secrets enable -path=secret kv-v2
# Enable userpass auth method
bao auth enable userpass
# Create a user for Harmony (note: the path is auth/userpass/users/<name>;
# auth/userpass/login/<name> is the login endpoint, not user creation)
bao write auth/userpass/users/harmony password="harmony-dev-password"
# Create policy granting read/write on harmony/* paths
cat <<'EOF' | bao policy write harmony-dev -
path "secret/data/harmony/*" {
capabilities = ["create", "read", "update", "delete", "list"]
}
path "secret/metadata/harmony/*" {
capabilities = ["list", "read", "delete"]
}
EOF
# Create the user with the policy attached
bao write auth/userpass/users/harmony \
password="harmony-dev-password" \
policies="harmony-dev"
```
**Bug in `OpenbaoSecretStore::authenticate_userpass()`**: The `kv_mount` parameter (default `"secret"`) is passed to `vaultrs::auth::userpass::login()` as the auth mount path. This means it calls `POST /v1/auth/secret/login/{username}` instead of the correct `POST /v1/auth/userpass/login/{username}`. **The auth mount and KV mount are conflated into one parameter.**
**Workaround**: Set `OPENBAO_KV_MOUNT=userpass` so the auth call hits the correct mount path. But then KV operations would use mount `userpass` instead of `secret`, which is wrong.
**Proper fix needed**: Split `kv_mount` into two separate parameters: one for the KV v2 engine mount (`secret`) and one for the auth mount (`userpass`). This is a bug in `harmony_secret/src/store/openbao.rs:234`.
**For this example**: Use **token auth** instead of userpass to sidestep the bug:
```bash
# Set env vars for the example
export OPENBAO_URL="http://127.0.0.1:8200"
export OPENBAO_TOKEN="<root token from init>"
export OPENBAO_KV_MOUNT="secret"
```
##### Path B: JWT auth with Zitadel (target architecture, per ADR 020-1)
This is the production path described in the ADR. It requires the device flow code that is **not yet implemented** in `OpenbaoSecretStore`. The current code only supports token and userpass.
When implemented, the flow will be:
1. Enable JWT auth method in OpenBao
2. Configure it to trust Zitadel's OIDC discovery URL
3. Create a role that maps Zitadel JWT claims to OpenBao policies
```bash
# Enable JWT auth
bao auth enable jwt
# Configure JWT auth to trust Zitadel
bao write auth/jwt/config \
oidc_discovery_url="https://<zitadel-host>" \
bound_issuer="https://<zitadel-host>"
# Create role for Harmony developers
bao write auth/jwt/role/harmony-developer \
role_type="jwt" \
bound_audiences="<harmony_client_id>" \
user_claim="email" \
groups_claim="urn:zitadel:iam:org:project:roles" \
policies="harmony-dev" \
ttl="4h" \
max_ttl="24h" \
token_type="service"
```
**Zitadel application setup** (in Zitadel console):
1. Create project: `Harmony`
2. Add application: `Harmony CLI` (Native app type)
3. Enable Device Authorization grant type
4. Set scopes: `openid email profile offline_access`
5. Note the `client_id`
This path is deferred until the device flow is implemented in `OpenbaoSecretStore`.
#### Step 5: Write end-to-end example
The example uses `StoreSource<OpenbaoSecretStore>` with token auth to avoid the userpass mount bug.
**Environment variables required** (from `harmony_secret/src/config.rs`):
| Variable | Required | Default | Notes |
|---|---|---|---|
| `OPENBAO_URL` | Yes | None | Falls back to `VAULT_ADDR` |
| `OPENBAO_TOKEN` | For token auth | None | Root or user token |
| `OPENBAO_USERNAME` | For userpass | None | Requires `OPENBAO_PASSWORD` too |
| `OPENBAO_PASSWORD` | For userpass | None | |
| `OPENBAO_KV_MOUNT` | No | `"secret"` | KV v2 engine mount path. **Also used as userpass auth mount -- this is a bug.** |
| `OPENBAO_SKIP_TLS` | No | `false` | Set `"true"` to disable TLS verification |
**Note**: `OpenbaoSecretStore::new()` is `async` and **requires a running OpenBao** at construction time (it validates the token if using cached auth). If OpenBao is unreachable during construction, the call will fail. The graceful fallback only applies to `StoreSource::get()` calls after construction -- the `ConfigManager` must be built with a live store, or the store must be wrapped in a lazy initialization pattern.
```rust
// harmony_config/examples/openbao_chain.rs
use harmony_config::{ConfigManager, EnvSource, SqliteSource, StoreSource};
use harmony_secret::OpenbaoSecretStore;
use serde::{Deserialize, Serialize};
use std::sync::Arc;
#[derive(Debug, Clone, Serialize, Deserialize, schemars::JsonSchema, PartialEq)]
struct AppConfig {
host: String,
port: u16,
}
impl harmony_config::Config for AppConfig {
const KEY: &'static str = "AppConfig";
}
#[tokio::main]
async fn main() -> anyhow::Result<()> {
env_logger::init();
// Build the source chain
let env_source: Arc<dyn harmony_config::ConfigSource> = Arc::new(EnvSource);
let sqlite = Arc::new(
SqliteSource::default()
.await
.expect("Failed to open SQLite"),
);
// OpenBao store -- requires OPENBAO_URL and OPENBAO_TOKEN env vars
// Falls back gracefully if OpenBao is unreachable at query time
let openbao_url = std::env::var("OPENBAO_URL")
.or(std::env::var("VAULT_ADDR"))
.ok();
let sources: Vec<Arc<dyn harmony_config::ConfigSource>> = if let Some(url) = openbao_url {
let kv_mount = std::env::var("OPENBAO_KV_MOUNT")
.unwrap_or_else(|_| "secret".to_string());
let skip_tls = std::env::var("OPENBAO_SKIP_TLS")
.map(|v| v == "true")
.unwrap_or(false);
match OpenbaoSecretStore::new(
url,
kv_mount,
skip_tls,
std::env::var("OPENBAO_TOKEN").ok(),
std::env::var("OPENBAO_USERNAME").ok(),
std::env::var("OPENBAO_PASSWORD").ok(),
)
.await
{
Ok(store) => {
let store_source = Arc::new(StoreSource::new("harmony".to_string(), store));
vec![env_source, Arc::clone(&sqlite) as _, store_source]
}
Err(e) => {
eprintln!("Warning: OpenBao unavailable ({e}), using local sources only");
vec![env_source, sqlite]
}
}
} else {
println!("No OPENBAO_URL set, using local sources only");
vec![env_source, sqlite]
};
let manager = ConfigManager::new(sources);
// Scenario 1: get() with nothing stored -- returns NotFound
let result = manager.get::<AppConfig>().await;
println!("Get (empty): {:?}", result);
// Scenario 2: set() then get()
let config = AppConfig {
host: "production.example.com".to_string(),
port: 443,
};
manager.set(&config).await?;
println!("Set: {:?}", config);
let retrieved = manager.get::<AppConfig>().await?;
println!("Get (after set): {:?}", retrieved);
assert_eq!(config, retrieved);
println!("End-to-end chain validated!");
Ok(())
}
```
**Key behaviors demonstrated**:
1. **Graceful construction fallback**: If `OPENBAO_URL` is not set or OpenBao is unreachable at startup, the chain is built without it
2. **Graceful query fallback**: `StoreSource::get()` returns `Ok(None)` on any error, so the chain continues to SQLite
3. **Environment override**: `HARMONY_CONFIG_AppConfig='{"host":"env-host","port":9090}'` bypasses all backends
#### Step 6: Validate graceful fallback
Already validated via unit tests (26 tests pass):
- `test_store_source_error_falls_through_to_sqlite` -- `StoreSource` with `AlwaysErrorStore` returns connection error, chain falls through to `SqliteSource`
- `test_store_source_not_found_falls_through_to_sqlite` -- `StoreSource` returns `NotFound`, chain falls through to `SqliteSource`
**Code path (FIXED in `harmony_config/src/source/store.rs`)**:
```rust
// StoreSource::get() -- returns Ok(None) on ANY error, allowing chain to continue
match self.store.get_raw(&self.namespace, key).await {
Ok(bytes) => { /* deserialize and return */ Ok(Some(value)) }
Err(SecretStoreError::NotFound { .. }) => Ok(None),
Err(_) => Ok(None), // Connection errors, timeouts, etc.
}
```
#### Step 7: Known issues and blockers
| Issue | Location | Severity | Status |
|---|---|---|---|
| `global.openshift: true` hardcoded | `harmony/src/modules/openbao/mod.rs:32` | **Blocker for k3d** | ✅ Fixed: Added `openshift: bool` field to `OpenbaoScore` (defaults to `false`) |
| `kv_mount` used as auth mount path | `harmony_secret/src/store/openbao.rs:234` | **Bug** | ✅ Fixed: Added separate `auth_mount` parameter; added `OPENBAO_AUTH_MOUNT` env var |
| Admin email hardcoded `admin@zitadel.example.com` | `harmony/src/modules/zitadel/mod.rs:314` | Minor | Cosmetic mismatch with success message |
| `ExternalSecure: true` hardcoded | `harmony/src/modules/zitadel/mod.rs:306` | **Issue for k3d** | ✅ Fixed: Zitadel now detects Kubernetes distribution and uses appropriate settings (OpenShift = TLS + cert-manager annotations, k3d = plain nginx ingress without TLS) |
| No Helm chart version pinning | Both modules | Risk | Non-deterministic deploys |
| No `--wait` on Helm install | `harmony/src/modules/helm/chart.rs` | UX | Must manually wait for readiness |
| `get_version()`/`get_status()` are `todo!()` | Both modules | Panic risk | Do not call these methods |
| JWT/OIDC device flow not implemented | `harmony_secret/src/store/openbao.rs` | **Gap** | ✅ Implemented: `ZitadelOidcAuth` in `harmony_secret/src/store/zitadel.rs` |
| `HARMONY_SECRET_NAMESPACE` panics if not set | `harmony_secret/src/config.rs:5` | Runtime panic | Only affects `SecretManager`, not `StoreSource` directly |
**Remaining work**:
- [x] `StoreSource<OpenbaoSecretStore>` integration validates compilation
- [x] StoreSource returns `Ok(None)` on connection error (not `Err`)
- [x] Graceful fallback tests pass when OpenBao is unreachable (2 new tests)
- [x] Fix `global.openshift: true` in `OpenbaoScore` for k3d compatibility
- [x] Fix `kv_mount` / auth mount conflation bug in `OpenbaoSecretStore`
- [x] Create and test `harmony_config/examples/openbao_chain.rs` against real k3d deployment
- [x] Implement JWT/OIDC device flow in `OpenbaoSecretStore` (ADR 020-1) — `ZitadelOidcAuth` implemented and wired into `OpenbaoSecretStore::new()` auth chain
- [x] Fix Zitadel distribution detection — Zitadel now uses `k8s_client.get_k8s_distribution()` to detect OpenShift vs k3d and applies appropriate Helm values (TLS + cert-manager for OpenShift, plain nginx for k3d)
### 1.5 UX validation checklist ⏳
@@ -153,8 +602,8 @@ Remaining work:
- [x] Fix `get_or_prompt` to persist to first writable source (via `should_persist()`), not all sources
- [x] Integration tests for full resolution chain
- [x] Branch-switching deserialization failure test
- [ ] `StoreSource<OpenbaoSecretStore>` integration validated (compiles, graceful fallback)
- [ ] ADR for Zitadel OIDC target architecture
- [x] `StoreSource<OpenbaoSecretStore>` integration validated (compiles, graceful fallback)
- [x] ADR for Zitadel OIDC target architecture
- [ ] Update docs to reflect final implementation and behavior
## Key Implementation Notes
@@ -168,3 +617,7 @@ Remaining work:
4. **Env var precedence**: Environment variables always take precedence over SQLite in the resolution chain
5. **Testing**: All tests use `tempfile::NamedTempFile` for temporary database paths, ensuring test isolation
6. **Graceful fallback**: `StoreSource::get()` returns `Ok(None)` on any error (connection refused, timeout, etc.), allowing the chain to fall through to the next source. This ensures OpenBao unavailability doesn't break the config chain.
7. **StoreSource errors don't block chain**: When OpenBao is unreachable, `StoreSource::get()` returns `Ok(None)` and the `ConfigManager` continues to the next source (typically `SqliteSource`). This is validated by `test_store_source_error_falls_through_to_sqlite` and `test_store_source_not_found_falls_through_to_sqlite`.

View File

@@ -0,0 +1,25 @@
[package]
name = "example-harmony-sso"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_config = { path = "../../harmony_config" }
harmony_macros = { path = "../../harmony_macros" }
harmony_secret = { path = "../../harmony_secret" }
harmony_types = { path = "../../harmony_types" }
k3d-rs = { path = "../../k3d" }
kube.workspace = true
tokio.workspace = true
url.workspace = true
log.workspace = true
env_logger.workspace = true
serde.workspace = true
serde_json.workspace = true
anyhow.workspace = true
reqwest.workspace = true
directories = "6.0.0"

View File

@@ -0,0 +1,395 @@
use anyhow::Context;
use harmony::inventory::Inventory;
use harmony::modules::openbao::OpenbaoScore;
use harmony::score::Score;
use harmony::topology::Topology;
use k3d_rs::{K3d, PortMapping};
use log::info;
use serde::Deserialize;
use serde::Serialize;
use std::path::PathBuf;
use std::process::Command;
// Name of the k3d cluster this example provisions; the kubeconfig context
// becomes `k3d-harmony-example`.
const CLUSTER_NAME: &str = "harmony-example";
// Ingress hostnames used by the deployed services. These are not publicly
// resolvable — add /etc/hosts entries pointing at 127.0.0.1, or use the
// k3d loadbalancer port mappings set up in `ensure_k3d_cluster`.
const ZITADEL_HOST: &str = "sso.harmony.local";
const OPENBAO_HOST: &str = "bao.harmony.local";
// Host ports mapped into the cluster: Zitadel is reached through the
// ingress (container port 80), OpenBao through its API port (8200).
const ZITADEL_PORT: u32 = 8080;
const OPENBAO_PORT: u32 = 8200;
/// Directory holding the k3d binary and cluster state.
///
/// Uses the platform data directory (e.g. `~/.local/share/harmony/k3d` on
/// Linux), falling back to `/tmp/harmony-k3d` when no base dirs resolve.
fn get_k3d_binary_path() -> PathBuf {
    match directories::BaseDirs::new() {
        Some(dirs) => dirs.data_dir().join("harmony").join("k3d"),
        None => PathBuf::from("/tmp/harmony-k3d"),
    }
}
/// Directory where the example persists OpenBao init output (unseal keys,
/// root token). Platform data dir with a `/tmp` fallback, mirroring
/// `get_k3d_binary_path`.
fn get_openbao_data_path() -> PathBuf {
    if let Some(dirs) = directories::BaseDirs::new() {
        return dirs.data_dir().join("harmony").join("openbao");
    }
    PathBuf::from("/tmp/harmony-openbao")
}
/// Ensures the example's k3d cluster exists and is running, with host port
/// mappings for the Zitadel ingress (80) and the OpenBao API (8200).
///
/// # Errors
/// Fails when the k3d data directory cannot be created or when k3d
/// installation / cluster provisioning fails.
async fn ensure_k3d_cluster() -> anyhow::Result<()> {
    let install_dir = get_k3d_binary_path();
    std::fs::create_dir_all(&install_dir).context("Failed to create k3d data directory")?;
    info!(
        "Ensuring k3d cluster '{}' is running with port mappings",
        CLUSTER_NAME
    );
    let mappings = vec![
        PortMapping::new(ZITADEL_PORT, 80),
        PortMapping::new(OPENBAO_PORT, 8200),
    ];
    let cluster = K3d::new(install_dir, Some(CLUSTER_NAME.to_string())).with_port_mappings(mappings);
    cluster
        .ensure_installed()
        .await
        .map_err(|e| anyhow::anyhow!("Failed to ensure k3d installed: {}", e))?;
    info!("k3d cluster '{}' is ready", CLUSTER_NAME);
    Ok(())
}
/// Builds a `K8sAnywhereTopology` pointed at this example's k3d cluster.
///
/// The env vars must be set before `from_env()` reads them:
/// - `HARMONY_USE_LOCAL_K3D=false` / `HARMONY_AUTOINSTALL=false`: this
///   example provisions its own cluster (see `ensure_k3d_cluster`), so
///   Harmony's built-in k3d auto-provisioning is disabled.
/// - `HARMONY_K8S_CONTEXT=k3d-harmony-example`: use the kubeconfig context
///   created for `CLUSTER_NAME`.
fn create_topology() -> harmony::topology::K8sAnywhereTopology {
    // SAFETY: `set_var` is unsafe in edition 2024 because mutating the
    // process environment is not thread-safe. This runs from `main` before
    // the topology (and anything that might spawn threads reading the
    // environment) is created. NOTE(review): confirm no runtime worker
    // threads read env vars concurrently at this point.
    unsafe {
        std::env::set_var("HARMONY_USE_LOCAL_K3D", "false");
        std::env::set_var("HARMONY_AUTOINSTALL", "false");
        std::env::set_var("HARMONY_K8S_CONTEXT", "k3d-harmony-example");
    }
    harmony::topology::K8sAnywhereTopology::from_env()
}
/// Removes a stale OpenBao agent-injector mutating webhook left behind by a
/// previous deployment, which would otherwise conflict with a fresh install.
///
/// Deletion is best-effort (`--ignore-not-found`, result discarded); only
/// the initial listing can fail.
async fn cleanup_openbao_webhook() -> anyhow::Result<()> {
    let listing = Command::new("kubectl")
        .args([
            "--context",
            "k3d-harmony-example",
            "get",
            "mutatingwebhookconfigurations",
        ])
        .output()
        .context("Failed to check webhooks")?;
    // Nothing to clean up unless the injector webhook is present.
    let present = String::from_utf8_lossy(&listing.stdout).contains("openbao-agent-injector-cfg");
    if !present {
        return Ok(());
    }
    info!("Deleting conflicting OpenBao webhook...");
    let _ = Command::new("kubectl")
        .args([
            "--context",
            "k3d-harmony-example",
            "delete",
            "mutatingwebhookconfiguration",
            "openbao-agent-injector-cfg",
            "--ignore-not-found=true",
        ])
        .output();
    Ok(())
}
/// Deploys OpenBao onto the topology via `OpenbaoScore` (plain-Kubernetes
/// mode: `openshift: false` for the k3d cluster).
///
/// # Errors
/// Propagates any failure from interpreting the score.
async fn deploy_openbao(topology: &harmony::topology::K8sAnywhereTopology) -> anyhow::Result<()> {
    info!("Deploying OpenBao...");
    let score = OpenbaoScore {
        host: OPENBAO_HOST.to_string(),
        openshift: false,
    };
    score
        .interpret(&Inventory::autoload(), topology)
        .await
        .context("OpenBao deployment failed")?;
    info!("OpenBao deployed successfully");
    Ok(())
}
/// Waits for the `openbao-0` pod to be initialized (up to 120s).
///
/// A failed `kubectl wait` is logged but not fatal — the subsequent fixed
/// sleep gives the container a chance to come up regardless, and later
/// steps will surface a real failure.
async fn wait_for_openbao_running() -> anyhow::Result<()> {
    info!("Waiting for OpenBao pods to be running...");
    let wait = Command::new("kubectl")
        .args([
            "--context",
            "k3d-harmony-example",
            "wait",
            "-n",
            "openbao",
            "--for=condition=podinitialized",
            "pod/openbao-0",
            "--timeout=120s",
        ])
        .output()
        .context("Failed to wait for OpenBao pod")?;
    if !wait.status.success() {
        info!(
            "Pod initialized wait failed, trying alternative approach: {}",
            String::from_utf8_lossy(&wait.stderr)
        );
    }
    // Small grace period even when the wait succeeded/failed.
    tokio::time::sleep(tokio::time::Duration::from_secs(5)).await;
    info!("OpenBao pod is running (may be sealed)");
    Ok(())
}
/// Subset of `bao operator init -format=json` output that the example
/// persists to disk and re-reads on subsequent runs.
#[derive(Debug, Serialize, Deserialize)]
struct OpenBaoInitOutput {
    // Base64-encoded unseal key shares (bao JSON field `unseal_keys_b64`).
    #[serde(rename = "unseal_keys_b64")]
    keys: Vec<String>,
    // Initial root token generated at init time.
    #[serde(rename = "root_token")]
    root_token: String,
}
/// Initializes OpenBao (`bao operator init`) and returns the root token.
///
/// Init output (unseal keys + root token) is persisted to
/// `<data dir>/unseal-keys.json` so repeated runs reuse the same material.
/// When the keys file already exists, init is skipped and the stored root
/// token is returned.
///
/// # Errors
/// - OpenBao reports "already initialized" but no local keys file exists
///   (the keys are unrecoverable; the cluster must be recreated).
/// - `kubectl exec` fails, init exits non-zero or prints nothing, or the
///   JSON output cannot be parsed.
async fn init_openbao() -> anyhow::Result<String> {
    let data_path = get_openbao_data_path();
    std::fs::create_dir_all(&data_path).context("Failed to create openbao data directory")?;
    let keys_file = data_path.join("unseal-keys.json");
    if keys_file.exists() {
        info!("OpenBao already initialized, loading existing keys");
        let content = std::fs::read_to_string(&keys_file)?;
        let init_output: OpenBaoInitOutput = serde_json::from_str(&content)?;
        return Ok(init_output.root_token);
    }
    info!("Initializing OpenBao...");
    let output = Command::new("kubectl")
        .args([
            "--context",
            "k3d-harmony-example",
            "exec",
            "-n",
            "openbao",
            "openbao-0",
            "--",
            "bao",
            "operator",
            "init",
            "-format=json",
        ])
        .output()
        .context("Failed to initialize OpenBao")?;
    let stderr = String::from_utf8_lossy(&output.stderr);
    let stdout = String::from_utf8_lossy(&output.stdout);
    // Checked before the generic status check: "already initialized" with no
    // local keys file means the unseal keys are lost and needs its own error.
    if stderr.contains("already initialized") {
        info!("OpenBao is already initialized");
        return Err(anyhow::anyhow!(
            "OpenBao is already initialized but no keys file found. \
            Please delete the cluster and try again: k3d cluster delete harmony-example"
        ));
    }
    if !output.status.success() {
        return Err(anyhow::anyhow!(
            "OpenBao init failed with status {}: {}",
            output.status,
            stderr
        ));
    }
    if stdout.trim().is_empty() {
        return Err(anyhow::anyhow!(
            "OpenBao init returned empty output. stderr: {}",
            stderr
        ));
    }
    let init_output: OpenBaoInitOutput = serde_json::from_str(&stdout)?;
    std::fs::write(&keys_file, serde_json::to_string_pretty(&init_output)?)?;
    // The file contains unseal keys and the root token: restrict it to the
    // owner. Best-effort; skipped on non-unix platforms.
    #[cfg(unix)]
    {
        use std::os::unix::fs::PermissionsExt;
        let _ = std::fs::set_permissions(&keys_file, std::fs::Permissions::from_mode(0o600));
    }
    info!("OpenBao initialized successfully");
    info!("Unseal keys saved to {:?}", keys_file);
    Ok(init_output.root_token)
}
async fn unseal_openbao(root_token: &str) -> anyhow::Result<()> {
info!("Unsealing OpenBao...");
let status_output = Command::new("kubectl")
.args([
"--context",
"k3d-harmony-example",
"exec",
"-n",
"openbao",
"openbao-0",
"--",
"bao",
"status",
"-format=json",
])
.output()
.context("Failed to get OpenBao status")?;
#[derive(Deserialize)]
struct StatusOutput {
sealed: bool,
}
if status_output.status.success() {
if let Ok(status) =
serde_json::from_str::<StatusOutput>(&String::from_utf8_lossy(&status_output.stdout))
{
if !status.sealed {
info!("OpenBao is already unsealed");
return Ok(());
}
}
}
let data_path = get_openbao_data_path();
let keys_file = data_path.join("unseal-keys.json");
let content = std::fs::read_to_string(&keys_file)?;
let init_output: OpenBaoInitOutput = serde_json::from_str(&content)?;
for key in &init_output.keys[0..3] {
let output = Command::new("kubectl")
.args([
"--context",
"k3d-harmony-example",
"exec",
"-n",
"openbao",
"openbao-0",
"--",
"bao",
"operator",
"unseal",
key,
])
.output()
.context("Failed to unseal OpenBao")?;
if !output.status.success() {
return Err(anyhow::anyhow!(
"Unseal failed: {}",
String::from_utf8_lossy(&output.stderr)
));
}
}
info!("OpenBao unsealed successfully");
Ok(())
}
/// Runs a command inside the `openbao-0` pod with `VAULT_TOKEN` set.
///
/// `args` is the full in-pod command line (e.g. `["bao", "auth", ...]`).
/// The token is injected via `env VAULT_TOKEN=... <cmd>` instead of the
/// previous interpolated `sh -c` string, so neither the token nor the
/// arguments are ever re-parsed by a shell (no quoting/injection hazard;
/// tokens containing shell metacharacters now work). `env` is available in
/// the OpenBao image (same busybox userland that provided `sh`).
///
/// Returns the command's stdout on success.
///
/// # Errors
/// Fails when `kubectl exec` cannot run or the in-pod command exits
/// non-zero (stderr is included in the error message).
async fn run_bao_command(root_token: &str, args: &[&str]) -> anyhow::Result<String> {
    let token_env = format!("VAULT_TOKEN={}", root_token);
    let mut kubectl_args: Vec<&str> = vec![
        "--context",
        "k3d-harmony-example",
        "exec",
        "-n",
        "openbao",
        "openbao-0",
        "--",
        "env",
        &token_env,
    ];
    kubectl_args.extend_from_slice(args);
    let output = Command::new("kubectl")
        .args(&kubectl_args)
        .output()
        .context("Failed to run bao command")?;
    if !output.status.success() {
        return Err(anyhow::anyhow!(
            "bao command failed: {}",
            String::from_utf8_lossy(&output.stderr)
        ));
    }
    Ok(String::from_utf8_lossy(&output.stdout).to_string())
}
/// Configures OpenBao for the example: enables userpass auth and a KV v2
/// mount at `secret/`, then creates a `harmony` user with the default policy.
///
/// # Errors
/// Fails only when creating the `harmony` user fails; the two enable steps
/// are best-effort (see below).
async fn configure_openbao_admin_user(root_token: &str) -> anyhow::Result<()> {
    info!("Configuring OpenBao with userpass auth...");
    // Best-effort: `auth enable` / `secrets enable` exit non-zero when the
    // mount already exists, which is expected on re-runs — errors are
    // deliberately discarded here.
    let _ = run_bao_command(root_token, &["bao", "auth", "enable", "userpass"]).await;
    let _ = run_bao_command(
        root_token,
        &["bao", "secrets", "enable", "-path=secret", "kv-v2"],
    )
    .await;
    // User create/update is idempotent, so this one must succeed.
    run_bao_command(
        root_token,
        &[
            "bao",
            "write",
            "auth/userpass/users/harmony",
            "password=harmony-dev-password",
            "policies=default",
        ],
    )
    .await?;
    info!("OpenBao configured with userpass auth");
    info!(" Username: harmony");
    info!(" Password: harmony-dev-password");
    // NOTE(review): logging the root token is convenient for a local example
    // but would be a secret leak in any shared environment.
    info!(" Root token: {}", root_token);
    Ok(())
}
/// Example entry point: provisions a local k3d cluster, deploys OpenBao,
/// initializes/unseals it, and configures a userpass login for Harmony.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init();

    // Visual separator reused throughout the banner output.
    const SEP: &str = "===========================================";

    info!("{SEP}");
    info!("Harmony SSO Example");
    info!("Deploys Zitadel + OpenBao on k3d");
    info!("{SEP}");

    ensure_k3d_cluster().await?;

    info!("{SEP}");
    info!("Cluster '{}' is ready", CLUSTER_NAME);
    info!(
        "Zitadel will be available at: http://{}:{}",
        ZITADEL_HOST, ZITADEL_PORT
    );
    info!(
        "OpenBao will be available at: http://{}:{}",
        OPENBAO_HOST, OPENBAO_PORT
    );
    info!("{SEP}");

    let topology = create_topology();
    topology
        .ensure_ready()
        .await
        .context("Failed to initialize topology")?;

    cleanup_openbao_webhook().await?;
    deploy_openbao(&topology).await?;
    wait_for_openbao_running().await?;

    let root_token = init_openbao().await?;
    unseal_openbao(&root_token).await?;
    configure_openbao_admin_user(&root_token).await?;

    info!("{SEP}");
    info!("OpenBao initialized and configured!");
    info!("{SEP}");
    info!("Zitadel: http://{}:{}", ZITADEL_HOST, ZITADEL_PORT);
    info!("OpenBao: http://{}:{}", OPENBAO_HOST, OPENBAO_PORT);
    info!("{SEP}");
    info!("OpenBao credentials:");
    info!(" Username: harmony");
    info!(" Password: harmony-dev-password");
    info!("{SEP}");
    Ok(())
}

View File

@@ -6,6 +6,7 @@ use harmony::{
async fn main() {
let openbao = OpenbaoScore {
host: "openbao.sebastien.sto1.nationtech.io".to_string(),
openshift: false,
};
harmony_cli::run(

View File

@@ -7,6 +7,7 @@ async fn main() {
let zitadel = ZitadelScore {
host: "sso.sto1.nationtech.io".to_string(),
zitadel_version: "v4.12.1".to_string(),
external_secure: true,
};
harmony_cli::run(

View File

@@ -15,6 +15,9 @@ use crate::{
pub struct OpenbaoScore {
/// Host used for external access (ingress)
pub host: String,
/// Set to true when deploying to OpenShift. Defaults to false for k3d/Kubernetes.
#[serde(default)]
pub openshift: bool,
}
impl<T: Topology + K8sclient + HelmCommand> Score<T> for OpenbaoScore {
@@ -24,12 +27,12 @@ impl<T: Topology + K8sclient + HelmCommand> Score<T> for OpenbaoScore {
#[doc(hidden)]
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
// TODO exec pod commands to initialize secret store if not already done
let host = &self.host;
let openshift = self.openshift;
let values_yaml = Some(format!(
r#"global:
openshift: true
openshift: {openshift}
server:
standalone:
enabled: true

View File

@@ -1,3 +1,4 @@
use harmony_k8s::KubernetesDistribution;
use k8s_openapi::api::core::v1::Namespace;
use k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta;
use k8s_openapi::{ByteString, api::core::v1::Secret};
@@ -63,6 +64,20 @@ pub struct ZitadelScore {
/// External domain (e.g. `"auth.example.com"`).
pub host: String,
pub zitadel_version: String,
/// Set to false for local k3d development (uses HTTP instead of HTTPS).
/// Defaults to true for production deployments.
#[serde(default)]
pub external_secure: bool,
}
impl Default for ZitadelScore {
fn default() -> Self {
Self {
host: Default::default(),
zitadel_version: "v4.12.1".to_string(),
external_secure: true,
}
}
}
impl<T: Topology + K8sclient + HelmCommand + PostgreSQL> Score<T> for ZitadelScore {
@@ -75,6 +90,7 @@ impl<T: Topology + K8sclient + HelmCommand + PostgreSQL> Score<T> for ZitadelSco
Box::new(ZitadelInterpret {
host: self.host.clone(),
zitadel_version: self.zitadel_version.clone(),
external_secure: self.external_secure,
})
}
}
@@ -85,6 +101,7 @@ impl<T: Topology + K8sclient + HelmCommand + PostgreSQL> Score<T> for ZitadelSco
struct ZitadelInterpret {
host: String,
zitadel_version: String,
external_secure: bool,
}
#[async_trait]
@@ -222,7 +239,20 @@ impl<T: Topology + K8sclient + HelmCommand + PostgreSQL> Interpret<T> for Zitade
let admin_password = generate_secure_password(16);
// --- Step 3: Create masterkey secret ------------------------------------
// --- Step 3: Get k8s client and detect distribution -------------------
let k8s_client = topology
.k8s_client()
.await
.map_err(|e| InterpretError::new(format!("Failed to get k8s client: {e}")))?;
let distro = k8s_client.get_k8s_distribution().await.map_err(|e| {
InterpretError::new(format!("Failed to detect k8s distribution: {}", e))
})?;
info!("[Zitadel] Detected Kubernetes distribution: {:?}", distro);
// --- Step 4: Create masterkey secret ------------------------------------
debug!(
"[Zitadel] Creating masterkey secret '{}' in namespace '{}'",
@@ -254,13 +284,7 @@ impl<T: Topology + K8sclient + HelmCommand + PostgreSQL> Interpret<T> for Zitade
..Secret::default()
};
match topology
.k8s_client()
.await
.map_err(|e| InterpretError::new(format!("Failed to get k8s client : {e}")))?
.create(&masterkey_secret, Some(NAMESPACE))
.await
{
match k8s_client.create(&masterkey_secret, Some(NAMESPACE)).await {
Ok(_) => {
info!(
"[Zitadel] Masterkey secret '{}' created",
@@ -288,16 +312,15 @@ impl<T: Topology + K8sclient + HelmCommand + PostgreSQL> Interpret<T> for Zitade
MASTERKEY_SECRET_NAME
);
// --- Step 4: Build Helm values ------------------------------------
// --- Step 5: Build Helm values ------------------------------------
warn!(
"[Zitadel] Applying TLS-enabled ingress defaults for OKD/OpenShift. \
cert-manager annotations are included as optional hints and are \
ignored on clusters without cert-manager."
);
let values_yaml = format!(
r#"image:
let values_yaml = match distro {
KubernetesDistribution::OpenshiftFamily => {
warn!(
"[Zitadel] Applying OpenShift-specific ingress with TLS and cert-manager annotations."
);
format!(
r#"image:
tag: {zitadel_version}
zitadel:
masterkeySecretName: "{MASTERKEY_SECRET_NAME}"
@@ -330,8 +353,6 @@ zitadel:
Username: postgres
SSL:
Mode: require
# Directly import credentials from the postgres secret
# TODO : use a less privileged postgres user
env:
- name: ZITADEL_DATABASE_POSTGRES_USER_USERNAME
valueFrom:
@@ -353,7 +374,6 @@ env:
secretKeyRef:
name: "{pg_superuser_secret}"
key: password
# Security context for OpenShift restricted PSA compliance
podSecurityContext:
runAsNonRoot: true
runAsUser: null
@@ -370,7 +390,6 @@ securityContext:
fsGroup: null
seccompProfile:
type: RuntimeDefault
# Init job security context (runs before main deployment)
initJob:
podSecurityContext:
runAsNonRoot: true
@@ -388,7 +407,6 @@ initJob:
fsGroup: null
seccompProfile:
type: RuntimeDefault
# Setup job security context
setupJob:
podSecurityContext:
runAsNonRoot: true
@@ -417,10 +435,9 @@ ingress:
- path: /
pathType: Prefix
tls:
- hosts:
- secretName: zitadel-tls
hosts:
- "{host}"
secretName: "{host}-tls"
login:
enabled: true
podSecurityContext:
@@ -450,15 +467,177 @@ login:
- path: /ui/v2/login
pathType: Prefix
tls:
- hosts:
- "{host}"
secretName: "{host}-tls""#,
zitadel_version = self.zitadel_version
);
- secretName: zitadel-login-tls
hosts:
- "{host}""#,
zitadel_version = self.zitadel_version,
host = host,
db_host = db_host,
db_port = db_port,
admin_password = admin_password,
pg_superuser_secret = pg_superuser_secret
)
}
KubernetesDistribution::K3sFamily | KubernetesDistribution::Default => {
warn!("[Zitadel] Applying k3d/generic ingress without TLS (HTTP only).");
format!(
r#"image:
tag: {zitadel_version}
zitadel:
masterkeySecretName: "{MASTERKEY_SECRET_NAME}"
configmapConfig:
ExternalDomain: "{host}"
ExternalSecure: false
FirstInstance:
Org:
Human:
UserName: "admin"
Password: "{admin_password}"
FirstName: "Zitadel"
LastName: "Admin"
Email: "admin@zitadel.example.com"
PasswordChangeRequired: true
TLS:
Enabled: false
Database:
Postgres:
Host: "{db_host}"
Port: {db_port}
Database: zitadel
MaxOpenConns: 20
MaxIdleConns: 10
User:
Username: postgres
SSL:
Mode: require
Admin:
Username: postgres
SSL:
Mode: require
env:
- name: ZITADEL_DATABASE_POSTGRES_USER_USERNAME
valueFrom:
secretKeyRef:
name: "{pg_superuser_secret}"
key: user
- name: ZITADEL_DATABASE_POSTGRES_USER_PASSWORD
valueFrom:
secretKeyRef:
name: "{pg_superuser_secret}"
key: password
- name: ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME
valueFrom:
secretKeyRef:
name: "{pg_superuser_secret}"
key: user
- name: ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
name: "{pg_superuser_secret}"
key: password
podSecurityContext:
runAsNonRoot: true
runAsUser: null
fsGroup: null
seccompProfile:
type: RuntimeDefault
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
runAsNonRoot: true
runAsUser: null
fsGroup: null
seccompProfile:
type: RuntimeDefault
initJob:
podSecurityContext:
runAsNonRoot: true
runAsUser: null
fsGroup: null
seccompProfile:
type: RuntimeDefault
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
runAsNonRoot: true
runAsUser: null
fsGroup: null
seccompProfile:
type: RuntimeDefault
setupJob:
podSecurityContext:
runAsNonRoot: true
runAsUser: null
fsGroup: null
seccompProfile:
type: RuntimeDefault
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
runAsNonRoot: true
runAsUser: null
fsGroup: null
seccompProfile:
type: RuntimeDefault
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/proxy-body-size: "50m"
hosts:
- host: "{host}"
paths:
- path: /
pathType: Prefix
tls: []
login:
enabled: true
podSecurityContext:
runAsNonRoot: true
runAsUser: null
fsGroup: null
seccompProfile:
type: RuntimeDefault
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
runAsNonRoot: true
runAsUser: null
fsGroup: null
seccompProfile:
type: RuntimeDefault
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/proxy-body-size: "50m"
hosts:
- host: "{host}"
paths:
- path: /ui/v2/login
pathType: Prefix
tls: []"#,
zitadel_version = self.zitadel_version,
host = host,
db_host = db_host,
db_port = db_port,
admin_password = admin_password,
pg_superuser_secret = pg_superuser_secret
)
}
};
trace!("[Zitadel] Helm values YAML:\n{values_yaml}");
// --- Step 5: Deploy Helm chart ------------------------------------
// --- Step 6: Deploy Helm chart ------------------------------------
info!(
"[Zitadel] Deploying Helm chart 'zitadel/zitadel' as release 'zitadel' in namespace '{NAMESPACE}'"
@@ -482,17 +661,25 @@ login:
.interpret(inventory, topology)
.await;
let protocol = if self.external_secure {
"https"
} else {
"http"
};
match &result {
Ok(_) => info!(
"[Zitadel] Helm chart deployed successfully\n\n\
===== ZITADEL DEPLOYMENT COMPLETE =====\n\
Login URL: https://{host}\n\
Username: admin@zitadel.{host}\n\
Login URL: {protocol}://{host}\n\
Username: admin\n\
Password: {admin_password}\n\n\
IMPORTANT: The password is saved in ConfigMap 'zitadel-config-yaml'\n\
and must be changed on first login. Save the credentials in a\n\
secure location after changing them.\n\
========================================="
=========================================",
protocol = protocol,
host = self.host,
admin_password = admin_password
),
Err(e) => error!("[Zitadel] Helm chart deployment failed: {e}"),
}

View File

@@ -37,10 +37,7 @@ async fn main() -> anyhow::Result<()> {
env_logger::init();
let sqlite = SqliteSource::default().await?;
let manager = ConfigManager::new(vec![
Arc::new(EnvSource),
Arc::new(sqlite),
]);
let manager = ConfigManager::new(vec![Arc::new(EnvSource), Arc::new(sqlite)]);
info!("1. Attempting to get TestConfig (expect NotFound on first run)...");
match manager.get::<TestConfig>().await {
@@ -74,7 +71,10 @@ async fn main() -> anyhow::Result<()> {
count: 99,
};
unsafe {
std::env::set_var("HARMONY_CONFIG_TestConfig", serde_json::to_string(&env_config)?);
std::env::set_var(
"HARMONY_CONFIG_TestConfig",
serde_json::to_string(&env_config)?,
);
}
let from_env: TestConfig = manager.get().await?;
info!(" Got from env: {:?}", from_env);

View File

@@ -0,0 +1,190 @@
//! End-to-end example: harmony_config with OpenBao as a ConfigSource
//!
//! This example demonstrates the full config resolution chain:
//! EnvSource → SqliteSource → StoreSource<OpenbaoSecretStore>
//!
//! When OpenBao is unreachable, the chain gracefully falls through to SQLite.
//!
//! **Prerequisites**:
//! - OpenBao must be initialized and unsealed
//! - KV v2 engine must be enabled at the `OPENBAO_KV_MOUNT` path (default: `secret`)
//! - Auth method must be enabled at the `OPENBAO_AUTH_MOUNT` path (default: `userpass`)
//!
//! **Environment variables**:
//! - `OPENBAO_URL` (required for OpenBao): URL of the OpenBao server
//! - `OPENBAO_TOKEN` (optional): Use token auth instead of userpass
//! - `OPENBAO_USERNAME` + `OPENBAO_PASSWORD` (optional): Userpass auth
//! - `OPENBAO_KV_MOUNT` (default: `secret`): KV v2 engine mount path
//! - `OPENBAO_AUTH_MOUNT` (default: `userpass`): Auth method mount path
//! - `OPENBAO_SKIP_TLS` (default: `false`): Skip TLS verification
//! - `HARMONY_SSO_URL` + `HARMONY_SSO_CLIENT_ID` (optional): Zitadel OIDC device flow (RFC 8628)
//!
//! **Run**:
//! ```bash
//! # Without OpenBao (SqliteSource only):
//! cargo run --example openbao_chain
//!
//! # With OpenBao (full chain):
//! export OPENBAO_URL="http://127.0.0.1:8200"
//! export OPENBAO_TOKEN="<your-token>"
//! cargo run --example openbao_chain
//! ```
//!
//! **Setup OpenBao** (if needed):
//! ```bash
//! # Port-forward to local OpenBao
//! kubectl port-forward svc/openbao -n openbao 8200:8200 &
//!
//! # Initialize (one-time)
//! kubectl exec -n openbao openbao-0 -- bao operator init
//!
//! # Enable KV and userpass (one-time)
//! kubectl exec -n openbao openbao-0 -- bao secrets enable -path=secret kv-v2
//! kubectl exec -n openbao openbao-0 -- bao auth enable userpass
//!
//! # Create test user
//! kubectl exec -n openbao openbao-0 -- bao write auth/userpass/users/testuser \
//! password="testpass" policies="default"
//! ```
use std::sync::Arc;
use harmony_config::{Config, ConfigManager, ConfigSource, EnvSource, SqliteSource, StoreSource};
use harmony_secret::OpenbaoSecretStore;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
/// Example application config resolved through the harmony_config chain.
/// `JsonSchema` is required so interactive prompting can derive a form.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, PartialEq)]
struct AppConfig {
    // Hostname the app would connect to.
    host: String,
    // TCP port; `u16` keeps it in the valid port range by construction.
    port: u16,
}

// Fallback values used when no config source has a stored value.
impl Default for AppConfig {
    fn default() -> Self {
        Self {
            host: "localhost".to_string(),
            port: 8080,
        }
    }
}

// `KEY` is the lookup key every ConfigSource uses for this type — e.g. the
// env var `HARMONY_CONFIG_AppConfig` and the SQLite row key.
impl Config for AppConfig {
    const KEY: &'static str = "AppConfig";
}
/// Builds the config resolution chain for this example.
///
/// The chain always starts with `EnvSource` → `SqliteSource`. When
/// `OPENBAO_URL` (or `VAULT_ADDR`) is set and the OpenBao store can be
/// constructed, a `StoreSource<OpenbaoSecretStore>` is appended as the
/// final source; otherwise the local two-source chain is used.
async fn build_manager() -> ConfigManager {
    let env_source: Arc<dyn ConfigSource> = Arc::new(EnvSource);
    let sqlite = Arc::new(
        SqliteSource::default()
            .await
            .expect("Failed to open SQLite database"),
    );

    // OPENBAO_URL takes precedence; VAULT_ADDR is accepted as a fallback.
    let maybe_url = std::env::var("OPENBAO_URL")
        .or_else(|_| std::env::var("VAULT_ADDR"))
        .ok();

    let Some(url) = maybe_url else {
        println!("OPENBAO_URL not set. Using local chain: env → sqlite");
        return ConfigManager::new(vec![env_source, sqlite]);
    };

    let kv_mount = std::env::var("OPENBAO_KV_MOUNT").unwrap_or_else(|_| "secret".to_string());
    let auth_mount = std::env::var("OPENBAO_AUTH_MOUNT").unwrap_or_else(|_| "userpass".to_string());
    let skip_tls = std::env::var("OPENBAO_SKIP_TLS")
        .map(|v| v == "true")
        .unwrap_or(false);

    // Token, userpass, and Zitadel OIDC credentials are all optional; the
    // store picks whichever auth method is available.
    let store_result = OpenbaoSecretStore::new(
        url,
        kv_mount,
        auth_mount,
        skip_tls,
        std::env::var("OPENBAO_TOKEN").ok(),
        std::env::var("OPENBAO_USERNAME").ok(),
        std::env::var("OPENBAO_PASSWORD").ok(),
        std::env::var("HARMONY_SSO_URL").ok(),
        std::env::var("HARMONY_SSO_CLIENT_ID").ok(),
    )
    .await;

    match store_result {
        Ok(store) => {
            println!("OpenBao connected. Full chain: env → sqlite → openbao");
            let store_source: Arc<dyn ConfigSource> =
                Arc::new(StoreSource::new("harmony".to_string(), store));
            ConfigManager::new(vec![env_source, Arc::clone(&sqlite) as _, store_source])
        }
        Err(e) => {
            // Graceful degradation: an unreachable OpenBao never blocks startup.
            eprintln!("Warning: OpenBao unavailable ({e}), using local chain: env → sqlite");
            ConfigManager::new(vec![env_source, sqlite])
        }
    }
}
/// Demonstrates the config resolution chain end to end: a first-run miss,
/// a set/get round trip, and an environment-variable override that shadows
/// every other backend.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    env_logger::init();
    let manager = build_manager().await;

    println!("\n=== harmony_config OpenBao Chain Demo ===\n");

    // Step 1: nothing is stored on a fresh run, so NotFound is expected.
    println!("1. Attempting to get AppConfig (expect NotFound on first run)...");
    match manager.get::<AppConfig>().await {
        Ok(config) => {
            println!("   Found: {:?}", config);
        }
        Err(harmony_config::ConfigError::NotFound { .. }) => {
            println!("   NotFound - no config stored yet");
        }
        Err(e) => {
            println!("   Error: {:?}", e);
        }
    }

    // Step 2: persist a config through the chain.
    println!("\n2. Setting AppConfig via set()...");
    let config = AppConfig {
        host: "production.example.com".to_string(),
        port: 443,
    };
    manager.set(&config).await?;
    println!("   Set: {:?}", config);

    // Step 3: read it back and verify round-trip fidelity.
    println!("\n3. Getting AppConfig back...");
    let retrieved: AppConfig = manager.get().await?;
    println!("   Retrieved: {:?}", retrieved);
    assert_eq!(config, retrieved);

    // Step 4: a HARMONY_CONFIG_<KEY> env var wins over every backend.
    println!("\n4. Demonstrating env override...");
    println!("   HARMONY_CONFIG_AppConfig env var overrides all backends");
    let env_config = AppConfig {
        host: "env-override.example.com".to_string(),
        port: 9090,
    };
    // NOTE(review): set_var/remove_var are unsafe because mutating the
    // process environment races with concurrent readers; acceptable in
    // this single-flow demo, but confirm no other tasks read env here.
    unsafe {
        std::env::set_var(
            "HARMONY_CONFIG_AppConfig",
            serde_json::to_string(&env_config)?,
        );
    }
    let from_env: AppConfig = manager.get().await?;
    println!("   Got from env: {:?}", from_env);
    // Fix: assert on the value actually resolved through the env source.
    // The original compared `env_config.host` against the literal it was
    // just assigned, which is a tautology and never exercised the override.
    assert_eq!(from_env.host, "env-override.example.com");
    unsafe {
        std::env::remove_var("HARMONY_CONFIG_AppConfig");
    }

    println!("\n=== Done! ===");
    println!("Config persisted at ~/.local/share/harmony/config/config.db");
    Ok(())
}

View File

@@ -105,12 +105,11 @@ impl ConfigManager {
let config =
T::parse_to_obj().map_err(|e| ConfigError::PromptError(e.to_string()))?;
let value = serde_json::to_value(&config).map_err(|e| {
ConfigError::Serialization {
let value =
serde_json::to_value(&config).map_err(|e| ConfigError::Serialization {
key: T::KEY.to_string(),
source: e,
}
})?;
})?;
for source in &self.sources {
if !source.should_persist() {
@@ -182,6 +181,7 @@ pub fn default_config_dir() -> Option<PathBuf> {
#[cfg(test)]
mod tests {
use super::*;
use harmony_secret::{SecretStore, SecretStoreError};
use pretty_assertions::assert_eq;
use serde::{Deserialize, Serialize};
use std::sync::Mutex;
@@ -598,7 +598,9 @@ mod tests {
use tempfile::NamedTempFile;
let temp_file = NamedTempFile::new().unwrap();
let sqlite = SqliteSource::open(temp_file.path().to_path_buf()).await.unwrap();
let sqlite = SqliteSource::open(temp_file.path().to_path_buf())
.await
.unwrap();
let sqlite = Arc::new(sqlite);
let manager = ConfigManager::new(vec![sqlite.clone()]);
@@ -622,7 +624,9 @@ mod tests {
use tempfile::NamedTempFile;
let temp_file = NamedTempFile::new().unwrap();
let sqlite = SqliteSource::open(temp_file.path().to_path_buf()).await.unwrap();
let sqlite = SqliteSource::open(temp_file.path().to_path_buf())
.await
.unwrap();
let sqlite = Arc::new(sqlite);
let env_source = Arc::new(EnvSource);
@@ -661,7 +665,9 @@ mod tests {
use tempfile::NamedTempFile;
let temp_file = NamedTempFile::new().unwrap();
let sqlite = SqliteSource::open(temp_file.path().to_path_buf()).await.unwrap();
let sqlite = SqliteSource::open(temp_file.path().to_path_buf())
.await
.unwrap();
let sqlite = Arc::new(sqlite);
let manager = ConfigManager::new(vec![sqlite.clone()]);
@@ -696,7 +702,10 @@ mod tests {
async fn test_prompt_source_does_not_persist() {
let source = PromptSource::new();
source
.set("TestConfig", &serde_json::json!({"name": "test", "count": 42}))
.set(
"TestConfig",
&serde_json::json!({"name": "test", "count": 42}),
)
.await
.unwrap();
@@ -709,19 +718,103 @@ mod tests {
use tempfile::NamedTempFile;
let temp_file = NamedTempFile::new().unwrap();
let sqlite = SqliteSource::open(temp_file.path().to_path_buf()).await.unwrap();
let sqlite = SqliteSource::open(temp_file.path().to_path_buf())
.await
.unwrap();
let sqlite = Arc::new(sqlite);
let source1 = Arc::new(MockSource::new());
let prompt_source = Arc::new(PromptSource::new());
let manager = ConfigManager::new(vec![
source1.clone(),
sqlite.clone(),
prompt_source.clone(),
]);
let manager =
ConfigManager::new(vec![source1.clone(), sqlite.clone(), prompt_source.clone()]);
let result: Result<TestConfig, ConfigError> = manager.get().await;
assert!(matches!(result, Err(ConfigError::NotFound { .. })));
}
// Test double: a SecretStore whose every operation fails with a generic
// (non-NotFound) error, simulating an unreachable backend such as an
// OpenBao server that is down.
#[derive(Debug)]
struct AlwaysErrorStore;

#[async_trait]
impl SecretStore for AlwaysErrorStore {
    // Reads always fail; StoreSource maps any such error to "no value",
    // letting resolution continue down the chain.
    async fn get_raw(&self, _: &str, _: &str) -> Result<Vec<u8>, SecretStoreError> {
        Err(SecretStoreError::Store("Connection refused".into()))
    }

    // Writes fail the same way.
    async fn set_raw(&self, _: &str, _: &str, _: &[u8]) -> Result<(), SecretStoreError> {
        Err(SecretStoreError::Store("Connection refused".into()))
    }
}
/// A hard error from the first source (backend unreachable) must not abort
/// resolution: the lookup should fall through and succeed from SQLite.
#[tokio::test]
async fn test_store_source_error_falls_through_to_sqlite() {
    use tempfile::NamedTempFile;

    let db_file = NamedTempFile::new().unwrap();
    let sqlite = Arc::new(
        SqliteSource::open(db_file.path().to_path_buf())
            .await
            .unwrap(),
    );
    let failing_store = Arc::new(StoreSource::new("test".to_string(), AlwaysErrorStore));

    // The erroring store comes first in the chain on purpose.
    let manager = ConfigManager::new(vec![failing_store.clone(), sqlite.clone()]);

    // Seed only the second source.
    let seeded = TestConfig {
        name: "from_sqlite".to_string(),
        count: 42,
    };
    sqlite
        .set("TestConfig", &serde_json::to_value(&seeded).unwrap())
        .await
        .unwrap();

    // Resolution skips the failing store and returns the SQLite value.
    let resolved: TestConfig = manager.get().await.unwrap();
    assert_eq!(resolved.name, "from_sqlite");
    assert_eq!(resolved.count, 42);
}
// Test double: a SecretStore that reports NotFound for every read and
// silently accepts writes — a healthy but empty backend.
#[derive(Debug)]
struct NeverFindsStore;

#[async_trait]
impl SecretStore for NeverFindsStore {
    // Every lookup misses; StoreSource translates NotFound into None so the
    // next source in the chain is consulted.
    async fn get_raw(&self, _: &str, _: &str) -> Result<Vec<u8>, SecretStoreError> {
        Err(SecretStoreError::NotFound {
            namespace: "test".to_string(),
            key: "test".to_string(),
        })
    }

    // Writes "succeed" without storing anything.
    async fn set_raw(&self, _: &str, _: &str, _: &[u8]) -> Result<(), SecretStoreError> {
        Ok(())
    }
}
/// A NotFound from the first source is an ordinary miss: resolution must
/// continue and return the value stored in SQLite.
#[tokio::test]
async fn test_store_source_not_found_falls_through_to_sqlite() {
    use tempfile::NamedTempFile;

    let db_file = NamedTempFile::new().unwrap();
    let sqlite = Arc::new(
        SqliteSource::open(db_file.path().to_path_buf())
            .await
            .unwrap(),
    );
    let empty_store = Arc::new(StoreSource::new("test".to_string(), NeverFindsStore));

    // The always-missing store is queried first.
    let manager = ConfigManager::new(vec![empty_store.clone(), sqlite.clone()]);

    // Seed only the SQLite source.
    let seeded = TestConfig {
        name: "from_sqlite".to_string(),
        count: 99,
    };
    sqlite
        .set("TestConfig", &serde_json::to_value(&seeded).unwrap())
        .await
        .unwrap();

    // The miss in the first source falls through to SQLite.
    let resolved: TestConfig = manager.get().await.unwrap();
    assert_eq!(resolved.name, "from_sqlite");
    assert_eq!(resolved.count, 99);
}
}

View File

@@ -39,7 +39,9 @@ impl SqliteSource {
pub async fn default() -> Result<Self, ConfigError> {
let path = crate::default_config_dir()
.ok_or_else(|| ConfigError::SqliteError("Could not determine default config directory".into()))?
.ok_or_else(|| {
ConfigError::SqliteError("Could not determine default config directory".into())
})?
.join("config.db");
Self::open(path).await
}
@@ -56,8 +58,8 @@ impl ConfigSource for SqliteSource {
match row {
Some((value,)) => {
let json_value: serde_json::Value = serde_json::from_str(&value)
.map_err(|e| ConfigError::Deserialization {
let json_value: serde_json::Value =
serde_json::from_str(&value).map_err(|e| ConfigError::Deserialization {
key: key.to_string(),
source: e,
})?;
@@ -73,12 +75,16 @@ impl ConfigSource for SqliteSource {
source: e,
})?;
sqlx::query("INSERT OR REPLACE INTO config (key, value, updated_at) VALUES (?, ?, datetime('now'))")
.bind(key)
.bind(&json_string)
.execute(&self.pool)
.await
.map_err(|e| ConfigError::SqliteError(format!("Failed to insert/update database: {}", e)))?;
sqlx::query(
"INSERT OR REPLACE INTO config (key, value, updated_at) VALUES (?, ?, datetime('now'))",
)
.bind(key)
.bind(&json_string)
.execute(&self.pool)
.await
.map_err(|e| {
ConfigError::SqliteError(format!("Failed to insert/update database: {}", e))
})?;
Ok(())
}

View File

@@ -27,7 +27,7 @@ impl<S: SecretStore + 'static> ConfigSource for StoreSource<S> {
Ok(Some(value))
}
Err(harmony_secret::SecretStoreError::NotFound { .. }) => Ok(None),
Err(e) => Err(ConfigError::StoreError(e)),
Err(_) => Ok(None),
}
}

View File

@@ -22,6 +22,8 @@ inquire.workspace = true
interactive-parse = "0.1.5"
schemars = "0.8"
vaultrs = "0.7.4"
reqwest = { workspace = true, features = ["json"] }
url.workspace = true
[dev-dependencies]
pretty_assertions.workspace = true

View File

@@ -29,4 +29,18 @@ lazy_static! {
env::var("OPENBAO_SKIP_TLS").map(|v| v == "true").unwrap_or(false);
pub static ref OPENBAO_KV_MOUNT: String =
env::var("OPENBAO_KV_MOUNT").unwrap_or_else(|_| "secret".to_string());
pub static ref OPENBAO_AUTH_MOUNT: String =
env::var("OPENBAO_AUTH_MOUNT").unwrap_or_else(|_| "userpass".to_string());
}
lazy_static! {
// Zitadel OIDC configuration (for JWT auth flow)
pub static ref HARMONY_SSO_URL: Option<String> =
env::var("HARMONY_SSO_URL").ok();
pub static ref HARMONY_SSO_CLIENT_ID: Option<String> =
env::var("HARMONY_SSO_CLIENT_ID").ok();
pub static ref HARMONY_SSO_CLIENT_SECRET: Option<String> =
env::var("HARMONY_SSO_CLIENT_SECRET").ok();
pub static ref HARMONY_SECRETS_URL: Option<String> =
env::var("HARMONY_SECRETS_URL").ok();
}

View File

@@ -1,13 +1,16 @@
pub mod config;
mod store;
pub mod store;
use crate::config::SECRET_NAMESPACE;
use async_trait::async_trait;
use config::HARMONY_SSO_CLIENT_ID;
use config::HARMONY_SSO_URL;
use config::INFISICAL_CLIENT_ID;
use config::INFISICAL_CLIENT_SECRET;
use config::INFISICAL_ENVIRONMENT;
use config::INFISICAL_PROJECT_ID;
use config::INFISICAL_URL;
use config::OPENBAO_AUTH_MOUNT;
use config::OPENBAO_KV_MOUNT;
use config::OPENBAO_PASSWORD;
use config::OPENBAO_SKIP_TLS;
@@ -23,7 +26,7 @@ use serde::{Serialize, de::DeserializeOwned};
use std::fmt;
use store::InfisicalSecretStore;
use store::LocalFileSecretStore;
use store::OpenbaoSecretStore;
pub use store::OpenbaoSecretStore;
use thiserror::Error;
use tokio::sync::OnceCell;
@@ -85,10 +88,13 @@ async fn init_secret_manager() -> SecretManager {
let store = OpenbaoSecretStore::new(
OPENBAO_URL.clone().expect("Openbao/Vault URL must be set, see harmony_secret config for ways to provide it. You can try with OPENBAO_URL or VAULT_ADDR"),
OPENBAO_KV_MOUNT.clone(),
OPENBAO_AUTH_MOUNT.clone(),
*OPENBAO_SKIP_TLS,
OPENBAO_TOKEN.clone(),
OPENBAO_USERNAME.clone(),
OPENBAO_PASSWORD.clone(),
HARMONY_SSO_URL.clone(),
HARMONY_SSO_CLIENT_ID.clone(),
)
.await
.expect("Failed to initialize Openbao/Vault secret store");

View File

@@ -1,9 +1,8 @@
mod infisical;
mod local_file;
mod openbao;
pub mod zitadel;
pub use infisical::InfisicalSecretStore;
pub use infisical::*;
pub use local_file::LocalFileSecretStore;
pub use local_file::*;
pub use openbao::OpenbaoSecretStore;

View File

@@ -6,14 +6,10 @@ use std::fmt::Debug;
use std::fs;
use std::path::PathBuf;
use vaultrs::auth;
use vaultrs::client::{Client, VaultClient, VaultClientSettingsBuilder};
use vaultrs::client::{VaultClient, VaultClientSettingsBuilder};
use vaultrs::kv2;
/// Token response from Vault/Openbao auth endpoints
#[derive(Debug, Deserialize)]
struct TokenResponse {
auth: AuthInfo,
}
use super::zitadel::ZitadelOidcAuth;
#[derive(Debug, Serialize, Deserialize)]
struct AuthInfo {
@@ -36,6 +32,7 @@ impl From<vaultrs::api::AuthInfo> for AuthInfo {
pub struct OpenbaoSecretStore {
client: VaultClient,
kv_mount: String,
auth_mount: String,
}
impl Debug for OpenbaoSecretStore {
@@ -43,26 +40,35 @@ impl Debug for OpenbaoSecretStore {
f.debug_struct("OpenbaoSecretStore")
.field("client", &self.client.settings)
.field("kv_mount", &self.kv_mount)
.field("auth_mount", &self.auth_mount)
.finish()
}
}
impl OpenbaoSecretStore {
/// Creates a new Openbao/Vault secret store with authentication
/// Creates a new Openbao/Vault secret store with authentication.
///
/// - `kv_mount`: The KV v2 engine mount path (e.g., `"secret"`). Used for secret storage.
/// - `auth_mount`: The auth method mount path (e.g., `"userpass"`). Used for userpass authentication.
/// - `zitadel_sso_url`: Zitadel OIDC server URL (e.g., `"https://sso.example.com"`). If provided along with `zitadel_client_id`, Zitadel OIDC device flow will be attempted.
/// - `zitadel_client_id`: OIDC client ID registered in Zitadel.
pub async fn new(
base_url: String,
kv_mount: String,
auth_mount: String,
skip_tls: bool,
token: Option<String>,
username: Option<String>,
password: Option<String>,
zitadel_sso_url: Option<String>,
zitadel_client_id: Option<String>,
) -> Result<Self, SecretStoreError> {
info!("OPENBAO_STORE: Initializing client for URL: {base_url}");
// 1. If token is provided via env var, use it directly
if let Some(t) = token {
debug!("OPENBAO_STORE: Using token from environment variable");
return Self::with_token(&base_url, skip_tls, &t, &kv_mount);
return Self::with_token(&base_url, skip_tls, &t, &kv_mount, &auth_mount);
}
// 2. Try to load cached token
@@ -76,32 +82,81 @@ impl OpenbaoSecretStore {
skip_tls,
&cached_token.client_token,
&kv_mount,
&auth_mount,
);
}
warn!("OPENBAO_STORE: Cached token is invalid or expired");
}
// 3. Authenticate with username/password
// 3. Try Zitadel OIDC device flow if configured
if let (Some(sso_url), Some(client_id)) = (zitadel_sso_url, zitadel_client_id) {
info!("OPENBAO_STORE: Attempting Zitadel OIDC device flow");
match Self::authenticate_zitadel_oidc(&sso_url, &client_id, skip_tls).await {
Ok(oidc_session) => {
info!("OPENBAO_STORE: Zitadel OIDC authentication successful");
// Cache the OIDC session token
let auth_info = AuthInfo {
client_token: oidc_session.openbao_token,
lease_duration: Some(oidc_session.openbao_token_ttl),
token_type: "oidc".to_string(),
};
if let Err(e) = Self::cache_token(&cache_path, &auth_info) {
warn!("OPENBAO_STORE: Failed to cache OIDC token: {e}");
}
return Self::with_token(
&base_url,
skip_tls,
&auth_info.client_token,
&kv_mount,
&auth_mount,
);
}
Err(e) => {
warn!("OPENBAO_STORE: Zitadel OIDC failed: {e}, trying other auth methods");
}
}
}
// 4. Authenticate with username/password
let (user, pass) = match (username, password) {
(Some(u), Some(p)) => (u, p),
_ => {
return Err(SecretStoreError::Store(
"No valid token found and username/password not provided. \
Set OPENBAO_TOKEN or OPENBAO_USERNAME/OPENBAO_PASSWORD environment variables."
Set OPENBAO_TOKEN, OPENBAO_USERNAME/OPENBAO_PASSWORD, or \
HARMONY_SSO_URL/HARMONY_SSO_CLIENT_ID for Zitadel OIDC."
.into(),
));
}
};
let token =
Self::authenticate_userpass(&base_url, &kv_mount, skip_tls, &user, &pass).await?;
Self::authenticate_userpass(&base_url, &auth_mount, skip_tls, &user, &pass).await?;
// Cache the token
if let Err(e) = Self::cache_token(&cache_path, &token) {
warn!("OPENBAO_STORE: Failed to cache token: {e}");
}
Self::with_token(&base_url, skip_tls, &token.client_token, &kv_mount)
Self::with_token(
&base_url,
skip_tls,
&token.client_token,
&kv_mount,
&auth_mount,
)
}
async fn authenticate_zitadel_oidc(
sso_url: &str,
client_id: &str,
skip_tls: bool,
) -> Result<super::zitadel::OidcSession, SecretStoreError> {
let oidc_auth = ZitadelOidcAuth::new(sso_url.to_string(), client_id.to_string(), skip_tls);
oidc_auth
.authenticate()
.await
.map_err(|e| SecretStoreError::Store(e.into()))
}
/// Create a client with an existing token
@@ -110,6 +165,7 @@ impl OpenbaoSecretStore {
skip_tls: bool,
token: &str,
kv_mount: &str,
auth_mount: &str,
) -> Result<Self, SecretStoreError> {
let mut settings = VaultClientSettingsBuilder::default();
settings.address(base_url).token(token);
@@ -129,6 +185,7 @@ impl OpenbaoSecretStore {
Ok(Self {
client,
kv_mount: kv_mount.to_string(),
auth_mount: auth_mount.to_string(),
})
}
@@ -168,7 +225,6 @@ impl OpenbaoSecretStore {
if let Some(parent) = path.parent() {
fs::create_dir_all(parent)?;
}
// Set file permissions to 0600 (owner read/write only)
#[cfg(unix)]
{
use std::os::unix::fs::OpenOptionsExt;
@@ -209,14 +265,13 @@ impl OpenbaoSecretStore {
/// Authenticate using username/password (userpass auth method)
async fn authenticate_userpass(
base_url: &str,
kv_mount: &str,
auth_mount: &str,
skip_tls: bool,
username: &str,
password: &str,
) -> Result<AuthInfo, SecretStoreError> {
info!("OPENBAO_STORE: Authenticating with username/password");
// Create a client without a token for authentication
let mut settings = VaultClientSettingsBuilder::default();
settings.address(base_url);
if skip_tls {
@@ -230,8 +285,7 @@ impl OpenbaoSecretStore {
)
.map_err(|e| SecretStoreError::Store(Box::new(e)))?;
// Authenticate using userpass method
let token = auth::userpass::login(&client, kv_mount, username, password)
let token = auth::userpass::login(&client, auth_mount, username, password)
.await
.map_err(|e| SecretStoreError::Store(Box::new(e)))?;
@@ -249,7 +303,6 @@ impl SecretStore for OpenbaoSecretStore {
let data: serde_json::Value = kv2::read(&self.client, &self.kv_mount, &path)
.await
.map_err(|e| {
// Check for not found error
if e.to_string().contains("does not exist") || e.to_string().contains("404") {
SecretStoreError::NotFound {
namespace: namespace.to_string(),
@@ -260,7 +313,6 @@ impl SecretStore for OpenbaoSecretStore {
}
})?;
// Extract the actual secret value stored under the "value" key
let value = data.get("value").and_then(|v| v.as_str()).ok_or_else(|| {
SecretStoreError::Store("Secret does not contain expected 'value' field".into())
})?;
@@ -281,7 +333,6 @@ impl SecretStore for OpenbaoSecretStore {
let value_str =
String::from_utf8(val.to_vec()).map_err(|e| SecretStoreError::Store(Box::new(e)))?;
// Create the data structure expected by our format
let data = serde_json::json!({
"value": value_str
});

View File

@@ -0,0 +1,335 @@
use log::{debug, info};
use serde::{Deserialize, Serialize};
use std::fs;
use std::path::PathBuf;
use std::time::Duration;
/// A cached login session: the token handed to OpenBao plus the OIDC
/// artifacts needed to refresh or validate it. Persisted to disk as JSON
/// (see `save_session` / `load_session`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OidcSession {
    /// Token used to talk to OpenBao (set from the OIDC access token).
    pub openbao_token: String,
    /// Reported token TTL in seconds (3600 when the token endpoint omits
    /// `expires_in`).
    pub openbao_token_ttl: u64,
    /// Whether the OpenBao token is renewable.
    /// NOTE(review): the device flow currently hardcodes this to `true` —
    /// confirm against the actual OpenBao login response.
    pub openbao_renewable: bool,
    /// OIDC refresh token, present when `offline_access` was granted.
    pub refresh_token: Option<String>,
    /// OIDC ID token, when returned by the token endpoint.
    pub id_token: Option<String>,
    /// UNIX timestamp (seconds) at/after which the session is expired;
    /// `None` means no known expiry (treated as never expiring).
    pub expires_at: Option<i64>,
}
impl OidcSession {
    /// Returns true when `expires_at` is set and is at or before the current
    /// UNIX time. A session with no `expires_at` never expires.
    pub fn is_expired(&self) -> bool {
        match self.expires_at {
            Some(expires_at) => {
                // A pre-epoch system clock yields 0, which conservatively
                // reports any non-positive expiry as expired.
                let now = std::time::SystemTime::now()
                    .duration_since(std::time::UNIX_EPOCH)
                    .map(|d| d.as_secs() as i64)
                    .unwrap_or(0);
                expires_at <= now
            }
            None => false,
        }
    }

    /// Returns true when the OpenBao token derived from this session has
    /// expired.
    ///
    /// `_ttl` is currently unused: expiry is tracked solely through
    /// `expires_at`, so this delegates to [`Self::is_expired`] instead of
    /// duplicating the same clock comparison (the two bodies were identical).
    pub fn is_openbao_token_expired(&self, _ttl: u64) -> bool {
        self.is_expired()
    }
}
#[derive(Debug, Deserialize)]
struct DeviceAuthorizationResponse {
device_code: String,
user_code: String,
verification_uri: String,
#[serde(rename = "verification_uri_complete")]
verification_uri_complete: Option<String>,
expires_in: u64,
interval: u64,
}
/// Successful response from the OAuth token endpoint.
// The `#[serde(rename = "...")]` attributes previously on `expires_in`,
// `refresh_token`, and `id_token` renamed each field to its own name and
// were therefore no-ops; they have been removed. `#[serde(default)]` is
// kept so missing fields deserialize to `None`.
#[derive(Debug, Deserialize)]
struct TokenResponse {
    /// The OIDC access token.
    access_token: String,
    /// Token lifetime in seconds; optional per OAuth 2.0.
    #[serde(default)]
    expires_in: Option<u64>,
    /// Present when the `offline_access` scope was granted.
    #[serde(default)]
    refresh_token: Option<String>,
    /// OIDC ID token, when issued.
    #[serde(default)]
    id_token: Option<String>,
}
#[derive(Debug, Deserialize)]
struct TokenErrorResponse {
error: String,
#[serde(rename = "error_description")]
error_description: Option<String>,
}
/// Computes the on-disk location of the cached OIDC session file.
///
/// Prefers `<data_dir>/harmony/secrets/oidc_session_<hash>` and falls back to
/// `/tmp/oidc_session_<hash>` when no base directories are available.
///
/// NOTE(review): the suffix hashes the constant "zitadel-oidc" with
/// `DefaultHasher`, whose output is not guaranteed stable across Rust
/// releases — a toolchain upgrade may silently change the cache path and
/// force a re-login. Confirm whether a fixed file name would be preferable.
fn get_session_cache_path() -> PathBuf {
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    let mut hasher = DefaultHasher::new();
    "zitadel-oidc".hash(&mut hasher);
    let file_name = format!("oidc_session_{:016x}", hasher.finish());

    match directories::BaseDirs::new() {
        Some(dirs) => dirs
            .data_dir()
            .join("harmony")
            .join("secrets")
            .join(&file_name),
        None => PathBuf::from("/tmp").join(&file_name),
    }
}
/// Reads and deserializes the cached session from disk.
///
/// Returns a human-readable error string when the file is missing,
/// unreadable, or contains invalid JSON.
fn load_session() -> Result<OidcSession, String> {
    let path = get_session_cache_path();
    let contents = fs::read_to_string(&path)
        .map_err(|e| format!("Could not load session from {path:?}: {e}"))?;
    serde_json::from_str(&contents)
        .map_err(|e| format!("Could not deserialize session from {path:?}: {e}"))
}
/// Persists `session` as pretty-printed JSON at the cache path, creating
/// parent directories as needed.
///
/// On unix the file is created with mode 0600 so the contained token is
/// readable only by the owner; other platforms use a plain write.
fn save_session(session: &OidcSession) -> Result<(), String> {
    let path = get_session_cache_path();
    if let Some(parent) = path.parent() {
        fs::create_dir_all(parent)
            .map_err(|e| format!("Could not create session directory: {e}"))?;
    }
    #[cfg(unix)]
    {
        use std::io::Write;
        use std::os::unix::fs::OpenOptionsExt;
        // Open with restrictive permissions first, then serialize and write —
        // the file never exists with a looser mode.
        let mut file = fs::OpenOptions::new()
            .write(true)
            .create(true)
            .truncate(true)
            .mode(0o600)
            .open(&path)
            .map_err(|e| format!("Could not open session file: {e}"))?;
        let json = serde_json::to_string_pretty(session)
            .map_err(|e| format!("Could not serialize session: {e}"))?;
        file.write_all(json.as_bytes())
            .map_err(|e| format!("Could not write session file: {e}"))?;
    }
    #[cfg(not(unix))]
    {
        let json = serde_json::to_string_pretty(session).map_err(|e| e.to_string())?;
        fs::write(&path, json).map_err(|e| format!("Could not write session file: {e}"))?;
    }
    Ok(())
}
/// Performs the OAuth 2.0 Device Authorization Grant against a Zitadel
/// instance, producing an `OidcSession`.
pub struct ZitadelOidcAuth {
    /// Base URL of the Zitadel instance; OAuth endpoints are appended to it
    /// as `/oauth/v2/...`.
    sso_url: String,
    /// OIDC client id registered for the device flow.
    client_id: String,
    /// When true, TLS certificate validation is disabled (dev/test only).
    skip_tls: bool,
}
impl ZitadelOidcAuth {
    /// Creates a new authenticator.
    ///
    /// * `sso_url` - base URL of the Zitadel instance (no trailing slash;
    ///   OAuth endpoints are appended as `/oauth/v2/...`).
    /// * `client_id` - OIDC client id registered for the device flow.
    /// * `skip_tls` - disable TLS certificate validation (dev/test only).
    pub fn new(sso_url: String, client_id: String, skip_tls: bool) -> Self {
        Self {
            sso_url,
            client_id,
            skip_tls,
        }
    }

    /// Authenticates via the OAuth 2.0 Device Authorization Grant (RFC 8628).
    ///
    /// Reuses a cached, non-expired session from disk when available;
    /// otherwise runs the interactive device flow and caches the result.
    /// Cache write failures are deliberately ignored (best effort).
    pub async fn authenticate(&self) -> Result<OidcSession, String> {
        if let Ok(session) = load_session() {
            if !session.is_expired() {
                info!("ZITADEL_OIDC: Using cached session");
                return Ok(session);
            }
        }
        info!("ZITADEL_OIDC: Starting device authorization flow");
        let device_code = self.request_device_code().await?;
        self.print_verification_instructions(&device_code);
        let token_response = self
            .poll_for_token(&device_code, device_code.interval)
            .await?;
        let session = self.process_token_response(token_response).await?;
        let _ = save_session(&session);
        Ok(session)
    }

    /// Builds the HTTP client, optionally accepting invalid TLS certificates.
    fn http_client(&self) -> Result<reqwest::Client, String> {
        let mut builder = reqwest::Client::builder();
        if self.skip_tls {
            builder = builder.danger_accept_invalid_certs(true);
        }
        builder
            .build()
            .map_err(|e| format!("Failed to build HTTP client: {e}"))
    }

    /// Requests a device/user code pair from the device authorization endpoint.
    ///
    /// # Errors
    /// Returns a message including the HTTP status and body when the server
    /// responds with a non-success status, instead of surfacing a confusing
    /// JSON parse error on the error body.
    async fn request_device_code(&self) -> Result<DeviceAuthorizationResponse, String> {
        let client = self.http_client()?;
        let params = [
            ("client_id", self.client_id.as_str()),
            ("scope", "openid email profile offline_access"),
        ];
        let response = client
            .post(format!("{}/oauth/v2/device_authorization", self.sso_url))
            .form(&params)
            .send()
            .await
            .map_err(|e| format!("Device authorization request failed: {e}"))?;
        // Fail fast on HTTP-level errors before attempting to parse JSON.
        let status = response.status();
        if !status.is_success() {
            let body = response.text().await.unwrap_or_default();
            return Err(format!(
                "Device authorization request failed with status {status}: {body}"
            ));
        }
        response
            .json::<DeviceAuthorizationResponse>()
            .await
            .map_err(|e| format!("Failed to parse device authorization response: {e}"))
    }

    /// Prints the browser URL and user code the user must enter to approve
    /// this device.
    fn print_verification_instructions(&self, code: &DeviceAuthorizationResponse) {
        println!();
        println!("=================================================");
        println!("[Harmony] To authenticate with Zitadel, open your browser:");
        println!("   {}", code.verification_uri);
        println!();
        println!("   and enter code: {}", code.user_code);
        if let Some(ref complete_url) = code.verification_uri_complete {
            println!();
            println!("   Or visit this direct link (code is pre-filled):");
            println!("   {}", complete_url);
        }
        println!("=================================================");
        println!();
    }

    /// Polls the token endpoint until the user approves, denies, or the
    /// device code expires.
    ///
    /// `interval_secs` is the server-suggested polling interval. It is
    /// clamped to at least 5 seconds (RFC 8628's default) *before* any use:
    /// the previous code divided `expires_in` by the raw value, which
    /// panicked with a division by zero if the server reported `interval: 0`.
    async fn poll_for_token(
        &self,
        code: &DeviceAuthorizationResponse,
        interval_secs: u64,
    ) -> Result<TokenResponse, String> {
        let client = self.http_client()?;
        let params = [
            ("grant_type", "urn:ietf:params:oauth:grant-type:device_code"),
            ("device_code", code.device_code.as_str()),
            ("client_id", self.client_id.as_str()),
        ];
        // Clamp once and use the clamped value everywhere (sleep + attempt
        // budget) so a pathological server interval cannot cause a panic.
        let poll_secs = interval_secs.max(5);
        let interval = Duration::from_secs(poll_secs);
        let max_attempts = (code.expires_in / poll_secs).max(60) as usize;
        for attempt in 0..max_attempts {
            tokio::time::sleep(interval).await;
            let response = client
                .post(format!("{}/oauth/v2/token", self.sso_url))
                .form(&params)
                .send()
                .await
                .map_err(|e| format!("Token request failed: {e}"))?;
            let status = response.status();
            let body = response
                .text()
                .await
                .map_err(|e| format!("Failed to read response body: {e}"))?;
            if status == 400 {
                if let Ok(error) = serde_json::from_str::<TokenErrorResponse>(&body) {
                    match error.error.as_str() {
                        "authorization_pending" => {
                            // User has not approved yet; keep polling.
                            debug!("ZITADEL_OIDC: authorization_pending (attempt {})", attempt);
                            continue;
                        }
                        "slow_down" => {
                            // RFC 8628: back off by an extra 5 seconds.
                            debug!("ZITADEL_OIDC: slow_down, increasing interval");
                            tokio::time::sleep(Duration::from_secs(5)).await;
                            continue;
                        }
                        "expired_token" => {
                            return Err(
                                "Device code expired. Please restart authentication.".to_string()
                            );
                        }
                        "access_denied" => {
                            return Err("Access denied by user.".to_string());
                        }
                        _ => {
                            return Err(format!(
                                "OAuth error: {} - {}",
                                error.error,
                                error.error_description.unwrap_or_default()
                            ));
                        }
                    }
                }
            }
            return serde_json::from_str(&body)
                .map_err(|e| format!("Failed to parse token response: {e}"));
        }
        Err("Token polling timed out".to_string())
    }

    /// Converts a successful token response into an [`OidcSession`],
    /// computing an absolute `expires_at` from the relative `expires_in`.
    async fn process_token_response(&self, response: TokenResponse) -> Result<OidcSession, String> {
        let expires_at = response.expires_in.map(|ttl| {
            let now = std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .map(|d| d.as_secs() as i64)
                .unwrap_or(0);
            now + ttl as i64
        });
        Ok(OidcSession {
            openbao_token: response.access_token,
            // TTL defaults to one hour when the endpoint omits expires_in.
            openbao_token_ttl: response.expires_in.unwrap_or(3600),
            // NOTE(review): renewability is assumed, not reported — confirm.
            openbao_renewable: true,
            refresh_token: response.refresh_token,
            id_token: response.id_token,
            expires_at,
        })
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Builds a session fixture differing only in its expiry timestamp.
    fn session_with_expiry(expires_at: Option<i64>) -> OidcSession {
        OidcSession {
            openbao_token: "test".to_string(),
            openbao_token_ttl: 3600,
            openbao_renewable: true,
            refresh_token: None,
            id_token: None,
            expires_at,
        }
    }

    #[test]
    fn test_oidc_session_is_expired() {
        // An expiry at the epoch is firmly in the past.
        assert!(session_with_expiry(Some(0)).is_expired());

        // An expiry an hour from now is not expired.
        let one_hour_ahead = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .map(|d| d.as_secs() as i64)
            .unwrap_or(0)
            + 3600;
        assert!(!session_with_expiry(Some(one_hour_ahead)).is_expired());
    }
}

View File

@@ -10,6 +10,31 @@ const K3D_BIN_FILE_NAME: &str = "k3d";
/// Manages a local k3d (k3s-in-docker) cluster.
pub struct K3d {
    /// Directory where the k3d binary and related files live.
    base_dir: PathBuf,
    /// Name of the managed cluster.
    /// NOTE(review): presumably a default is applied elsewhere when `None` —
    /// confirm against the rest of the impl.
    cluster_name: Option<String>,
    /// Host→container port mappings applied at cluster creation time.
    port_mappings: Vec<PortMapping>,
}
/// A host-to-container port forwarding rule for `k3d cluster create -p`.
#[derive(Debug, Clone)]
pub struct PortMapping {
    /// Port exposed on the host machine.
    pub host_port: u32,
    /// Port inside the cluster that traffic is forwarded to.
    pub container_port: u32,
    /// k3d node filter the mapping is attached to ("loadbalancer" by default).
    pub loadbalancer: String,
}

impl PortMapping {
    /// Creates a mapping attached to the default "loadbalancer" node filter.
    pub fn new(host_port: u32, container_port: u32) -> Self {
        PortMapping {
            host_port,
            container_port,
            loadbalancer: String::from("loadbalancer"),
        }
    }

    /// Renders the mapping in k3d's `HOST:CONTAINER@NODEFILTER` CLI syntax.
    pub fn to_arg(&self) -> String {
        let PortMapping {
            host_port,
            container_port,
            loadbalancer,
        } = self;
        format!("{host_port}:{container_port}@{loadbalancer}")
    }
}
impl K3d {
@@ -17,9 +42,15 @@ impl K3d {
Self {
base_dir,
cluster_name,
port_mappings: Vec::new(),
}
}
/// Builder-style setter: replaces any configured port mappings and returns
/// the instance for chaining.
pub fn with_port_mappings(mut self, port_mappings: Vec<PortMapping>) -> Self {
    self.port_mappings = port_mappings;
    self
}
async fn get_binary_for_current_platform(
&self,
latest_release: octocrab::models::repos::Release,
@@ -329,14 +360,29 @@ impl K3d {
}
/// Creates a k3d cluster named `cluster_name`, forwarding each configured
/// port mapping via a repeated `-p HOST:CONTAINER@FILTER` flag.
///
/// # Errors
/// Returns the command's stderr as a `String` when `k3d cluster create`
/// exits with a non-success status.
// NOTE(review): the rendered diff interleaved the pre-change lines
// (a duplicate `run_k3d_command` call and a duplicate `info!`) with the new
// ones; this is the merged post-change version with the stale lines removed.
fn create_cluster(&self, cluster_name: &str) -> Result<(), String> {
    let mut args = vec![
        "cluster".to_string(),
        "create".to_string(),
        cluster_name.to_string(),
    ];
    for mapping in &self.port_mappings {
        args.push("-p".to_string());
        args.push(mapping.to_arg());
    }
    let output = self.run_k3d_command(args)?;
    if !output.status.success() {
        let stderr = String::from_utf8_lossy(&output.stderr);
        return Err(format!("Failed to create cluster: {}", stderr));
    }
    info!(
        "Successfully created k3d cluster '{}' with {} port mappings",
        cluster_name,
        self.port_mappings.len()
    );
    Ok(())
}