Compare commits

..

21 Commits

Author SHA1 Message Date
fb72e94dbb fix: when creating a port-channel with forced speed, it needs to be set on the port-channel and its member interfaces
Some checks failed
Run Check Script / check (pull_request) Failing after 11m29s
2026-04-10 15:41:09 -04:00
a646f1f4d0 feat: use an enum for interface types, add logging
Some checks failed
Run Check Script / check (pull_request) Failing after 23s
2026-03-25 12:35:54 -04:00
2728fc8989 feat: add possibility to configure interface speed
Some checks failed
Run Check Script / check (pull_request) Failing after 26s
2026-03-25 12:10:39 -04:00
8c8baaf9cc feat: create new brocade configuration score
Some checks failed
Run Check Script / check (pull_request) Failing after 24s
2026-03-25 11:45:57 -04:00
a1c9bfeabd feat: add a 'reset_interface' function
Some checks failed
Run Check Script / check (pull_request) Failing after 24s
2026-03-25 09:58:56 -04:00
d8dab12834 set the description of the port-channel interface
Some checks failed
Run Check Script / check (pull_request) Failing after 28s
2026-03-25 09:35:02 -04:00
7422534018 feat: require to specify port-channel ID instead of finding an available one
Some checks failed
Run Check Script / check (pull_request) Failing after 24s
2026-03-25 09:21:09 -04:00
b67275662d fix: use Vlan struct instance everywhere, never use an u16 to reference a Vlan
Some checks failed
Run Check Script / check (pull_request) Failing after 26s
2026-03-24 15:48:16 -04:00
6237e1d877 feat: brocade module now support vlans
Some checks failed
Run Check Script / check (pull_request) Failing after 27s
2026-03-24 15:24:32 -04:00
88e6990051 feat(opnsense-api): examples to list packages and dnsmasq settings now working 2026-03-24 14:07:47 -04:00
8e9f8ce405 wip: opnsense-api crate to replace opnsense-config-xml 2026-03-24 13:26:36 -04:00
d87aa3c7e9 fix opnsense submodule url 2026-03-24 10:51:38 -04:00
90ec2b524a wip(codegen): generates ir and rust code successfully but not really tested yet 2026-03-24 10:23:52 -04:00
5572f98d5f wip(opnsense-codegen): Can now create IR that looks good from example, successfully parses real models too 2026-03-24 09:32:21 -04:00
8024e0d5c3 wip: opnsense codegen 2026-03-24 07:13:53 -04:00
238e7da175 feat: opnsense codegen basic example scaffolded, now we can start implementing real models 2026-03-23 23:27:40 -04:00
bf84bffd57 wip: config + secret merge with e2e sso examples incoming 2026-03-23 23:26:42 -04:00
d4613e42d3 wip: openbao + zitadel e2e setup and test for harmony_config 2026-03-22 21:27:06 -04:00
6a57361356 chore: Update config roadmap
Some checks failed
Run Check Script / check (pull_request) Failing after 12s
2026-03-22 19:04:16 -04:00
d0d4f15122 feat(config): Example prompting
Some checks failed
Run Check Script / check (pull_request) Failing after 14s
2026-03-22 18:18:57 -04:00
93b83b8161 feat(config): Sqlite storage and example 2026-03-22 17:43:12 -04:00
67 changed files with 8783 additions and 325 deletions

12
.gitmodules vendored
View File

@@ -1,3 +1,15 @@
[submodule "examples/try_rust_webapp/tryrust.org"]
path = examples/try_rust_webapp/tryrust.org
url = https://github.com/rust-dd/tryrust.org.git
[submodule "/home/jeangab/work/nationtech/harmony2/opnsense-codegen/vendor/core"]
path = /home/jeangab/work/nationtech/harmony2/opnsense-codegen/vendor/core
url = https://github.com/opnsense/core.git
[submodule "/home/jeangab/work/nationtech/harmony2/opnsense-codegen/vendor/plugins"]
path = /home/jeangab/work/nationtech/harmony2/opnsense-codegen/vendor/plugins
url = https://github.com/opnsense/plugins.git
[submodule "opnsense-codegen/vendor/core"]
path = opnsense-codegen/vendor/core
url = https://github.com/opnsense/core.git
[submodule "opnsense-codegen/vendor/plugins"]
path = opnsense-codegen/vendor/plugins
url = https://github.com/opnsense/plugins.git

108
Cargo.lock generated
View File

@@ -1262,6 +1262,22 @@ dependencies = [
"url",
]
[[package]]
name = "brocade-switch-configuration"
version = "0.1.0"
dependencies = [
"async-trait",
"brocade",
"env_logger",
"harmony",
"harmony_cli",
"harmony_macros",
"harmony_types",
"log",
"serde",
"tokio",
]
[[package]]
name = "brotli"
version = "8.0.2"
@@ -2597,6 +2613,29 @@ dependencies = [
"url",
]
[[package]]
name = "example-harmony-sso"
version = "0.1.0"
dependencies = [
"anyhow",
"directories",
"env_logger",
"harmony",
"harmony_cli",
"harmony_config",
"harmony_macros",
"harmony_secret",
"harmony_types",
"k3d-rs",
"kube",
"log",
"reqwest 0.12.28",
"serde",
"serde_json",
"tokio",
"url",
]
[[package]]
name = "example-k8s-drain-node"
version = "0.1.0"
@@ -3670,8 +3709,10 @@ dependencies = [
name = "harmony_config"
version = "0.1.0"
dependencies = [
"anyhow",
"async-trait",
"directories",
"env_logger",
"harmony_config_derive",
"harmony_secret",
"inquire 0.7.5",
@@ -3681,6 +3722,7 @@ dependencies = [
"schemars 0.8.22",
"serde",
"serde_json",
"sqlx",
"tempfile",
"thiserror 2.0.18",
"tokio",
@@ -3765,12 +3807,14 @@ dependencies = [
"lazy_static",
"log",
"pretty_assertions",
"reqwest 0.12.28",
"schemars 0.8.22",
"serde",
"serde_json",
"tempfile",
"thiserror 2.0.18",
"tokio",
"url",
"vaultrs",
]
@@ -4596,6 +4640,26 @@ dependencies = [
"thiserror 1.0.69",
]
[[package]]
name = "json-prompt"
version = "0.1.0"
dependencies = [
"brocade",
"cidr",
"env_logger",
"harmony",
"harmony_cli",
"harmony_macros",
"harmony_secret",
"harmony_secret_derive",
"harmony_types",
"log",
"schemars 0.8.22",
"serde",
"tokio",
"url",
]
[[package]]
name = "jsonpath-rust"
version = "0.7.5"
@@ -5243,6 +5307,40 @@ version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7c87def4c32ab89d880effc9e097653c8da5d6ef28e6b539d313baaacfbafcbe"
[[package]]
name = "opnsense-api"
version = "0.1.0"
dependencies = [
"base64 0.22.1",
"env_logger",
"http 1.4.0",
"inquire 0.7.5",
"log",
"pretty_assertions",
"reqwest 0.12.28",
"serde",
"serde_json",
"thiserror 2.0.18",
"tokio",
"tokio-test",
]
[[package]]
name = "opnsense-codegen"
version = "0.1.0"
dependencies = [
"clap",
"env_logger",
"heck",
"log",
"pretty_assertions",
"quick-xml",
"serde",
"serde_json",
"thiserror 2.0.18",
"toml",
]
[[package]]
name = "opnsense-config"
version = "0.1.0"
@@ -5753,6 +5851,16 @@ version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e9e1dcb320d6839f6edb64f7a4a59d39b30480d4d1765b56873f7c858538a5fe"
[[package]]
name = "quick-xml"
version = "0.37.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "331e97a1af0bf59823e6eadffe373d7b27f485be8748f71471c662c1f269b7fb"
dependencies = [
"memchr",
"serde",
]
[[package]]
name = "quinn"
version = "0.11.9"

View File

@@ -25,7 +25,7 @@ members = [
"harmony_agent/deploy",
"harmony_node_readiness",
"harmony-k8s",
"harmony_assets",
"harmony_assets", "opnsense-codegen", "opnsense-api",
]
[workspace.package]
@@ -92,3 +92,5 @@ reqwest = { version = "0.12", features = [
], default-features = false }
assertor = "0.0.4"
tokio-test = "0.4"
anyhow = "1.0"
clap = { version = "4", features = ["derive"] }

View File

@@ -6,172 +6,618 @@ Make `harmony_config` production-ready with a seamless first-run experience: clo
## Current State
`harmony_config` exists with:
`harmony_config` now has:
- `Config` trait + `#[derive(Config)]` macro
- `ConfigManager` with ordered source chain
- Four `ConfigSource` implementations:
- Five `ConfigSource` implementations:
- `EnvSource` — reads `HARMONY_CONFIG_{KEY}` env vars
- `LocalFileSource` — reads/writes `{key}.json` files from a directory
- `PromptSource`**stub** (returns `None` / no-ops on set)
- `SqliteSource`**NEW** reads/writes to SQLite database
- `PromptSource` — returns `None` / no-op on set (placeholder for TUI integration)
- `StoreSource<S: SecretStore>` — wraps any `harmony_secret::SecretStore` backend
- 12 unit tests (mock source, env, local file)
- 26 unit tests (mock source, env, local file, sqlite, prompt, integration, store graceful fallback)
- Global `CONFIG_MANAGER` static with `init()`, `get()`, `get_or_prompt()`, `set()`
- Two examples: `basic` and `prompting` in `harmony_config/examples/`
- **Zero workspace consumers** — nothing calls `harmony_config` yet
## Tasks
### 1.1 Add `SqliteSource` as the default zero-setup backend
### 1.1 Add `SqliteSource` as the default zero-setup backend
Replace `LocalFileSource` (JSON files scattered in a directory) with a single SQLite database as the default local backend. `sqlx` with SQLite is already a workspace dependency.
**Status**: Implemented
```rust
// harmony_config/src/source/sqlite.rs
pub struct SqliteSource {
pool: SqlitePool,
}
**Implementation Details**:
impl SqliteSource {
/// Opens or creates the database at the given path.
/// Creates the `config` table if it doesn't exist.
pub async fn open(path: PathBuf) -> Result<Self, ConfigError>
- Database location: `~/.local/share/harmony/config/config.db` (directory is auto-created)
- Schema: `config(key TEXT PRIMARY KEY, value TEXT NOT NULL, updated_at TEXT NOT NULL DEFAULT (datetime('now')))`
- Uses `sqlx` with SQLite runtime
- `SqliteSource::open(path)` - opens/creates database at given path
- `SqliteSource::default()` - uses default Harmony data directory
/// Uses the default Harmony data directory:
/// ~/.local/share/harmony/config.db (Linux)
pub async fn default() -> Result<Self, ConfigError>
}
**Files**:
- `harmony_config/src/source/sqlite.rs` - new file
- `harmony_config/Cargo.toml` - added `sqlx = { workspace = true, features = ["runtime-tokio", "sqlite"] }`
- `Cargo.toml` - added `anyhow = "1.0"` to workspace dependencies
#[async_trait]
impl ConfigSource for SqliteSource {
async fn get(&self, key: &str) -> Result<Option<serde_json::Value>, ConfigError>
async fn set(&self, key: &str, value: &serde_json::Value) -> Result<(), ConfigError>
}
```
Schema:
```sql
CREATE TABLE IF NOT EXISTS config (
key TEXT PRIMARY KEY,
value TEXT NOT NULL,
updated_at TEXT NOT NULL DEFAULT (datetime('now'))
);
```
**Tests**:
**Tests** (all passing):
- `test_sqlite_set_and_get` — round-trip a `TestConfig` struct
- `test_sqlite_get_returns_none_when_missing` — key not in DB
- `test_sqlite_overwrites_on_set` — set twice, get returns latest
- `test_sqlite_concurrent_access` — two tasks writing different keys simultaneously
- All tests use `tempfile::NamedTempFile` for the DB path
### 1.1.1 Add Config example to show exact DX and confirm functionality
### 1.1.1 Add Config example to show exact DX and confirm functionality
Create `harmony_config/examples` that show how to use config crate with various backends.
**Status**: Implemented
Show how to use the derive macros, how to store secrets in a local backend or a zitadel + openbao backend, how to fetch them from environment variables, etc. Explicitly outline the dependencies for examples with dependencies in a comment at the top. Explain how to configure zitadel + openbao for this backend. The local backend should have zero dependency, zero setup, storing its config/secrets with sane defaults.
**Examples created**:
Also show that a Config with default values will not prompt for values with defaults.
1. `harmony_config/examples/basic.rs` - demonstrates:
- Zero-setup SQLite backend (auto-creates directory)
- Using the `#[derive(Config)]` macro
- Environment variable override (`HARMONY_CONFIG_TestConfig` overrides SQLite)
- Direct set/get operations
- Persistence verification
### 1.2 Make `PromptSource` functional
2. `harmony_config/examples/prompting.rs` - demonstrates:
- Config with no defaults (requires user input via `inquire`)
- `get()` flow: env > sqlite > prompt fallback
- `get_or_prompt()` for interactive configuration
- Full resolution chain
- Persistence of prompted values
Currently `PromptSource::get()` returns `None` and `set()` is a no-op. Wire it to `interactive_parse::InteractiveParseObj`:
### 1.2 Make `PromptSource` functional ✅
**Status**: Implemented with design improvement
**Key Finding - Bug Fixed During Implementation**:
The original design had a critical bug in `get_or_prompt()`:
```rust
#[async_trait]
impl ConfigSource for PromptSource {
async fn get(&self, _key: &str) -> Result<Option<serde_json::Value>, ConfigError> {
// PromptSource never "has" a value — it's always a fallback.
// The actual prompting happens in ConfigManager::get_or_prompt().
Ok(None)
}
async fn set(&self, _key: &str, _value: &serde_json::Value) -> Result<(), ConfigError> {
// Prompt source doesn't persist. Other sources in the chain do.
Ok(())
// OLD (BUGGY) - breaks on first source where set() returns Ok(())
for source in &self.sources {
if source.set(T::KEY, &value).await.is_ok() {
break;
}
}
```
The prompting logic is already in `ConfigManager::get_or_prompt()` via `T::parse_to_obj()`. The `PromptSource` struct exists mainly to hold the `PROMPT_MUTEX` and potentially a custom writer for TUI integration later.
Since `EnvSource.set()` returns `Ok(())` (successfully sets env var), the loop would break immediately and never write to `SqliteSource`. Prompted values were never persisted!
**Key fix**: Ensure `get_or_prompt()` persists the prompted value to the **first writable source** (SQLite), not to all sources. Current code tries all sources — this is wrong for prompt-then-persist because you don't want to write prompted values to env vars.
**Solution - Added `should_persist()` method to ConfigSource trait**:
```rust
pub async fn get_or_prompt<T: Config>(&self) -> Result<T, ConfigError> {
match self.get::<T>().await {
Ok(config) => Ok(config),
Err(ConfigError::NotFound { .. }) => {
let config = T::parse_to_obj()
.map_err(|e| ConfigError::PromptError(e.to_string()))?;
let value = serde_json::to_value(&config)
.map_err(|e| ConfigError::Serialization { key: T::KEY.to_string(), source: e })?;
#[async_trait]
pub trait ConfigSource: Send + Sync {
async fn get(&self, key: &str) -> Result<Option<serde_json::Value>, ConfigError>;
async fn set(&self, key: &str, value: &serde_json::Value) -> Result<(), ConfigError>;
fn should_persist(&self) -> bool {
true
}
}
```
// Persist to the first source that accepts writes (skip EnvSource)
for source in &self.sources {
if source.set(T::KEY, &value).await.is_ok() {
break;
}
}
Ok(config)
}
Err(e) => Err(e),
- `EnvSource::should_persist()` returns `false` - shouldn't persist prompted values to env vars
- `PromptSource::should_persist()` returns `false` - doesn't persist anyway
- `get_or_prompt()` now skips sources where `should_persist()` is `false`
**Updated `get_or_prompt()`**:
```rust
for source in &self.sources {
if !source.should_persist() {
continue;
}
if source.set(T::KEY, &value).await.is_ok() {
break;
}
}
```
**Tests**:
- `test_get_or_prompt_persists_to_first_writable_source` — mock source chain where first source is read-only, second is writable. Verify prompted value lands in second source.
- `test_prompt_source_always_returns_none`
- `test_prompt_source_set_is_noop`
- `test_prompt_source_does_not_persist`
- `test_full_chain_with_prompt_source_falls_through_to_prompt`
### 1.3 Integration test: full resolution chain
### 1.3 Integration test: full resolution chain
Test the complete priority chain: env > sqlite > prompt.
**Status**: Implemented
**Tests**:
- `test_full_resolution_chain_sqlite_fallback` — env not set, sqlite has value, get() returns sqlite
- `test_full_resolution_chain_env_overrides_sqlite` — env set, sqlite has value, get() returns env
- `test_branch_switching_scenario_deserialization_error` — old struct shape in sqlite returns Deserialization error
### 1.4 Validate Zitadel + OpenBao integration path ⏳
**Status**: Planning phase - detailed execution plan below
**Background**: ADR 020-1 documents the target architecture for Zitadel OIDC + OpenBao integration. This task validates the full chain by deploying Zitadel and OpenBao on a local k3d cluster and demonstrating an end-to-end example.
**Architecture Overview**:
```
┌─────────────────────────────────────────────────────────────────────┐
│ Harmony CLI / App │
│ │
│ ConfigManager: │
│ 1. EnvSource ← HARMONY_CONFIG_* env vars (highest priority) │
│ 2. SqliteSource ← ~/.local/share/harmony/config/config.db │
│ 3. StoreSource ← OpenBao (team-scale, via Zitadel OIDC) │
│ │
│ When StoreSource fails (OpenBao unreachable): │
│ → returns Ok(None), chain falls through to SqliteSource │
└─────────────────────────────────────────────────────────────────────┘
┌──────────────────┐ ┌──────────────────┐
│ Zitadel │ │ OpenBao │
│ (IdP + OIDC) │ │ (Secret Store) │
│ │ │ │
│ Device Auth │────JWT──▶│ JWT Auth │
│ Flow (RFC 8628)│ │ Method │
└──────────────────┘ └──────────────────┘
```
**Prerequisites**:
- Docker running (for k3d)
- Rust toolchain (edition 2024)
- Network access to download Helm charts
- `kubectl` (installed automatically with k3d, or pre-installed)
**Step-by-Step Execution Plan**:
#### Step 1: Create k3d cluster for local development
When you run `cargo run -p example-zitadel` (or any example using `K8sAnywhereTopology::from_env()`), Harmony automatically provisions a k3d cluster if one does not exist. By default:
- `use_local_k3d = true` (env: `HARMONY_USE_LOCAL_K3D`, default `true`)
- `autoinstall = true` (env: `HARMONY_AUTOINSTALL`, default `true`)
- Cluster name: **`harmony`** (hardcoded in `K3DInstallationScore::default()`)
- k3d binary is downloaded to `~/.local/share/harmony/k3d/`
- Kubeconfig is merged into `~/.kube/config`, context set to `k3d-harmony`
No manual `k3d cluster create` is needed. If you want to create the cluster manually first:
```bash
# Install k3d (requires sudo or install to user path)
curl -s https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | bash
# Create the cluster with the same name Harmony expects
k3d cluster create harmony
kubectl cluster-info --context k3d-harmony
```
**Validation**: `kubectl get nodes --context k3d-harmony` shows 1 server node (k3d default)
**Note**: The existing examples use hardcoded external hostnames (e.g., `sso.sto1.nationtech.io`) for ingress. On a local k3d cluster, these hostnames are not routable. For local development you must either:
- Use `kubectl port-forward` to access services directly
- Configure `/etc/hosts` entries pointing to `127.0.0.1`
- Use a k3d loadbalancer with `--port` mappings
#### Step 2: Deploy Zitadel
Zitadel requires the topology to implement `Topology + K8sclient + HelmCommand + PostgreSQL`. The `K8sAnywhereTopology` satisfies all four.
```bash
cargo run -p example-zitadel
```
**What happens internally** (see `harmony/src/modules/zitadel/mod.rs`):
1. Creates `zitadel` namespace via `K8sResourceScore`
2. Deploys a CNPG PostgreSQL cluster:
- Name: `zitadel-pg`
- Instances: **2** (not 1)
- Storage: 10Gi
- Namespace: `zitadel`
3. Resolves the internal DB endpoint (`host:port`) from the CNPG cluster
4. Generates a 32-byte alphanumeric masterkey, stores it as Kubernetes Secret `zitadel-masterkey` (idempotent: skips if it already exists)
5. Generates a 16-char admin password (guaranteed 1+ uppercase, lowercase, digit, symbol)
6. Deploys Zitadel Helm chart (`zitadel/zitadel` from `https://charts.zitadel.com`):
- `chart_version: None` -- **uses latest chart version** (not pinned)
- No `--wait` flag -- returns before pods are ready
- Ingress annotations are **OpenShift-oriented** (`route.openshift.io/termination: edge`, `cert-manager.io/cluster-issuer: letsencrypt-prod`). On k3d these annotations are silently ignored.
- Ingress includes TLS config with `secretName: "{host}-tls"`, which requires cert-manager. Without cert-manager, TLS termination does not happen at the ingress level.
**Key Helm values set by ZitadelScore**:
- `zitadel.configmapConfig.ExternalDomain`: the `host` field (e.g., `sso.sto1.nationtech.io`)
- `zitadel.configmapConfig.ExternalSecure: true`
- `zitadel.configmapConfig.TLS.Enabled: false` (TLS at ingress, not in Zitadel)
- Admin user: `UserName: "admin"`, Email: **`admin@zitadel.example.com`** (hardcoded, not derived from host)
- Database credentials: injected via `env[].valueFrom.secretKeyRef` from secret `zitadel-pg-superuser` (both user and admin use the same superuser -- there is a TODO to fix this)
**Expected output**:
```
===== ZITADEL DEPLOYMENT COMPLETE =====
Login URL: https://sso.sto1.nationtech.io
Username: admin@zitadel.sso.sto1.nationtech.io
Password: <generated 16-char password>
```
**Note on the success message**: The printed username `admin@zitadel.{host}` does not match the actual configured email `admin@zitadel.example.com`. The actual login username in Zitadel is `admin` (the `UserName` field). This discrepancy exists in the current code.
**Validation on k3d**:
```bash
# Wait for pods to be ready (Helm returns before readiness)
kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=zitadel -n zitadel --timeout=300s
# Port-forward to access Zitadel (ingress won't work without proper DNS/TLS on k3d)
kubectl port-forward svc/zitadel -n zitadel 8080:8080
# Access at http://localhost:8080 (note: ExternalSecure=true may cause redirect issues)
```
**Known issues for k3d deployment**:
- `ExternalSecure: true` tells Zitadel to expect HTTPS, but k3d port-forward is HTTP. This may cause redirect loops. Override with: modify the example to set `ExternalSecure: false` for local dev.
- The CNPG operator must be installed on the cluster. `K8sAnywhereTopology` handles this via the `PostgreSQL` trait implementation, which deploys the operator first.
#### Step 3: Deploy OpenBao
OpenBao requires only `Topology + K8sclient + HelmCommand` (no PostgreSQL dependency).
```bash
cargo run -p example-openbao
```
**What happens internally** (see `harmony/src/modules/openbao/mod.rs`):
1. `OpenbaoScore` directly delegates to `HelmChartScore.create_interpret()` -- there is no custom `execute()` logic, no namespace creation step, no secret generation
2. Deploys OpenBao Helm chart (`openbao/openbao` from `https://openbao.github.io/openbao-helm`):
- `chart_version: None` -- **uses latest chart version** (not pinned)
- `create_namespace: true` -- the `openbao` namespace is created by Helm
- `install_only: false` -- uses `helm upgrade --install`
**Exact Helm values set by OpenbaoScore**:
```yaml
global:
openshift: true # <-- PROBLEM: hardcoded, see below
server:
standalone:
enabled: true
config: |
ui = true
listener "tcp" {
tls_disable = true
address = "[::]:8200"
cluster_address = "[::]:8201"
}
storage "file" {
path = "/openbao/data"
}
service:
enabled: true
ingress:
enabled: true
hosts:
- host: <host field> # e.g., openbao.sebastien.sto1.nationtech.io
dataStorage:
enabled: true
size: 10Gi
storageClass: null # uses cluster default
accessMode: ReadWriteOnce
auditStorage:
enabled: true
size: 10Gi
storageClass: null
accessMode: ReadWriteOnce
ui:
enabled: true
```
**Critical issue: `global.openshift: true` is hardcoded.** The OpenBao Helm chart default is `global.openshift: false`. When set to `true`, the chart adjusts security contexts and may create OpenShift Routes instead of standard Kubernetes Ingress resources. **On k3d (vanilla k8s), this will produce resources that may not work correctly.** Before deploying on k3d, this must be overridden.
**Fix required for k3d**: Either:
1. Modify `OpenbaoScore` to accept an `openshift: bool` field (preferred long-term fix)
2. Or for this example, create a custom example that passes `values_overrides` with `global.openshift=false`
**Post-deployment initialization** (manual -- the TODO in `mod.rs` acknowledges this is not automated):
OpenBao starts in a sealed state. You must initialize and unseal it manually. See https://openbao.org/docs/platform/k8s/helm/run/
```bash
# Initialize OpenBao (generates unseal keys + root token)
kubectl exec -n openbao openbao-0 -- bao operator init
# Save the output! It contains 5 unseal keys and the root token.
# Example output:
# Unseal Key 1: abc123...
# Unseal Key 2: def456...
# ...
# Initial Root Token: hvs.xxxxx
# Unseal (requires 3 of 5 keys by default)
kubectl exec -n openbao openbao-0 -- bao operator unseal <key1>
kubectl exec -n openbao openbao-0 -- bao operator unseal <key2>
kubectl exec -n openbao openbao-0 -- bao operator unseal <key3>
```
**Validation**:
```bash
kubectl exec -n openbao openbao-0 -- bao status
# Should show "Sealed: false"
```
**Note**: The ingress has **no TLS configuration** (unlike Zitadel's ingress). Access is HTTP-only unless you configure TLS separately.
#### Step 4: Configure OpenBao for Harmony
Two paths are available depending on the authentication method:
##### Path A: Userpass auth (simpler, for local dev)
The current `OpenbaoSecretStore` supports **token** and **userpass** authentication. It does NOT yet implement the JWT/OIDC device flow described in ADR 020-1.
```bash
# Port-forward to access OpenBao API
kubectl port-forward svc/openbao -n openbao 8200:8200 &
export BAO_ADDR="http://127.0.0.1:8200"
export BAO_TOKEN="<root token from init>"
# Enable KV v2 secrets engine (default mount "secret")
bao secrets enable -path=secret kv-v2
# Enable userpass auth method
bao auth enable userpass
# Create a user for Harmony (the userpass *users* endpoint; the `login` endpoint only authenticates)
bao write auth/userpass/users/harmony password="harmony-dev-password"
# Create policy granting read/write on harmony/* paths
cat <<'EOF' | bao policy write harmony-dev -
path "secret/data/harmony/*" {
capabilities = ["create", "read", "update", "delete", "list"]
}
path "secret/metadata/harmony/*" {
capabilities = ["list", "read", "delete"]
}
EOF
# Create the user with the policy attached
bao write auth/userpass/users/harmony \
password="harmony-dev-password" \
policies="harmony-dev"
```
**Bug in `OpenbaoSecretStore::authenticate_userpass()`**: The `kv_mount` parameter (default `"secret"`) is passed to `vaultrs::auth::userpass::login()` as the auth mount path. This means it calls `POST /v1/auth/secret/login/{username}` instead of the correct `POST /v1/auth/userpass/login/{username}`. **The auth mount and KV mount are conflated into one parameter.**
**Workaround**: Set `OPENBAO_KV_MOUNT=userpass` so the auth call hits the correct mount path. But then KV operations would use mount `userpass` instead of `secret`, which is wrong.
**Proper fix needed**: Split `kv_mount` into two separate parameters: one for the KV v2 engine mount (`secret`) and one for the auth mount (`userpass`). This is a bug in `harmony_secret/src/store/openbao.rs:234`.
**For this example**: Use **token auth** instead of userpass to sidestep the bug:
```bash
# Set env vars for the example
export OPENBAO_URL="http://127.0.0.1:8200"
export OPENBAO_TOKEN="<root token from init>"
export OPENBAO_KV_MOUNT="secret"
```
##### Path B: JWT auth with Zitadel (target architecture, per ADR 020-1)
This is the production path described in the ADR. It requires the device flow code that is **not yet implemented** in `OpenbaoSecretStore`. The current code only supports token and userpass.
When implemented, the flow will be:
1. Enable JWT auth method in OpenBao
2. Configure it to trust Zitadel's OIDC discovery URL
3. Create a role that maps Zitadel JWT claims to OpenBao policies
```bash
# Enable JWT auth
bao auth enable jwt
# Configure JWT auth to trust Zitadel
bao write auth/jwt/config \
oidc_discovery_url="https://<zitadel-host>" \
bound_issuer="https://<zitadel-host>"
# Create role for Harmony developers
bao write auth/jwt/role/harmony-developer \
role_type="jwt" \
bound_audiences="<harmony_client_id>" \
user_claim="email" \
groups_claim="urn:zitadel:iam:org:project:roles" \
policies="harmony-dev" \
ttl="4h" \
max_ttl="24h" \
token_type="service"
```
**Zitadel application setup** (in Zitadel console):
1. Create project: `Harmony`
2. Add application: `Harmony CLI` (Native app type)
3. Enable Device Authorization grant type
4. Set scopes: `openid email profile offline_access`
5. Note the `client_id`
This path is deferred until the device flow is implemented in `OpenbaoSecretStore`.
#### Step 5: Write end-to-end example
The example uses `StoreSource<OpenbaoSecretStore>` with token auth to avoid the userpass mount bug.
**Environment variables required** (from `harmony_secret/src/config.rs`):
| Variable | Required | Default | Notes |
|---|---|---|---|
| `OPENBAO_URL` | Yes | None | Falls back to `VAULT_ADDR` |
| `OPENBAO_TOKEN` | For token auth | None | Root or user token |
| `OPENBAO_USERNAME` | For userpass | None | Requires `OPENBAO_PASSWORD` too |
| `OPENBAO_PASSWORD` | For userpass | None | |
| `OPENBAO_KV_MOUNT` | No | `"secret"` | KV v2 engine mount path. **Also used as userpass auth mount -- this is a bug.** |
| `OPENBAO_SKIP_TLS` | No | `false` | Set `"true"` to disable TLS verification |
**Note**: `OpenbaoSecretStore::new()` is `async` and **requires a running OpenBao** at construction time (it validates the token if using cached auth). If OpenBao is unreachable during construction, the call will fail. The graceful fallback only applies to `StoreSource::get()` calls after construction -- the `ConfigManager` must be built with a live store, or the store must be wrapped in a lazy initialization pattern.
```rust
#[tokio::test]
async fn test_full_resolution_chain() {
// 1. No env var, no SQLite entry → prompting would happen
// (test with mock/pre-seeded source instead of real stdin)
// 2. Set in SQLite → get() returns SQLite value
// 3. Set env var → get() returns env value (overrides SQLite)
// 4. Remove env var → get() falls back to SQLite
// harmony_config/examples/openbao_chain.rs
use harmony_config::{ConfigManager, EnvSource, SqliteSource, StoreSource};
use harmony_secret::OpenbaoSecretStore;
use serde::{Deserialize, Serialize};
use std::sync::Arc;
#[derive(Debug, Clone, Serialize, Deserialize, schemars::JsonSchema, PartialEq)]
struct AppConfig {
host: String,
port: u16,
}
#[tokio::test]
async fn test_branch_switching_scenario() {
// Simulate: struct shape changes between branches.
// Old value in SQLite doesn't match new struct.
// get() should return Deserialization error.
// get_or_prompt() should re-prompt and overwrite.
impl harmony_config::Config for AppConfig {
const KEY: &'static str = "AppConfig";
}
/// Demonstrates the full config-resolution chain: Env -> SQLite -> OpenBao,
/// with graceful fallback when OpenBao is unreachable or unconfigured.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    env_logger::init();
    // Build the source chain
    let env_source: Arc<dyn harmony_config::ConfigSource> = Arc::new(EnvSource);
    let sqlite = Arc::new(
        SqliteSource::default()
            .await
            .expect("Failed to open SQLite"),
    );
    // OpenBao store -- requires OPENBAO_URL and OPENBAO_TOKEN env vars
    // Falls back gracefully if OpenBao is unreachable at query time
    // VAULT_ADDR is accepted as an alias for compatibility with Vault tooling.
    let openbao_url = std::env::var("OPENBAO_URL")
        .or(std::env::var("VAULT_ADDR"))
        .ok();
    let sources: Vec<Arc<dyn harmony_config::ConfigSource>> = if let Some(url) = openbao_url {
        // KV mount defaults to "secret"; TLS verification on unless opted out.
        let kv_mount = std::env::var("OPENBAO_KV_MOUNT")
            .unwrap_or_else(|_| "secret".to_string());
        let skip_tls = std::env::var("OPENBAO_SKIP_TLS")
            .map(|v| v == "true")
            .unwrap_or(false);
        match OpenbaoSecretStore::new(
            url,
            kv_mount,
            skip_tls,
            std::env::var("OPENBAO_TOKEN").ok(),
            std::env::var("OPENBAO_USERNAME").ok(),
            std::env::var("OPENBAO_PASSWORD").ok(),
        )
        .await
        {
            Ok(store) => {
                // Chain order defines precedence: env beats SQLite beats OpenBao.
                let store_source = Arc::new(StoreSource::new("harmony".to_string(), store));
                vec![env_source, Arc::clone(&sqlite) as _, store_source]
            }
            Err(e) => {
                // Construction-time fallback: run with local sources only.
                eprintln!("Warning: OpenBao unavailable ({e}), using local sources only");
                vec![env_source, sqlite]
            }
        }
    } else {
        println!("No OPENBAO_URL set, using local sources only");
        vec![env_source, sqlite]
    };
    let manager = ConfigManager::new(sources);
    // Scenario 1: get() with nothing stored -- returns NotFound
    let result = manager.get::<AppConfig>().await;
    println!("Get (empty): {:?}", result);
    // Scenario 2: set() then get()
    let config = AppConfig {
        host: "production.example.com".to_string(),
        port: 443,
    };
    manager.set(&config).await?;
    println!("Set: {:?}", config);
    let retrieved = manager.get::<AppConfig>().await?;
    println!("Get (after set): {:?}", retrieved);
    // Round-trip must be lossless through the chain.
    assert_eq!(config, retrieved);
    println!("End-to-end chain validated!");
    Ok(())
}
```
### 1.4 Validate Zitadel + OpenBao integration path
**Key behaviors demonstrated**:
1. **Graceful construction fallback**: If `OPENBAO_URL` is not set or OpenBao is unreachable at startup, the chain is built without it
2. **Graceful query fallback**: `StoreSource::get()` returns `Ok(None)` on any error, so the chain continues to SQLite
3. **Environment override**: `HARMONY_CONFIG_AppConfig='{"host":"env-host","port":9090}'` bypasses all backends
This is not about building the full OIDC flow yet. It's about validating that the architecture supports it by adding `StoreSource<OpenbaoSecretStore>` to the source chain.
#### Step 6: Validate graceful fallback
**Validate**:
- `ConfigManager::new(vec![EnvSource, SqliteSource, StoreSource<Openbao>])` compiles and works
- When OpenBao is unreachable, the chain falls through to SQLite gracefully (no panic)
- When OpenBao has the value, it's returned and SQLite is not queried
Already validated via unit tests (26 tests pass):
**Document** the target Zitadel OIDC flow as an ADR (RFC 8628 device authorization grant), but don't implement it yet. The `StoreSource` wrapping OpenBao with JWT auth is the integration point — Zitadel provides the JWT, OpenBao validates it.
- `test_store_source_error_falls_through_to_sqlite` -- `StoreSource` with `AlwaysErrorStore` returns connection error, chain falls through to `SqliteSource`
- `test_store_source_not_found_falls_through_to_sqlite` -- `StoreSource` returns `NotFound`, chain falls through to `SqliteSource`
### 1.5 UX validation checklist
**Code path (FIXED in `harmony_config/src/source/store.rs`)**:
```rust
// StoreSource::get() -- returns Ok(None) on ANY error, allowing chain to continue
match self.store.get_raw(&self.namespace, key).await {
Ok(bytes) => { /* deserialize and return */ Ok(Some(value)) }
Err(SecretStoreError::NotFound { .. }) => Ok(None),
Err(_) => Ok(None), // Connection errors, timeouts, etc.
}
```
Before this phase is done, manually verify:
#### Step 7: Known issues and blockers
- [ ] `cargo run --example postgresql` with no env vars → prompts for nothing (postgresql doesn't use secrets yet, but the config system initializes cleanly)
| Issue | Location | Severity | Status |
|---|---|---|---|
| `global.openshift: true` hardcoded | `harmony/src/modules/openbao/mod.rs:32` | **Blocker for k3d** | ✅ Fixed: Added `openshift: bool` field to `OpenbaoScore` (defaults to `false`) |
| `kv_mount` used as auth mount path | `harmony_secret/src/store/openbao.rs:234` | **Bug** | ✅ Fixed: Added separate `auth_mount` parameter; added `OPENBAO_AUTH_MOUNT` env var |
| Admin email hardcoded `admin@zitadel.example.com` | `harmony/src/modules/zitadel/mod.rs:314` | Minor | Cosmetic mismatch with success message |
| `ExternalSecure: true` hardcoded | `harmony/src/modules/zitadel/mod.rs:306` | **Issue for k3d** | ✅ Fixed: Zitadel now detects Kubernetes distribution and uses appropriate settings (OpenShift = TLS + cert-manager annotations, k3d = plain nginx ingress without TLS) |
| No Helm chart version pinning | Both modules | Risk | Non-deterministic deploys |
| No `--wait` on Helm install | `harmony/src/modules/helm/chart.rs` | UX | Must manually wait for readiness |
| `get_version()`/`get_status()` are `todo!()` | Both modules | Panic risk | Do not call these methods |
| JWT/OIDC device flow not implemented | `harmony_secret/src/store/openbao.rs` | **Gap** | ✅ Implemented: `ZitadelOidcAuth` in `harmony_secret/src/store/zitadel.rs` |
| `HARMONY_SECRET_NAMESPACE` panics if not set | `harmony_secret/src/config.rs:5` | Runtime panic | Only affects `SecretManager`, not `StoreSource` directly |
**Remaining work**:
- [x] `StoreSource<OpenbaoSecretStore>` integration validates compilation
- [x] StoreSource returns `Ok(None)` on connection error (not `Err`)
- [x] Graceful fallback tests pass when OpenBao is unreachable (2 new tests)
- [x] Fix `global.openshift: true` in `OpenbaoScore` for k3d compatibility
- [x] Fix `kv_mount` / auth mount conflation bug in `OpenbaoSecretStore`
- [x] Create and test `harmony_config/examples/openbao_chain.rs` against real k3d deployment
- [x] Implement JWT/OIDC device flow in `OpenbaoSecretStore` (ADR 020-1) — `ZitadelOidcAuth` implemented and wired into `OpenbaoSecretStore::new()` auth chain
- [x] Fix Zitadel distribution detection — Zitadel now uses `k8s_client.get_k8s_distribution()` to detect OpenShift vs k3d and applies appropriate Helm values (TLS + cert-manager for OpenShift, plain nginx for k3d)
### 1.5 UX validation checklist ⏳
**Status**: Partially complete - manual verification needed
- [ ] `cargo run --example postgresql` with no env vars → prompts for nothing
- [ ] An example that uses `SecretManager` today (e.g., `brocade_snmp_server`) → when migrated to `harmony_config`, first run prompts, second run reads from SQLite
- [ ] Setting `HARMONY_CONFIG_BrocadeSwitchAuth='{"host":"...","user":"...","password":"..."}'` → skips prompt, uses env value
- [ ] Deleting `~/.local/share/harmony/config/config.db` → re-prompts on next run
- [ ] Deleting `~/.local/share/harmony/config/` directory → re-prompts on next run
## Deliverables
- [ ] `SqliteSource` implementation with tests
- [ ] Functional `PromptSource` (or validated that current `get_or_prompt` flow is correct)
- [ ] Fix `get_or_prompt` to persist to first writable source, not all sources
- [ ] Integration tests for full resolution chain
- [ ] Branch-switching deserialization failure test
- [ ] `StoreSource<OpenbaoSecretStore>` integration validated (compiles, graceful fallback)
- [ ] ADR for Zitadel OIDC target architecture
- [x] `SqliteSource` implementation with tests
- [x] Functional `PromptSource` with `should_persist()` design
- [x] Fix `get_or_prompt` to persist to first writable source (via `should_persist()`), not all sources
- [x] Integration tests for full resolution chain
- [x] Branch-switching deserialization failure test
- [x] `StoreSource<OpenbaoSecretStore>` integration validated (compiles, graceful fallback)
- [x] ADR for Zitadel OIDC target architecture
- [ ] Update docs to reflect final implementation and behavior
## Key Implementation Notes
1. **SQLite path**: `~/.local/share/harmony/config/config.db` (not `~/.local/share/harmony/config.db`)
2. **Auto-create directory**: `SqliteSource::open()` creates parent directories if they don't exist
3. **Default path**: `SqliteSource::default()` uses `directories::ProjectDirs` to find the correct data directory
4. **Env var precedence**: Environment variables always take precedence over SQLite in the resolution chain
5. **Testing**: All tests use `tempfile::NamedTempFile` for temporary database paths, ensuring test isolation
6. **Graceful fallback**: `StoreSource::get()` returns `Ok(None)` on any error (connection refused, timeout, etc.), allowing the chain to fall through to the next source. This ensures OpenBao unavailability doesn't break the config chain.
7. **StoreSource errors don't block chain**: When OpenBao is unreachable, `StoreSource::get()` returns `Ok(None)` and the `ConfigManager` continues to the next source (typically `SqliteSource`). This is validated by `test_store_source_error_falls_through_to_sqlite` and `test_store_source_not_found_falls_through_to_sqlite`.

4
brocade/examples/env.sh Normal file
View File

@@ -0,0 +1,4 @@
export HARMONY_SECRET_NAMESPACE=brocade-example
export HARMONY_SECRET_STORE=file
export HARMONY_DATABASE_URL=sqlite://harmony_brocade_example.sqlite
export RUST_LOG=info

View File

@@ -1,6 +1,6 @@
use std::net::{IpAddr, Ipv4Addr};
use brocade::{BrocadeOptions, ssh};
use brocade::{BrocadeOptions, Vlan, ssh};
use harmony_secret::{Secret, SecretManager};
use harmony_types::switch::PortLocation;
use schemars::JsonSchema;
@@ -17,9 +17,12 @@ async fn main() {
env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init();
// let ip = IpAddr::V4(Ipv4Addr::new(10, 0, 0, 250)); // old brocade @ ianlet
let ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); // brocade @ sto1
// let ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); // brocade @ sto1
// let ip = IpAddr::V4(Ipv4Addr::new(192, 168, 4, 11)); // brocade @ st
let switch_addresses = vec![ip];
//let switch_addresses = vec![ip];
let ip0 = IpAddr::V4(Ipv4Addr::new(192, 168, 12, 147)); // brocade @ test
let ip1 = IpAddr::V4(Ipv4Addr::new(192, 168, 12, 109)); // brocade @ test
let switch_addresses = vec![ip0, ip1];
let config = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
.await
@@ -32,7 +35,7 @@ async fn main() {
&BrocadeOptions {
dry_run: true,
ssh: ssh::SshOptions {
port: 2222,
port: 22,
..Default::default()
},
..Default::default()
@@ -58,18 +61,38 @@ async fn main() {
}
println!("--------------");
todo!();
println!("Creating VLAN 100 (test-vlan)...");
brocade
.create_vlan(&Vlan {
id: 100,
name: "test-vlan".to_string(),
})
.await
.unwrap();
println!("--------------");
println!("Deleting VLAN 100...");
brocade
.delete_vlan(&Vlan {
id: 100,
name: "test-vlan".to_string(),
})
.await
.unwrap();
println!("--------------");
todo!("STOP!");
let channel_name = "1";
brocade.clear_port_channel(channel_name).await.unwrap();
println!("--------------");
let channel_id = brocade.find_available_channel_id().await.unwrap();
let channel_id = 1;
println!("--------------");
let channel_name = "HARMONY_LAG";
let ports = [PortLocation(2, 0, 35)];
brocade
.create_port_channel(channel_id, channel_name, &ports)
.create_port_channel(channel_id, channel_name, &ports, None)
.await
.unwrap();
}

View File

@@ -0,0 +1,242 @@
use std::io::{self, Write};
use brocade::{
BrocadeOptions, InterfaceConfig, InterfaceSpeed, InterfaceType, PortOperatingMode,
SwitchInterface, Vlan, VlanList, ssh,
};
use harmony_secret::{Secret, SecretManager};
use harmony_types::switch::PortLocation;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
#[derive(Secret, Clone, Debug, JsonSchema, Serialize, Deserialize)]
struct BrocadeSwitchAuth {
username: String,
password: String,
}
/// Pauses the interactive test run until the operator presses ENTER.
fn wait_for_enter() {
    println!("\n--- Press ENTER to continue ---");
    io::stdout().flush().unwrap();
    // Read and discard one line from stdin; blocks until ENTER.
    let mut discard = String::new();
    io::stdin().read_line(&mut discard).unwrap();
}
/// Interactive end-to-end exercise of the Brocade client against two lab
/// switches: creates VLANs, tests trunk/access/speed/port-channel
/// configuration, then tears everything down. Pauses between steps so the
/// operator can inspect the switch state.
#[tokio::main]
async fn main() {
    env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init();
    // Two stacked test switches; credentials are prompted on first run and
    // cached by the secret store thereafter.
    let ip0 = std::net::IpAddr::V4(std::net::Ipv4Addr::new(192, 168, 12, 147));
    let ip1 = std::net::IpAddr::V4(std::net::Ipv4Addr::new(192, 168, 12, 109));
    let switch_addresses = vec![ip0, ip1];
    let config = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
        .await
        .unwrap();
    // NOTE: dry_run is false -- this script really writes to the switches.
    let brocade = brocade::init(
        &switch_addresses,
        &config.username,
        &config.password,
        &BrocadeOptions {
            dry_run: false,
            ssh: ssh::SshOptions {
                port: 22,
                ..Default::default()
            },
            ..Default::default()
        },
    )
    .await
    .expect("Brocade client failed to connect");
    println!("=== Connecting to Brocade switches ===");
    let version = brocade.version().await.unwrap();
    println!("Version: {version:?}");
    let entries = brocade.get_stack_topology().await.unwrap();
    println!("Stack topology: {entries:#?}");
    // Pre-create the VLANs the port tests below will reference.
    println!("\n=== Creating VLANs 100, 200, 300 ===");
    brocade
        .create_vlan(&Vlan {
            id: 100,
            name: "vlan100".to_string(),
        })
        .await
        .unwrap();
    println!("Created VLAN 100 (vlan100)");
    brocade
        .create_vlan(&Vlan {
            id: 200,
            name: "vlan200".to_string(),
        })
        .await
        .unwrap();
    println!("Created VLAN 200 (vlan200)");
    brocade
        .create_vlan(&Vlan {
            id: 300,
            name: "vlan300".to_string(),
        })
        .await
        .unwrap();
    println!("Created VLAN 300 (vlan300)");
    println!("\n=== Press ENTER to continue to port configuration tests ---");
    wait_for_enter();
    // TEST 1: trunk port carrying all VLANs with a forced 10Gbps speed.
    println!("\n=== TEST 1: Trunk port (all VLANs, speed 10Gbps) on TenGigabitEthernet 1/0/1 ===");
    println!("Configuring port as trunk with all VLANs and speed 10Gbps...");
    let configs = vec![InterfaceConfig {
        interface: SwitchInterface::Ethernet(
            InterfaceType::TenGigabitEthernet,
            PortLocation(1, 0, 1),
        ),
        mode: PortOperatingMode::Trunk,
        access_vlan: None,
        trunk_vlans: Some(VlanList::All),
        speed: Some(InterfaceSpeed::Gbps10),
    }];
    brocade.configure_interfaces(&configs).await.unwrap();
    println!("Querying interfaces...");
    let interfaces = brocade.get_interfaces().await.unwrap();
    for iface in &interfaces {
        if iface.name.contains("1/0/1") {
            println!("  {iface:?}");
        }
    }
    wait_for_enter();
    // TEST 2: trunk port restricted to an explicit VLAN list.
    println!("\n=== TEST 2: Trunk port (specific VLANs) on TenGigabitEthernet 1/0/2 ===");
    println!("Configuring port as trunk with VLANs 100, 200...");
    let configs = vec![InterfaceConfig {
        interface: SwitchInterface::Ethernet(
            InterfaceType::TenGigabitEthernet,
            PortLocation(1, 0, 2),
        ),
        mode: PortOperatingMode::Trunk,
        access_vlan: None,
        trunk_vlans: Some(VlanList::Specific(vec![
            Vlan {
                id: 100,
                name: "vlan100".to_string(),
            },
            Vlan {
                id: 200,
                name: "vlan200".to_string(),
            },
        ])),
        speed: None,
    }];
    brocade.configure_interfaces(&configs).await.unwrap();
    println!("Querying interfaces...");
    let interfaces = brocade.get_interfaces().await.unwrap();
    for iface in &interfaces {
        if iface.name.contains("1/0/2") {
            println!("  {iface:?}");
        }
    }
    wait_for_enter();
    // TEST 3: access port with no explicit VLAN (client should default to 1).
    println!("\n=== TEST 3: Access port (default VLAN 1) on TenGigabitEthernet 1/0/3 ===");
    println!("Configuring port as access (default VLAN 1)...");
    let configs = vec![InterfaceConfig {
        interface: SwitchInterface::Ethernet(
            InterfaceType::TenGigabitEthernet,
            PortLocation(1, 0, 3),
        ),
        mode: PortOperatingMode::Access,
        access_vlan: None,
        trunk_vlans: None,
        speed: None,
    }];
    brocade.configure_interfaces(&configs).await.unwrap();
    println!("Querying interfaces...");
    let interfaces = brocade.get_interfaces().await.unwrap();
    for iface in &interfaces {
        if iface.name.contains("1/0/3") {
            println!("  {iface:?}");
        }
    }
    wait_for_enter();
    // TEST 4: access port pinned to a specific VLAN.
    println!("\n=== TEST 4: Access port (custom VLAN 100) on TenGigabitEthernet 1/0/4 ===");
    println!("Configuring port as access with VLAN 100...");
    let configs = vec![InterfaceConfig {
        interface: SwitchInterface::Ethernet(
            InterfaceType::TenGigabitEthernet,
            PortLocation(1, 0, 4),
        ),
        mode: PortOperatingMode::Access,
        access_vlan: Some(100),
        trunk_vlans: None,
        speed: None,
    }];
    brocade.configure_interfaces(&configs).await.unwrap();
    println!("Querying interfaces...");
    let interfaces = brocade.get_interfaces().await.unwrap();
    for iface in &interfaces {
        if iface.name.contains("1/0/4") {
            println!("  {iface:?}");
        }
    }
    wait_for_enter();
    // TEST 5: two-member LAG with a fixed channel ID (no speed override).
    println!("\n=== TEST 5: Port-channel on TenGigabitEthernet 1/0/5 and 1/0/6 ===");
    let channel_id = 1;
    println!("Using channel ID: {channel_id}");
    println!("Creating port-channel with ports 1/0/5 and 1/0/6...");
    let ports = [PortLocation(1, 0, 5), PortLocation(1, 0, 6)];
    brocade
        .create_port_channel(channel_id, "HARMONY_LAG", &ports, None)
        .await
        .unwrap();
    println!("Port-channel created.");
    println!("Querying port-channel summary...");
    let interfaces = brocade.get_interfaces().await.unwrap();
    for iface in &interfaces {
        if iface.name.contains("1/0/5") || iface.name.contains("1/0/6") {
            println!("  {iface:?}");
        }
    }
    wait_for_enter();
    // Teardown: undo everything the tests configured so the lab switches are
    // left in their original state.
    println!("\n=== TEARDOWN: Clearing port-channels and deleting VLANs ===");
    println!("Clearing port-channel {channel_id}...");
    brocade
        .clear_port_channel(&channel_id.to_string())
        .await
        .unwrap();
    println!("Resetting interfaces...");
    for port in 1..=6 {
        let interface = format!("TenGigabitEthernet 1/0/{port}");
        println!("  Resetting {interface}...");
        brocade.reset_interface(&interface).await.unwrap();
    }
    println!("Deleting VLAN 100...");
    brocade
        .delete_vlan(&Vlan {
            id: 100,
            name: "vlan100".to_string(),
        })
        .await
        .unwrap();
    println!("Deleting VLAN 200...");
    brocade
        .delete_vlan(&Vlan {
            id: 200,
            name: "vlan200".to_string(),
        })
        .await
        .unwrap();
    println!("Deleting VLAN 300...");
    brocade
        .delete_vlan(&Vlan {
            id: 300,
            name: "vlan300".to_string(),
        })
        .await
        .unwrap();
    println!("\n=== DONE ===");
}

View File

@@ -1,7 +1,8 @@
use super::BrocadeClient;
use crate::{
BrocadeInfo, Error, ExecutionMode, InterSwitchLink, InterfaceInfo, MacAddressEntry,
PortChannelId, PortOperatingMode, parse_brocade_mac_address, shell::BrocadeShell,
BrocadeInfo, Error, ExecutionMode, InterSwitchLink, InterfaceConfig, InterfaceInfo,
InterfaceSpeed, MacAddressEntry, PortChannelId, PortOperatingMode, Vlan,
parse_brocade_mac_address, shell::BrocadeShell,
};
use async_trait::async_trait;
@@ -138,10 +139,15 @@ impl BrocadeClient for FastIronClient {
todo!()
}
async fn configure_interfaces(
&self,
_interfaces: &Vec<(String, PortOperatingMode)>,
) -> Result<(), Error> {
    /// Not yet implemented for FastIron; panics via `todo!()` if called.
    async fn configure_interfaces(&self, _interfaces: &Vec<InterfaceConfig>) -> Result<(), Error> {
        todo!()
    }
    /// Not yet implemented for FastIron; panics via `todo!()` if called.
    async fn create_vlan(&self, _vlan: &Vlan) -> Result<(), Error> {
        todo!()
    }
    /// Not yet implemented for FastIron; panics via `todo!()` if called.
    async fn delete_vlan(&self, _vlan: &Vlan) -> Result<(), Error> {
        todo!()
    }
@@ -180,11 +186,18 @@ impl BrocadeClient for FastIronClient {
channel_id: PortChannelId,
channel_name: &str,
ports: &[PortLocation],
speed: Option<&InterfaceSpeed>,
) -> Result<(), Error> {
info!(
"[Brocade] Configuring port-channel '{channel_name} {channel_id}' with ports: {ports:?}"
);
if let Some(speed) = speed {
log::warn!(
"[Brocade] FastIron: speed override ({speed}) on port-channel is not yet implemented; ignoring"
);
}
let commands = self.build_port_channel_commands(channel_id, channel_name, ports);
self.shell
.run_commands(commands, ExecutionMode::Privileged)
@@ -194,6 +207,25 @@ impl BrocadeClient for FastIronClient {
Ok(())
}
    /// Resets `interface` to its default state by removing the switchport
    /// configuration and any forced speed, then leaves config mode.
    /// Commands run in privileged mode on the FastIron shell.
    async fn reset_interface(&self, interface: &str) -> Result<(), Error> {
        info!("[Brocade] Resetting interface: {interface}");
        let commands = vec![
            "configure terminal".into(),
            format!("interface {interface}"),
            "no switchport".into(),
            "no speed".into(),
            "exit".into(),
        ];
        self.shell
            .run_commands(commands, ExecutionMode::Privileged)
            .await?;
        info!("[Brocade] Interface '{interface}' reset.");
        Ok(())
    }
async fn clear_port_channel(&self, channel_name: &str) -> Result<(), Error> {
info!("[Brocade] Clearing port-channel: {channel_name}");

View File

@@ -76,6 +76,74 @@ pub struct MacAddressEntry {
pub type PortChannelId = u8;
/// A VLAN as configured on the switch: numeric ID plus display name.
#[derive(Debug, Clone, PartialEq, Eq, Serialize)]
pub struct Vlan {
    /// 802.1Q VLAN ID.
    pub id: u16,
    /// Human-readable name pushed to the switch configuration.
    pub name: String,
}
/// The set of VLANs allowed on a trunk interface.
#[derive(Debug, Clone, PartialEq, Eq, Serialize)]
pub enum VlanList {
    /// Allow every VLAN (`switchport trunk allowed vlan all`).
    All,
    /// Allow only the listed VLANs.
    Specific(Vec<Vlan>),
}
/// A configurable switch interface: either a physical Ethernet port or a
/// logical port-channel (LAG).
#[derive(Debug, Clone, PartialEq, Eq, Serialize)]
pub enum SwitchInterface {
    /// Physical port, identified by its type and stack/slot/port location.
    Ethernet(InterfaceType, PortLocation),
    /// Logical LAG interface, identified by its channel ID.
    PortChannel(PortChannelId),
}
impl fmt::Display for SwitchInterface {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
SwitchInterface::Ethernet(itype, loc) => write!(f, "{itype} {loc}"),
SwitchInterface::PortChannel(id) => write!(f, "port-channel {id}"),
}
}
}
/// Interface speed setting, rendered by its `Display` impl as the literal
/// token passed to the switch `speed` command.
#[derive(Debug, Clone, PartialEq, Eq, Serialize)]
pub enum InterfaceSpeed {
    /// 100 Mbps ("100").
    Mbps100,
    /// 1 Gbps fixed ("1000").
    Gbps1,
    /// 1 Gbps auto-negotiated ("1000-auto").
    Gbps1Auto,
    /// 10 Gbps ("10000").
    Gbps10,
    /// Auto-negotiate ("auto").
    Auto,
}
impl fmt::Display for InterfaceSpeed {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
InterfaceSpeed::Mbps100 => write!(f, "100"),
InterfaceSpeed::Gbps1 => write!(f, "1000"),
InterfaceSpeed::Gbps1Auto => write!(f, "1000-auto"),
InterfaceSpeed::Gbps10 => write!(f, "10000"),
InterfaceSpeed::Auto => write!(f, "auto"),
}
}
}
/// Desired configuration for a single switch interface.
#[derive(Debug, Clone, PartialEq, Eq, Serialize)]
pub struct InterfaceConfig {
    /// The interface (physical port or port-channel) to configure.
    pub interface: SwitchInterface,
    /// Operating mode (access, trunk, fabric, ...).
    pub mode: PortOperatingMode,
    /// Access VLAN ID; used in access mode (implementations fall back to
    /// VLAN 1 when `None`).
    pub access_vlan: Option<u16>,
    /// VLANs allowed in trunk mode.
    pub trunk_vlans: Option<VlanList>,
    /// Optional forced speed; `None` keeps the switch default.
    pub speed: Option<InterfaceSpeed>,
}
/// Desired configuration for a port-channel (LAG) and its member ports.
#[derive(Debug, Clone, PartialEq, Eq, Serialize)]
pub struct PortChannelConfig {
    /// Logical channel ID.
    pub id: PortChannelId,
    /// Descriptive name used in the switch configuration context.
    pub name: String,
    /// Physical member ports of the LAG.
    pub ports: Vec<PortLocation>,
    /// Operating mode applied to the logical interface.
    pub mode: PortOperatingMode,
    /// Access VLAN; used in access mode.
    pub access_vlan: Option<Vlan>,
    /// VLANs allowed in trunk mode.
    pub trunk_vlans: Option<VlanList>,
    /// Optional forced speed applied to the channel and its members.
    pub speed: Option<InterfaceSpeed>,
}
/// Represents a single physical or logical link connecting two switches within a stack or fabric.
///
/// This structure provides a standardized view of the topology regardless of the
@@ -104,16 +172,17 @@ pub struct InterfaceInfo {
}
/// Categorizes the functional type of a switch interface.
#[derive(Debug, PartialEq, Eq, Clone)]
#[derive(Debug, PartialEq, Eq, Clone, Serialize)]
pub enum InterfaceType {
/// Physical or virtual Ethernet interface (e.g., TenGigabitEthernet, FortyGigabitEthernet).
Ethernet(String),
TenGigabitEthernet,
FortyGigabitEthernet,
}
impl fmt::Display for InterfaceType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
InterfaceType::Ethernet(name) => write!(f, "{name}"),
InterfaceType::TenGigabitEthernet => write!(f, "TenGigabitEthernet"),
InterfaceType::FortyGigabitEthernet => write!(f, "FortyGigabitEthernet"),
}
}
}
@@ -206,10 +275,13 @@ pub trait BrocadeClient: std::fmt::Debug {
async fn get_interfaces(&self) -> Result<Vec<InterfaceInfo>, Error>;
/// Configures a set of interfaces to be operated with a specified mode (access ports, ISL, etc.).
async fn configure_interfaces(
&self,
interfaces: &Vec<(String, PortOperatingMode)>,
) -> Result<(), Error>;
async fn configure_interfaces(&self, interfaces: &Vec<InterfaceConfig>) -> Result<(), Error>;
/// Creates a new VLAN on the switch.
async fn create_vlan(&self, vlan: &Vlan) -> Result<(), Error>;
/// Deletes a VLAN from the switch.
async fn delete_vlan(&self, vlan: &Vlan) -> Result<(), Error>;
/// Scans the existing configuration to find the next available (unused)
/// Port-Channel ID (`lag` or `trunk`) for assignment.
@@ -230,11 +302,16 @@ pub trait BrocadeClient: std::fmt::Debug {
/// * `channel_id`: The ID (e.g., 1-128) for the logical port channel.
/// * `channel_name`: A descriptive name for the LAG (used in configuration context).
/// * `ports`: A slice of `PortLocation` structs defining the physical member ports.
/// * `speed`: Optional speed override applied to both the logical port-channel
/// interface and each member port. Required on Brocade when forcing a
/// non-default speed (e.g. 1G on 10G-capable ports), otherwise the LAG
/// members and the logical interface end up inconsistent.
async fn create_port_channel(
&self,
channel_id: PortChannelId,
channel_name: &str,
ports: &[PortLocation],
speed: Option<&InterfaceSpeed>,
) -> Result<(), Error>;
/// Enables Simple Network Management Protocol (SNMP) server for switch
@@ -246,6 +323,9 @@ pub trait BrocadeClient: std::fmt::Debug {
/// * `des`: The Data Encryption Standard algorithm key
async fn enable_snmp(&self, user_name: &str, auth: &str, des: &str) -> Result<(), Error>;
/// Resets an interface to its default state by removing switchport configuration.
async fn reset_interface(&self, interface: &str) -> Result<(), Error>;
/// Removes all configuration associated with the specified Port-Channel name.
///
/// This operation should be idempotent; attempting to clear a non-existent

View File

@@ -6,9 +6,10 @@ use log::{debug, info};
use regex::Regex;
use crate::{
BrocadeClient, BrocadeInfo, Error, ExecutionMode, InterSwitchLink, InterfaceInfo,
InterfaceStatus, InterfaceType, MacAddressEntry, PortChannelId, PortOperatingMode,
parse_brocade_mac_address, shell::BrocadeShell,
BrocadeClient, BrocadeInfo, Error, ExecutionMode, InterSwitchLink, InterfaceConfig,
InterfaceInfo, InterfaceSpeed, InterfaceStatus, InterfaceType, MacAddressEntry, PortChannelId,
PortOperatingMode, SwitchInterface, Vlan, VlanList, parse_brocade_mac_address,
shell::BrocadeShell,
};
#[derive(Debug)]
@@ -84,8 +85,8 @@ impl NetworkOperatingSystemClient {
}
let interface_type = match parts[0] {
"Fo" => InterfaceType::Ethernet("FortyGigabitEthernet".to_string()),
"Te" => InterfaceType::Ethernet("TenGigabitEthernet".to_string()),
"Fo" => InterfaceType::FortyGigabitEthernet,
"Te" => InterfaceType::TenGigabitEthernet,
_ => return None,
};
let port_location = PortLocation::from_str(parts[1]).ok()?;
@@ -185,18 +186,20 @@ impl BrocadeClient for NetworkOperatingSystemClient {
.collect()
}
async fn configure_interfaces(
&self,
interfaces: &Vec<(String, PortOperatingMode)>,
) -> Result<(), Error> {
async fn configure_interfaces(&self, interfaces: &Vec<InterfaceConfig>) -> Result<(), Error> {
info!("[Brocade] Configuring {} interface(s)...", interfaces.len());
let mut commands = vec!["configure terminal".to_string()];
for interface in interfaces {
commands.push(format!("interface {}", interface.0));
debug!(
"[Brocade] Configuring interface {} as {:?}",
interface.interface, interface.mode
);
match interface.1 {
commands.push(format!("interface {}", interface.interface));
match interface.mode {
PortOperatingMode::Fabric => {
commands.push("fabric isl enable".into());
commands.push("fabric trunk enable".into());
@@ -204,23 +207,50 @@ impl BrocadeClient for NetworkOperatingSystemClient {
PortOperatingMode::Trunk => {
commands.push("switchport".into());
commands.push("switchport mode trunk".into());
commands.push("switchport trunk allowed vlan all".into());
match &interface.trunk_vlans {
Some(VlanList::All) => {
commands.push("switchport trunk allowed vlan all".into());
}
Some(VlanList::Specific(vlans)) => {
for vlan in vlans {
commands.push(format!("switchport trunk allowed vlan add {}", vlan.id));
}
}
None => {
commands.push("switchport trunk allowed vlan all".into());
}
}
commands.push("no switchport trunk tag native-vlan".into());
commands.push("spanning-tree shutdown".into());
commands.push("no fabric isl enable".into());
commands.push("no fabric trunk enable".into());
commands.push("no shutdown".into());
if matches!(interface.interface, SwitchInterface::Ethernet(..)) {
commands.push("spanning-tree shutdown".into());
commands.push("no fabric isl enable".into());
commands.push("no fabric trunk enable".into());
}
}
PortOperatingMode::Access => {
commands.push("switchport".into());
commands.push("switchport mode access".into());
commands.push("switchport access vlan 1".into());
commands.push("no spanning-tree shutdown".into());
commands.push("no fabric isl enable".into());
commands.push("no fabric trunk enable".into());
let access_vlan = interface.access_vlan.unwrap_or(1);
commands.push(format!("switchport access vlan {access_vlan}"));
if matches!(interface.interface, SwitchInterface::Ethernet(..)) {
commands.push("no spanning-tree shutdown".into());
commands.push("no fabric isl enable".into());
commands.push("no fabric trunk enable".into());
}
}
}
if let Some(speed) = &interface.speed {
info!(
"[Brocade] Overriding speed on {} to {speed}",
interface.interface
);
if matches!(interface.interface, SwitchInterface::PortChannel(..)) {
commands.push("shutdown".into());
}
commands.push(format!("speed {speed}"));
}
commands.push("no shutdown".into());
commands.push("exit".into());
}
@@ -235,6 +265,40 @@ impl BrocadeClient for NetworkOperatingSystemClient {
Ok(())
}
async fn create_vlan(&self, vlan: &Vlan) -> Result<(), Error> {
info!("[Brocade] Creating VLAN {} ({})", vlan.id, vlan.name);
let commands = vec![
"configure terminal".into(),
format!("interface Vlan {}", vlan.id),
format!("name {}", vlan.name),
"exit".into(),
];
self.shell
.run_commands(commands, ExecutionMode::Regular)
.await?;
info!("[Brocade] VLAN {} ({}) created.", vlan.id, vlan.name);
Ok(())
}
    /// Deletes VLAN `vlan.id` from the switch by removing its VLAN interface.
    /// Only the ID is used; `vlan.name` is ignored here.
    async fn delete_vlan(&self, vlan: &Vlan) -> Result<(), Error> {
        info!("[Brocade] Deleting VLAN {}", vlan.id);
        let commands = vec![
            "configure terminal".into(),
            format!("no interface Vlan {}", vlan.id),
        ];
        self.shell
            .run_commands(commands, ExecutionMode::Regular)
            .await?;
        info!("[Brocade] VLAN {} deleted.", vlan.id);
        Ok(())
    }
async fn find_available_channel_id(&self) -> Result<PortChannelId, Error> {
info!("[Brocade] Finding next available channel id...");
@@ -273,6 +337,7 @@ impl BrocadeClient for NetworkOperatingSystemClient {
channel_id: PortChannelId,
channel_name: &str,
ports: &[PortLocation],
speed: Option<&InterfaceSpeed>,
) -> Result<(), Error> {
info!(
"[Brocade] Configuring port-channel '{channel_id} {channel_name}' with ports: {}",
@@ -283,27 +348,34 @@ impl BrocadeClient for NetworkOperatingSystemClient {
.join(", ")
);
let interfaces = self.get_interfaces().await?;
let mut commands = vec![
"configure terminal".into(),
format!("interface port-channel {}", channel_id),
"no shutdown".into(),
"exit".into(),
format!("description {channel_name}"),
];
if let Some(speed) = speed {
commands.push("shutdown".into());
commands.push(format!("speed {speed}"));
commands.push("no shutdown".into());
}
commands.push("exit".into());
for port in ports {
let interface = interfaces.iter().find(|i| i.port_location == *port);
let Some(interface) = interface else {
continue;
};
commands.push(format!("interface {}", interface.name));
debug!(
"[Brocade] Adding port TenGigabitEthernet {} to channel-group {}",
port, channel_id
);
commands.push(format!("interface TenGigabitEthernet {}", port));
commands.push("no switchport".into());
commands.push("no ip address".into());
commands.push("no fabric isl enable".into());
commands.push("no fabric trunk enable".into());
commands.push(format!("channel-group {channel_id} mode active"));
commands.push("lacp timeout short".into());
if let Some(speed) = speed {
commands.push(format!("speed {speed}"));
}
commands.push("no shutdown".into());
commands.push("exit".into());
}
@@ -317,6 +389,25 @@ impl BrocadeClient for NetworkOperatingSystemClient {
Ok(())
}
async fn reset_interface(&self, interface: &str) -> Result<(), Error> {
info!("[Brocade] Resetting interface: {interface}");
let commands = vec![
"configure terminal".into(),
format!("interface {interface}"),
"no switchport".into(),
"no speed".into(),
"exit".into(),
];
self.shell
.run_commands(commands, ExecutionMode::Regular)
.await?;
info!("[Brocade] Interface '{interface}' reset.");
Ok(())
}
async fn clear_port_channel(&self, channel_name: &str) -> Result<(), Error> {
info!("[Brocade] Clearing port-channel: {channel_name}");

View File

@@ -3,6 +3,9 @@ set -e
cd "$(dirname "$0")/.."
git submodule init
git submodule update
rustc --version
cargo check --all-targets --all-features --keep-going
cargo fmt --check

View File

@@ -1,6 +1,6 @@
use std::str::FromStr;
use brocade::{BrocadeOptions, PortOperatingMode};
use brocade::{BrocadeOptions, InterfaceConfig, InterfaceType, PortOperatingMode, SwitchInterface, VlanList};
use harmony::{
infra::brocade::BrocadeSwitchConfig,
inventory::Inventory,
@@ -9,6 +9,13 @@ use harmony::{
use harmony_macros::ip;
use harmony_types::{id::Id, switch::PortLocation};
/// Shorthand for a TenGigabitEthernet interface at the given
/// stack/slot/port location.
fn tengig(stack: u8, slot: u8, port: u8) -> SwitchInterface {
    let location = PortLocation(stack, slot, port);
    SwitchInterface::Ethernet(InterfaceType::TenGigabitEthernet, location)
}
fn get_switch_config() -> BrocadeSwitchConfig {
let mut options = BrocadeOptions::default();
options.ssh.port = 2222;
@@ -33,9 +40,27 @@ async fn main() {
Id::from_str("18").unwrap(),
],
ports_to_configure: vec![
(PortLocation(2, 0, 17), PortOperatingMode::Trunk),
(PortLocation(2, 0, 19), PortOperatingMode::Trunk),
(PortLocation(1, 0, 18), PortOperatingMode::Trunk),
InterfaceConfig {
interface: tengig(2, 0, 17),
mode: PortOperatingMode::Trunk,
access_vlan: None,
trunk_vlans: Some(VlanList::All),
speed: None,
},
InterfaceConfig {
interface: tengig(2, 0, 19),
mode: PortOperatingMode::Trunk,
access_vlan: None,
trunk_vlans: Some(VlanList::All),
speed: None,
},
InterfaceConfig {
interface: tengig(1, 0, 18),
mode: PortOperatingMode::Trunk,
access_vlan: None,
trunk_vlans: Some(VlanList::All),
speed: None,
},
],
};

View File

@@ -0,0 +1,18 @@
[package]
name = "brocade-switch-configuration"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_macros = { path = "../../harmony_macros" }
harmony_types = { path = "../../harmony_types" }
tokio.workspace = true
async-trait.workspace = true
serde.workspace = true
log.workspace = true
env_logger.workspace = true
brocade = { path = "../../brocade" }

View File

@@ -0,0 +1,4 @@
export HARMONY_SECRET_NAMESPACE=brocade-example
export HARMONY_SECRET_STORE=file
export HARMONY_DATABASE_URL=sqlite://harmony_brocade_example.sqlite
export RUST_LOG=info

View File

@@ -0,0 +1,144 @@
use brocade::{
BrocadeOptions, InterfaceConfig, InterfaceSpeed, InterfaceType, PortChannelConfig,
PortOperatingMode, SwitchInterface, Vlan, VlanList,
};
use harmony::{
infra::brocade::BrocadeSwitchConfig,
inventory::Inventory,
modules::brocade::{BrocadeSwitchAuth, BrocadeSwitchConfigurationScore, SwitchTopology},
};
use harmony_macros::ip;
use harmony_types::switch::PortLocation;
/// Shorthand for a 10GbE switch interface at the given stack/slot/port location.
fn tengig(stack: u8, slot: u8, port: u8) -> SwitchInterface {
    let location = PortLocation(stack, slot, port);
    SwitchInterface::Ethernet(InterfaceType::TenGigabitEthernet, location)
}
/// Connection settings for the target switch stack: management IPs,
/// credentials, and SSH options (port 22, dry-run disabled).
fn get_switch_config() -> BrocadeSwitchConfig {
    let ssh = brocade::ssh::SshOptions {
        port: 22,
        ..Default::default()
    };
    let options = BrocadeOptions {
        dry_run: false,
        ssh,
        ..Default::default()
    };
    BrocadeSwitchConfig {
        ips: vec![ip!("192.168.4.12"), ip!("192.168.4.11")],
        auth: BrocadeSwitchAuth {
            username: "admin".to_string(),
            password: "password".to_string(),
        },
        options,
    }
}
/// Example entry point: declares VLANs, standalone interfaces, and
/// port-channels, then applies them to the switch via harmony_cli.
#[tokio::main]
async fn main() {
    harmony_cli::cli_logger::init();
    // ===================================================
    // Step 1: Define VLANs once, use them everywhere
    // ===================================================
    let mgmt = Vlan {
        id: 100,
        name: "MGMT".to_string(),
    };
    let data = Vlan {
        id: 200,
        name: "DATA".to_string(),
    };
    let storage = Vlan {
        id: 300,
        name: "STORAGE".to_string(),
    };
    let backup = Vlan {
        id: 400,
        name: "BACKUP".to_string(),
    };
    // ===================================================
    // Step 2: Build the score
    // ===================================================
    let score = BrocadeSwitchConfigurationScore {
        // All VLANs that need to exist on the switch
        vlans: vec![mgmt.clone(), data.clone(), storage.clone(), backup.clone()],
        // Standalone interfaces (not part of any port-channel)
        interfaces: vec![
            // Trunk port with ALL VLANs, forced to 10Gbps
            InterfaceConfig {
                interface: tengig(1, 0, 20),
                mode: PortOperatingMode::Trunk,
                access_vlan: None,
                trunk_vlans: Some(VlanList::All),
                speed: Some(InterfaceSpeed::Gbps10),
            },
            // Trunk port with specific VLANs (MGMT + DATA only)
            InterfaceConfig {
                interface: tengig(1, 0, 21),
                mode: PortOperatingMode::Trunk,
                access_vlan: None,
                trunk_vlans: Some(VlanList::Specific(vec![mgmt.clone(), data.clone()])),
                speed: None,
            },
            // Access port on the MGMT VLAN
            // NOTE: access_vlan takes the raw VLAN id, not the Vlan struct.
            InterfaceConfig {
                interface: tengig(1, 0, 22),
                mode: PortOperatingMode::Access,
                access_vlan: Some(mgmt.id),
                trunk_vlans: None,
                speed: None,
            },
            // Access port on the STORAGE VLAN
            InterfaceConfig {
                interface: tengig(1, 0, 23),
                mode: PortOperatingMode::Access,
                access_vlan: Some(storage.id),
                trunk_vlans: None,
                speed: None,
            },
        ],
        // Port-channels: member ports are bundled, L2 config goes on the port-channel
        port_channels: vec![
            // Port-channel 1: trunk with DATA + STORAGE VLANs, forced to 1Gbps
            PortChannelConfig {
                id: 1,
                name: "SERVER_BOND".to_string(),
                ports: vec![PortLocation(1, 0, 24), PortLocation(1, 0, 25)],
                mode: PortOperatingMode::Trunk,
                access_vlan: None,
                trunk_vlans: Some(VlanList::Specific(vec![data.clone(), storage.clone()])),
                speed: Some(InterfaceSpeed::Gbps1),
            },
            // Port-channel 2: trunk with all VLANs, default speed
            PortChannelConfig {
                id: 2,
                name: "BACKUP_BOND".to_string(),
                ports: vec![PortLocation(1, 0, 26), PortLocation(1, 0, 27)],
                mode: PortOperatingMode::Trunk,
                access_vlan: None,
                trunk_vlans: Some(VlanList::All),
                speed: None,
            },
        ],
    };
    // ===================================================
    // Step 3: Run
    // ===================================================
    // Autoload the inventory, build the switch topology from the static
    // config, and apply the score. Panics on failure (example binary).
    harmony_cli::run(
        Inventory::autoload(),
        SwitchTopology::new(get_switch_config()).await,
        vec![Box::new(score)],
        None,
    )
    .await
    .unwrap();
}

View File

@@ -0,0 +1,25 @@
[package]
name = "example-harmony-sso"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_config = { path = "../../harmony_config" }
harmony_macros = { path = "../../harmony_macros" }
harmony_secret = { path = "../../harmony_secret" }
harmony_types = { path = "../../harmony_types" }
k3d-rs = { path = "../../k3d" }
kube.workspace = true
tokio.workspace = true
url.workspace = true
log.workspace = true
env_logger.workspace = true
serde.workspace = true
serde_json.workspace = true
anyhow.workspace = true
reqwest.workspace = true
directories = "6.0.0"

View File

@@ -0,0 +1,395 @@
use anyhow::Context;
use harmony::inventory::Inventory;
use harmony::modules::openbao::OpenbaoScore;
use harmony::score::Score;
use harmony::topology::Topology;
use k3d_rs::{K3d, PortMapping};
use log::info;
use serde::Deserialize;
use serde::Serialize;
use std::path::PathBuf;
use std::process::Command;
const CLUSTER_NAME: &str = "harmony-example";
const ZITADEL_HOST: &str = "sso.harmony.local";
const OPENBAO_HOST: &str = "bao.harmony.local";
const ZITADEL_PORT: u32 = 8080;
const OPENBAO_PORT: u32 = 8200;
/// Directory that holds the k3d binary and cluster state; falls back to
/// /tmp when the platform's user data directory cannot be determined.
fn get_k3d_binary_path() -> PathBuf {
    match directories::BaseDirs::new() {
        Some(dirs) => dirs.data_dir().join("harmony").join("k3d"),
        None => PathBuf::from("/tmp/harmony-k3d"),
    }
}
/// Directory where the OpenBao unseal keys and root token are persisted;
/// falls back to /tmp when no user data directory is available.
fn get_openbao_data_path() -> PathBuf {
    match directories::BaseDirs::new() {
        Some(dirs) => dirs.data_dir().join("harmony").join("openbao"),
        None => PathBuf::from("/tmp/harmony-openbao"),
    }
}
/// Ensures the example k3d cluster exists with the host-to-cluster port
/// mappings needed by Zitadel (HTTP 80) and OpenBao (8200).
async fn ensure_k3d_cluster() -> anyhow::Result<()> {
    let base_dir = get_k3d_binary_path();
    std::fs::create_dir_all(&base_dir).context("Failed to create k3d data directory")?;
    info!(
        "Ensuring k3d cluster '{}' is running with port mappings",
        CLUSTER_NAME
    );
    // Map host ports to in-cluster ports so the services are reachable at
    // the URLs printed by main().
    let k3d = K3d::new(base_dir.clone(), Some(CLUSTER_NAME.to_string())).with_port_mappings(vec![
        PortMapping::new(ZITADEL_PORT, 80),
        PortMapping::new(OPENBAO_PORT, 8200),
    ]);
    // ensure_installed is presumably idempotent (installs binary + creates
    // cluster only if missing) — TODO confirm against k3d-rs docs.
    k3d.ensure_installed()
        .await
        .map_err(|e| anyhow::anyhow!("Failed to ensure k3d installed: {}", e))?;
    info!("k3d cluster '{}' is ready", CLUSTER_NAME);
    Ok(())
}
/// Builds a K8sAnywhereTopology pointed at the local k3d cluster by seeding
/// the environment variables `from_env` reads.
fn create_topology() -> harmony::topology::K8sAnywhereTopology {
    // SAFETY / NOTE(review): `set_var` is unsafe (edition 2024) because the
    // process environment is global and unsynchronized; the multi-threaded
    // tokio runtime is already live here — confirm nothing reads these vars
    // concurrently before from_env() runs.
    unsafe {
        std::env::set_var("HARMONY_USE_LOCAL_K3D", "false");
        std::env::set_var("HARMONY_AUTOINSTALL", "false");
        std::env::set_var("HARMONY_K8S_CONTEXT", "k3d-harmony-example");
    }
    harmony::topology::K8sAnywhereTopology::from_env()
}
async fn cleanup_openbao_webhook() -> anyhow::Result<()> {
let output = Command::new("kubectl")
.args([
"--context",
"k3d-harmony-example",
"get",
"mutatingwebhookconfigurations",
])
.output()
.context("Failed to check webhooks")?;
if String::from_utf8_lossy(&output.stdout).contains("openbao-agent-injector-cfg") {
info!("Deleting conflicting OpenBao webhook...");
let _ = Command::new("kubectl")
.args([
"--context",
"k3d-harmony-example",
"delete",
"mutatingwebhookconfiguration",
"openbao-agent-injector-cfg",
"--ignore-not-found=true",
])
.output();
}
Ok(())
}
/// Applies the OpenbaoScore to the topology, installing OpenBao in
/// plain-Kubernetes mode (`openshift: false`).
async fn deploy_openbao(topology: &harmony::topology::K8sAnywhereTopology) -> anyhow::Result<()> {
    info!("Deploying OpenBao...");
    let openbao = OpenbaoScore {
        host: OPENBAO_HOST.to_string(),
        openshift: false,
    };
    let inventory = Inventory::autoload();
    openbao
        .interpret(&inventory, topology)
        .await
        .context("OpenBao deployment failed")?;
    info!("OpenBao deployed successfully");
    Ok(())
}
/// Best-effort wait for the openbao-0 pod to come up.
///
/// NOTE(review): when the `kubectl wait` fails, the failure is only logged —
/// there is no actual "alternative approach"; the function sleeps 5s and
/// reports success regardless. Confirm this is intentional.
async fn wait_for_openbao_running() -> anyhow::Result<()> {
    info!("Waiting for OpenBao pods to be running...");
    let output = Command::new("kubectl")
        .args([
            "--context",
            "k3d-harmony-example",
            "wait",
            "-n",
            "openbao",
            "--for=condition=podinitialized",
            "pod/openbao-0",
            "--timeout=120s",
        ])
        .output()
        .context("Failed to wait for OpenBao pod")?;
    if !output.status.success() {
        let stderr = String::from_utf8_lossy(&output.stderr);
        info!(
            "Pod initialized wait failed, trying alternative approach: {}",
            stderr
        );
    }
    // Grace period for the container to start serving after initialization.
    tokio::time::sleep(tokio::time::Duration::from_secs(5)).await;
    info!("OpenBao pod is running (may be sealed)");
    Ok(())
}
/// Subset of the JSON emitted by `bao operator init -format=json`,
/// persisted to disk so later runs can unseal without re-initializing.
#[derive(Debug, Serialize, Deserialize)]
struct OpenBaoInitOutput {
    /// Base64-encoded unseal key shares; `unseal_openbao` applies the
    /// first three.
    #[serde(rename = "unseal_keys_b64")]
    keys: Vec<String>,
    /// Initial root token used for follow-up configuration.
    #[serde(rename = "root_token")]
    root_token: String,
}
/// Runs `bao operator init` inside the openbao-0 pod and returns the root token.
///
/// Idempotent across runs: if the unseal keys were already saved to disk they
/// are reloaded instead of re-initializing. Errors if the server reports it is
/// already initialized but the local keys file is missing (keys unrecoverable).
async fn init_openbao() -> anyhow::Result<String> {
    let data_path = get_openbao_data_path();
    std::fs::create_dir_all(&data_path).context("Failed to create openbao data directory")?;
    let keys_file = data_path.join("unseal-keys.json");
    // Fast path: keys were persisted by a previous run.
    if keys_file.exists() {
        info!("OpenBao already initialized, loading existing keys");
        let content = std::fs::read_to_string(&keys_file)?;
        let init_output: OpenBaoInitOutput = serde_json::from_str(&content)?;
        return Ok(init_output.root_token);
    }
    info!("Initializing OpenBao...");
    let output = Command::new("kubectl")
        .args([
            "--context",
            "k3d-harmony-example",
            "exec",
            "-n",
            "openbao",
            "openbao-0",
            "--",
            "bao",
            "operator",
            "init",
            "-format=json",
        ])
        .output()
        .context("Failed to initialize OpenBao")?;
    let stderr = String::from_utf8_lossy(&output.stderr);
    let stdout = String::from_utf8_lossy(&output.stdout);
    // Server initialized but no local keys: unrecoverable without wiping the
    // cluster, so fail with explicit recovery instructions.
    if stderr.contains("already initialized") {
        info!("OpenBao is already initialized");
        return Err(anyhow::anyhow!(
            "OpenBao is already initialized but no keys file found. \
            Please delete the cluster and try again: k3d cluster delete harmony-example"
        ));
    }
    if !output.status.success() {
        return Err(anyhow::anyhow!(
            "OpenBao init failed with status {}: {}",
            output.status,
            stderr
        ));
    }
    if stdout.trim().is_empty() {
        return Err(anyhow::anyhow!(
            "OpenBao init returned empty output. stderr: {}",
            stderr
        ));
    }
    // Persist keys + root token so later runs (and unseal) can reuse them.
    let init_output: OpenBaoInitOutput = serde_json::from_str(&stdout)?;
    std::fs::write(&keys_file, serde_json::to_string_pretty(&init_output)?)?;
    info!("OpenBao initialized successfully");
    info!("Unseal keys saved to {:?}", keys_file);
    Ok(init_output.root_token)
}
/// Unseals OpenBao using the locally persisted unseal keys.
///
/// Skips the operation when `bao status` already reports the server unsealed.
/// Applies up to the first three key shares.
///
/// NOTE: `root_token` is currently unused — unsealing needs only key shares;
/// the parameter is kept for call-site stability.
///
/// # Errors
/// Fails when the status command cannot run, the keys file is missing or
/// malformed, or any unseal command reports failure.
async fn unseal_openbao(root_token: &str) -> anyhow::Result<()> {
    let _ = root_token; // intentionally unused, see doc comment
    info!("Unsealing OpenBao...");
    let status_output = Command::new("kubectl")
        .args([
            "--context",
            "k3d-harmony-example",
            "exec",
            "-n",
            "openbao",
            "openbao-0",
            "--",
            "bao",
            "status",
            "-format=json",
        ])
        .output()
        .context("Failed to get OpenBao status")?;
    #[derive(Deserialize)]
    struct StatusOutput {
        sealed: bool,
    }
    // Idempotence: nothing to do when already unsealed.
    if status_output.status.success() {
        if let Ok(status) =
            serde_json::from_str::<StatusOutput>(&String::from_utf8_lossy(&status_output.stdout))
        {
            if !status.sealed {
                info!("OpenBao is already unsealed");
                return Ok(());
            }
        }
    }
    let data_path = get_openbao_data_path();
    let keys_file = data_path.join("unseal-keys.json");
    let content = std::fs::read_to_string(&keys_file)?;
    let init_output: OpenBaoInitOutput = serde_json::from_str(&content)?;
    // `take(3)` instead of slicing `[0..3]`: a keys file with fewer than
    // three shares must surface as an unseal error, not an index panic.
    for key in init_output.keys.iter().take(3) {
        let output = Command::new("kubectl")
            .args([
                "--context",
                "k3d-harmony-example",
                "exec",
                "-n",
                "openbao",
                "openbao-0",
                "--",
                "bao",
                "operator",
                "unseal",
                key,
            ])
            .output()
            .context("Failed to unseal OpenBao")?;
        if !output.status.success() {
            return Err(anyhow::anyhow!(
                "Unseal failed: {}",
                String::from_utf8_lossy(&output.stderr)
            ));
        }
    }
    info!("OpenBao unsealed successfully");
    Ok(())
}
/// Runs a `bao` CLI command inside the openbao-0 pod with VAULT_TOKEN set.
///
/// The args are joined and evaluated by `sh -c`, so they must not contain
/// shell metacharacters.
/// NOTE(review): the root token is interpolated unquoted into the shell
/// string and is visible in the pod's process listing — acceptable for a
/// local dev example, but confirm before reusing elsewhere.
async fn run_bao_command(root_token: &str, args: &[&str]) -> anyhow::Result<String> {
    let command = args.join(" ");
    let shell_command = format!("VAULT_TOKEN={} {}", root_token, command);
    let output = Command::new("kubectl")
        .args([
            "--context",
            "k3d-harmony-example",
            "exec",
            "-n",
            "openbao",
            "openbao-0",
            "--",
            "sh",
            "-c",
            &shell_command,
        ])
        .output()
        .context("Failed to run bao command")?;
    let stdout = String::from_utf8_lossy(&output.stdout);
    let stderr = String::from_utf8_lossy(&output.stderr);
    if !output.status.success() {
        return Err(anyhow::anyhow!("bao command failed: {}", stderr));
    }
    Ok(stdout.to_string())
}
/// Enables userpass auth plus a kv-v2 mount and creates a fixed dev admin
/// user (harmony / harmony-dev-password).
///
/// The first two commands deliberately discard errors: `enable` fails when
/// the auth method or mount already exists, which keeps this idempotent.
/// NOTE(review): the root token is printed at info level below — dev-only;
/// confirm this never runs against a real deployment.
async fn configure_openbao_admin_user(root_token: &str) -> anyhow::Result<()> {
    info!("Configuring OpenBao with userpass auth...");
    let _ = run_bao_command(root_token, &["bao", "auth", "enable", "userpass"]).await;
    let _ = run_bao_command(
        root_token,
        &["bao", "secrets", "enable", "-path=secret", "kv-v2"],
    )
    .await;
    run_bao_command(
        root_token,
        &[
            "bao",
            "write",
            "auth/userpass/users/harmony",
            "password=harmony-dev-password",
            "policies=default",
        ],
    )
    .await?;
    info!("OpenBao configured with userpass auth");
    info!(" Username: harmony");
    info!(" Password: harmony-dev-password");
    info!(" Root token: {}", root_token);
    Ok(())
}
/// Entry point: brings up a local k3d cluster, deploys OpenBao, then
/// initializes, unseals, and configures it with a userpass admin account.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init();
    info!("===========================================");
    info!("Harmony SSO Example");
    info!("Deploys Zitadel + OpenBao on k3d");
    info!("===========================================");
    // Cluster first: everything below requires a reachable k8s API.
    ensure_k3d_cluster().await?;
    info!("===========================================");
    info!("Cluster '{}' is ready", CLUSTER_NAME);
    info!(
        "Zitadel will be available at: http://{}:{}",
        ZITADEL_HOST, ZITADEL_PORT
    );
    info!(
        "OpenBao will be available at: http://{}:{}",
        OPENBAO_HOST, OPENBAO_PORT
    );
    info!("===========================================");
    let topology = create_topology();
    topology
        .ensure_ready()
        .await
        .context("Failed to initialize topology")?;
    // Remove a stale injector webhook before (re)deploying, then run the
    // full OpenBao bootstrap sequence: deploy -> wait -> init -> unseal ->
    // configure admin user.
    cleanup_openbao_webhook().await?;
    deploy_openbao(&topology).await?;
    wait_for_openbao_running().await?;
    let root_token = init_openbao().await?;
    unseal_openbao(&root_token).await?;
    configure_openbao_admin_user(&root_token).await?;
    info!("===========================================");
    info!("OpenBao initialized and configured!");
    info!("===========================================");
    info!("Zitadel: http://{}:{}", ZITADEL_HOST, ZITADEL_PORT);
    info!("OpenBao: http://{}:{}", OPENBAO_HOST, OPENBAO_PORT);
    info!("===========================================");
    info!("OpenBao credentials:");
    info!(" Username: harmony");
    info!(" Password: harmony-dev-password");
    info!("===========================================");
    Ok(())
}

View File

@@ -6,6 +6,7 @@ use harmony::{
async fn main() {
let openbao = OpenbaoScore {
host: "openbao.sebastien.sto1.nationtech.io".to_string(),
openshift: false,
};
harmony_cli::run(

View File

@@ -7,6 +7,7 @@ async fn main() {
let zitadel = ZitadelScore {
host: "sso.sto1.nationtech.io".to_string(),
zitadel_version: "v4.12.1".to_string(),
external_secure: true,
};
harmony_cli::run(

View File

@@ -1,4 +1,5 @@
use async_trait::async_trait;
use brocade::{InterfaceConfig, InterfaceSpeed, PortChannelConfig, PortChannelId, Vlan};
use harmony_k8s::K8sClient;
use harmony_macros::ip;
use harmony_types::{
@@ -11,7 +12,7 @@ use log::info;
use crate::topology::{HelmCommand, PxeOptions};
use crate::{data::FileContent, executors::ExecutorError, topology::node_exporter::NodeExporter};
use crate::{infra::network_manager::OpenShiftNmStateNetworkManager, topology::PortConfig};
use crate::infra::network_manager::OpenShiftNmStateNetworkManager;
use super::{
DHCPStaticEntry, DhcpServer, DnsRecord, DnsRecordType, DnsServer, Firewall, HostNetworkConfig,
@@ -316,23 +317,57 @@ impl Switch for HAClusterTopology {
self.switch_client.find_port(mac_address).await
}
async fn configure_port_channel(&self, config: &HostNetworkConfig) -> Result<(), SwitchError> {
async fn configure_port_channel(
&self,
channel_id: PortChannelId,
config: &HostNetworkConfig,
) -> Result<(), SwitchError> {
debug!("Configuring port channel: {config:#?}");
let switch_ports = config.switch_ports.iter().map(|s| s.port.clone()).collect();
self.switch_client
.configure_port_channel(&format!("Harmony_{}", config.host_id), switch_ports)
.configure_port_channel(
channel_id,
&format!("Harmony_{}", config.host_id),
switch_ports,
None,
)
.await
.map_err(|e| SwitchError::new(format!("Failed to configure port-channel: {e}")))?;
Ok(())
}
    /// Creates the port-channel described by `config` (id, name, member
    /// ports, optional forced speed) on the switch.
    ///
    /// Only the bundle itself is created here; L2 mode/VLAN settings are
    /// applied separately via `configure_interfaces`.
    async fn configure_port_channel_from_config(
        &self,
        config: &PortChannelConfig,
    ) -> Result<(), SwitchError> {
        self.switch_client
            .configure_port_channel(
                config.id,
                &config.name,
                config.ports.clone(),
                config.speed.as_ref(),
            )
            .await
            .map_err(|e| SwitchError::new(format!("Failed to create port-channel: {e}")))?;
        Ok(())
    }
async fn clear_port_channel(&self, _ids: &Vec<Id>) -> Result<(), SwitchError> {
todo!()
}
async fn configure_interface(&self, _ports: &Vec<PortConfig>) -> Result<(), SwitchError> {
todo!()
async fn configure_interfaces(
&self,
interfaces: &Vec<InterfaceConfig>,
) -> Result<(), SwitchError> {
self.switch_client.configure_interfaces(interfaces).await
}
async fn create_vlan(&self, vlan: &Vlan) -> Result<(), SwitchError> {
self.switch_client.create_vlan(vlan).await
}
async fn delete_vlan(&self, vlan: &Vlan) -> Result<(), SwitchError> {
self.switch_client.delete_vlan(vlan).await
}
}
@@ -592,15 +627,26 @@ impl SwitchClient for DummyInfra {
async fn configure_port_channel(
&self,
_channel_id: PortChannelId,
_channel_name: &str,
_switch_ports: Vec<PortLocation>,
_speed: Option<&InterfaceSpeed>,
) -> Result<u8, SwitchError> {
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
}
async fn clear_port_channel(&self, _ids: &Vec<Id>) -> Result<(), SwitchError> {
todo!()
}
async fn configure_interface(&self, _ports: &Vec<PortConfig>) -> Result<(), SwitchError> {
async fn configure_interfaces(
&self,
_interfaces: &Vec<InterfaceConfig>,
) -> Result<(), SwitchError> {
todo!()
}
async fn create_vlan(&self, _vlan: &Vlan) -> Result<(), SwitchError> {
todo!()
}
async fn delete_vlan(&self, _vlan: &Vlan) -> Result<(), SwitchError> {
todo!()
}
}

View File

@@ -7,7 +7,7 @@ use std::{
};
use async_trait::async_trait;
use brocade::PortOperatingMode;
use brocade::{InterfaceConfig, InterfaceSpeed, PortChannelConfig, PortChannelId, Vlan};
use derive_new::new;
use harmony_k8s::K8sClient;
use harmony_types::{
@@ -220,8 +220,6 @@ impl From<String> for NetworkError {
}
}
pub type PortConfig = (PortLocation, PortOperatingMode);
#[async_trait]
pub trait Switch: Send + Sync {
async fn setup_switch(&self) -> Result<(), SwitchError>;
@@ -231,9 +229,24 @@ pub trait Switch: Send + Sync {
mac_address: &MacAddress,
) -> Result<Option<PortLocation>, SwitchError>;
async fn configure_port_channel(&self, config: &HostNetworkConfig) -> Result<(), SwitchError>;
async fn configure_port_channel(
&self,
channel_id: PortChannelId,
config: &HostNetworkConfig,
) -> Result<(), SwitchError>;
/// Creates a port-channel from a PortChannelConfig (id, name, member ports).
/// Does NOT configure L2 mode — use configure_interfaces for that.
async fn configure_port_channel_from_config(
&self,
config: &PortChannelConfig,
) -> Result<(), SwitchError>;
async fn clear_port_channel(&self, ids: &Vec<Id>) -> Result<(), SwitchError>;
async fn configure_interface(&self, ports: &Vec<PortConfig>) -> Result<(), SwitchError>;
async fn configure_interfaces(
&self,
interfaces: &Vec<InterfaceConfig>,
) -> Result<(), SwitchError>;
async fn create_vlan(&self, vlan: &Vlan) -> Result<(), SwitchError>;
async fn delete_vlan(&self, vlan: &Vlan) -> Result<(), SwitchError>;
}
#[derive(Clone, Debug, PartialEq)]
@@ -290,12 +303,19 @@ pub trait SwitchClient: Debug + Send + Sync {
async fn configure_port_channel(
&self,
channel_id: PortChannelId,
channel_name: &str,
switch_ports: Vec<PortLocation>,
speed: Option<&InterfaceSpeed>,
) -> Result<u8, SwitchError>;
async fn clear_port_channel(&self, ids: &Vec<Id>) -> Result<(), SwitchError>;
async fn configure_interface(&self, ports: &Vec<PortConfig>) -> Result<(), SwitchError>;
async fn configure_interfaces(
&self,
interfaces: &Vec<InterfaceConfig>,
) -> Result<(), SwitchError>;
async fn create_vlan(&self, vlan: &Vlan) -> Result<(), SwitchError>;
async fn delete_vlan(&self, vlan: &Vlan) -> Result<(), SwitchError>;
}
#[cfg(test)]

View File

@@ -1,16 +1,20 @@
use async_trait::async_trait;
use brocade::{BrocadeClient, BrocadeOptions, InterSwitchLink, InterfaceStatus, PortOperatingMode};
use brocade::{
BrocadeClient, BrocadeOptions, InterSwitchLink, InterfaceConfig, InterfaceSpeed,
InterfaceStatus, PortChannelId, PortOperatingMode, Vlan,
};
use harmony_types::{
id::Id,
net::{IpAddress, MacAddress},
switch::{PortDeclaration, PortLocation},
};
use log::{info, warn};
use log::info;
use option_ext::OptionExt;
use crate::{
modules::brocade::BrocadeSwitchAuth,
topology::{PortConfig, SwitchClient, SwitchError},
topology::{SwitchClient, SwitchError},
};
#[derive(Debug, Clone)]
@@ -54,7 +58,7 @@ impl SwitchClient for BrocadeSwitchClient {
info!("Brocade found interfaces {interfaces:#?}");
let interfaces: Vec<(String, PortOperatingMode)> = interfaces
let interfaces: Vec<InterfaceConfig> = interfaces
.into_iter()
.filter(|interface| {
interface.operating_mode.is_none() && interface.status == InterfaceStatus::Connected
@@ -65,7 +69,16 @@ impl SwitchClient for BrocadeSwitchClient {
|| link.remote_port.contains(&interface.port_location)
})
})
.map(|interface| (interface.name.clone(), PortOperatingMode::Trunk))
.map(|interface| InterfaceConfig {
interface: brocade::SwitchInterface::Ethernet(
interface.interface_type.clone(),
interface.port_location.clone(),
),
mode: PortOperatingMode::Trunk,
access_vlan: None,
trunk_vlans: None,
speed: None,
})
.collect();
if interfaces.is_empty() {
@@ -114,50 +127,19 @@ impl SwitchClient for BrocadeSwitchClient {
async fn configure_port_channel(
&self,
channel_id: PortChannelId,
channel_name: &str,
switch_ports: Vec<PortLocation>,
speed: Option<&InterfaceSpeed>,
) -> Result<u8, SwitchError> {
let mut channel_id = self
.brocade
.find_available_channel_id()
self.brocade
.create_port_channel(channel_id, channel_name, &switch_ports, speed)
.await
.map_err(|e| SwitchError::new(format!("{e}")))?;
info!("Found next available channel id : {channel_id}");
loop {
match self
.brocade
.create_port_channel(channel_id, channel_name, &switch_ports)
.await
.map_err(|e| SwitchError::new(format!("{e}")))
{
Ok(_) => {
info!(
"Successfully configured port channel {channel_id} {channel_name} for ports {switch_ports:?}"
);
break;
}
Err(e) => {
warn!(
"Could not configure port channel {channel_id} {channel_name} for ports {switch_ports:?}"
);
let previous_id = channel_id;
while previous_id == channel_id {
channel_id = inquire::Text::new(
"Type the port channel number to use (or CTRL+C to exit) :",
)
.prompt()
.map_err(|e| {
SwitchError::new(format!("Failed to prompt for channel id : {e}"))
})?
.parse()
.unwrap_or(channel_id);
}
}
}
}
info!(
"Successfully configured port channel {channel_id} {channel_name} for ports {switch_ports:?}"
);
Ok(channel_id)
}
@@ -170,14 +152,28 @@ impl SwitchClient for BrocadeSwitchClient {
}
Ok(())
}
async fn configure_interface(&self, ports: &Vec<PortConfig>) -> Result<(), SwitchError> {
// FIXME hardcoded TenGigabitEthernet = bad
let ports = ports
.iter()
.map(|p| (format!("TenGigabitEthernet {}", p.0), p.1.clone()))
.collect();
async fn configure_interfaces(
&self,
interfaces: &Vec<InterfaceConfig>,
) -> Result<(), SwitchError> {
self.brocade
.configure_interfaces(&ports)
.configure_interfaces(interfaces)
.await
.map_err(|e| SwitchError::new(e.to_string()))?;
Ok(())
}
async fn create_vlan(&self, vlan: &Vlan) -> Result<(), SwitchError> {
self.brocade
.create_vlan(vlan)
.await
.map_err(|e| SwitchError::new(e.to_string()))?;
Ok(())
}
async fn delete_vlan(&self, vlan: &Vlan) -> Result<(), SwitchError> {
self.brocade
.delete_vlan(vlan)
.await
.map_err(|e| SwitchError::new(e.to_string()))?;
Ok(())
@@ -208,8 +204,10 @@ impl SwitchClient for UnmanagedSwitch {
async fn configure_port_channel(
&self,
channel_name: &str,
switch_ports: Vec<PortLocation>,
_channel_id: PortChannelId,
_channel_name: &str,
_switch_ports: Vec<PortLocation>,
_speed: Option<&InterfaceSpeed>,
) -> Result<u8, SwitchError> {
todo!("unmanaged switch. Nothing to do.")
}
@@ -217,8 +215,19 @@ impl SwitchClient for UnmanagedSwitch {
async fn clear_port_channel(&self, ids: &Vec<Id>) -> Result<(), SwitchError> {
todo!("unmanged switch. Nothing to do.")
}
async fn configure_interface(&self, ports: &Vec<PortConfig>) -> Result<(), SwitchError> {
todo!("unmanged switch. Nothing to do.")
async fn configure_interfaces(
&self,
_interfaces: &Vec<InterfaceConfig>,
) -> Result<(), SwitchError> {
todo!("unmanaged switch. Nothing to do.")
}
async fn create_vlan(&self, _vlan: &Vlan) -> Result<(), SwitchError> {
todo!("unmanaged switch. Nothing to do.")
}
async fn delete_vlan(&self, _vlan: &Vlan) -> Result<(), SwitchError> {
todo!("unmanaged switch. Nothing to do.")
}
}
@@ -229,8 +238,9 @@ mod tests {
use assertor::*;
use async_trait::async_trait;
use brocade::{
BrocadeClient, BrocadeInfo, Error, InterSwitchLink, InterfaceInfo, InterfaceStatus,
InterfaceType, MacAddressEntry, PortChannelId, PortOperatingMode, SecurityLevel,
BrocadeClient, BrocadeInfo, Error, InterSwitchLink, InterfaceConfig, InterfaceInfo,
InterfaceSpeed, InterfaceStatus, InterfaceType, MacAddressEntry, PortChannelId,
PortOperatingMode, SecurityLevel, Vlan,
};
use harmony_types::switch::PortLocation;
@@ -258,8 +268,26 @@ mod tests {
//TODO not sure about this
let configured_interfaces = brocade.configured_interfaces.lock().unwrap();
assert_that!(*configured_interfaces).contains_exactly(vec![
(first_interface.name.clone(), PortOperatingMode::Trunk),
(second_interface.name.clone(), PortOperatingMode::Trunk),
InterfaceConfig {
interface: brocade::SwitchInterface::Ethernet(
InterfaceType::TenGigabitEthernet,
PortLocation(1, 0, 1),
),
mode: PortOperatingMode::Trunk,
access_vlan: None,
trunk_vlans: None,
speed: None,
},
InterfaceConfig {
interface: brocade::SwitchInterface::Ethernet(
InterfaceType::TenGigabitEthernet,
PortLocation(1, 0, 4),
),
mode: PortOperatingMode::Trunk,
access_vlan: None,
trunk_vlans: None,
speed: None,
},
]);
}
@@ -343,7 +371,7 @@ mod tests {
struct FakeBrocadeClient {
stack_topology: Vec<InterSwitchLink>,
interfaces: Vec<InterfaceInfo>,
configured_interfaces: Arc<Mutex<Vec<(String, PortOperatingMode)>>>,
configured_interfaces: Arc<Mutex<Vec<InterfaceConfig>>>,
}
#[async_trait]
@@ -366,7 +394,7 @@ mod tests {
async fn configure_interfaces(
&self,
interfaces: &Vec<(String, PortOperatingMode)>,
interfaces: &Vec<InterfaceConfig>,
) -> Result<(), Error> {
let mut configured_interfaces = self.configured_interfaces.lock().unwrap();
*configured_interfaces = interfaces.clone();
@@ -374,6 +402,14 @@ mod tests {
Ok(())
}
async fn create_vlan(&self, _vlan: &Vlan) -> Result<(), Error> {
todo!()
}
async fn delete_vlan(&self, _vlan: &Vlan) -> Result<(), Error> {
todo!()
}
async fn find_available_channel_id(&self) -> Result<PortChannelId, Error> {
todo!()
}
@@ -383,10 +419,15 @@ mod tests {
_channel_id: PortChannelId,
_channel_name: &str,
_ports: &[PortLocation],
_speed: Option<&InterfaceSpeed>,
) -> Result<(), Error> {
todo!()
}
async fn reset_interface(&self, _interface: &str) -> Result<(), Error> {
todo!()
}
async fn clear_port_channel(&self, _channel_name: &str) -> Result<(), Error> {
todo!()
}
@@ -418,7 +459,7 @@ mod tests {
let interface_type = self
.interface_type
.clone()
.unwrap_or(InterfaceType::Ethernet("TenGigabitEthernet".into()));
.unwrap_or(InterfaceType::TenGigabitEthernet);
let port_location = self.port_location.clone().unwrap_or(PortLocation(1, 0, 1));
let name = format!("{interface_type} {port_location}");
let status = self.status.clone().unwrap_or(InterfaceStatus::Connected);

View File

@@ -1,5 +1,5 @@
use async_trait::async_trait;
use brocade::{BrocadeOptions, PortOperatingMode};
use brocade::{BrocadeOptions, InterfaceConfig, PortChannelConfig, PortChannelId, PortOperatingMode, Vlan};
use crate::{
data::Version,
@@ -8,7 +8,7 @@ use crate::{
inventory::Inventory,
score::Score,
topology::{
HostNetworkConfig, PortConfig, PreparationError, PreparationOutcome, Switch, SwitchClient,
HostNetworkConfig, PreparationError, PreparationOutcome, Switch, SwitchClient,
SwitchError, Topology,
},
};
@@ -20,7 +20,7 @@ use serde::Serialize;
#[derive(Clone, Debug, Serialize)]
pub struct BrocadeSwitchScore {
pub port_channels_to_clear: Vec<Id>,
pub ports_to_configure: Vec<PortConfig>,
pub ports_to_configure: Vec<InterfaceConfig>,
}
impl<T: Topology + Switch> Score<T> for BrocadeSwitchScore {
@@ -59,7 +59,7 @@ impl<T: Topology + Switch> Interpret<T> for BrocadeSwitchInterpret {
.map_err(|e| InterpretError::new(e.to_string()))?;
debug!("Configuring interfaces {:?}", self.score.ports_to_configure);
topology
.configure_interface(&self.score.ports_to_configure)
.configure_interfaces(&self.score.ports_to_configure)
.await
.map_err(|e| InterpretError::new(e.to_string()))?;
Ok(Outcome::success("switch configured".to_string()))
@@ -126,13 +126,43 @@ impl Switch for SwitchTopology {
todo!()
}
async fn configure_port_channel(&self, _config: &HostNetworkConfig) -> Result<(), SwitchError> {
async fn configure_port_channel(
&self,
_channel_id: PortChannelId,
_config: &HostNetworkConfig,
) -> Result<(), SwitchError> {
todo!()
}
    /// Creates the port-channel described by `config` (id, name, member
    /// ports, optional forced speed) via the underlying switch client.
    /// L2 mode/VLANs are applied separately through `configure_interfaces`.
    async fn configure_port_channel_from_config(
        &self,
        config: &PortChannelConfig,
    ) -> Result<(), SwitchError> {
        self.client
            .configure_port_channel(
                config.id,
                &config.name,
                config.ports.clone(),
                config.speed.as_ref(),
            )
            .await
            .map_err(|e| SwitchError::new(format!("Failed to create port-channel: {e}")))?;
        Ok(())
    }
async fn clear_port_channel(&self, ids: &Vec<Id>) -> Result<(), SwitchError> {
self.client.clear_port_channel(ids).await
}
async fn configure_interface(&self, ports: &Vec<PortConfig>) -> Result<(), SwitchError> {
self.client.configure_interface(ports).await
async fn configure_interfaces(
&self,
interfaces: &Vec<InterfaceConfig>,
) -> Result<(), SwitchError> {
self.client.configure_interfaces(interfaces).await
}
async fn create_vlan(&self, vlan: &Vlan) -> Result<(), SwitchError> {
self.client.create_vlan(vlan).await
}
async fn delete_vlan(&self, vlan: &Vlan) -> Result<(), SwitchError> {
self.client.delete_vlan(vlan).await
}
}

View File

@@ -0,0 +1,179 @@
use async_trait::async_trait;
use brocade::{InterfaceConfig, PortChannelConfig, Vlan};
use harmony_types::id::Id;
use log::{debug, info};
use serde::Serialize;
use crate::{
data::Version,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::{Switch, SwitchError, Topology},
};
/// Declarative description of a full Brocade switch configuration: VLANs,
/// standalone interfaces, and port-channels. Applied in that order by
/// `BrocadeSwitchConfigurationInterpret`.
#[derive(Clone, Debug, Serialize)]
pub struct BrocadeSwitchConfigurationScore {
    /// VLANs to create on the switch. Define once, reference everywhere.
    pub vlans: Vec<Vlan>,
    /// Standalone interfaces (NOT members of a port-channel).
    /// Each has its own VLAN/mode configuration.
    pub interfaces: Vec<InterfaceConfig>,
    /// Port-channels: bundles of ports with VLAN/mode config
    /// applied on the logical port-channel interface, not on the members.
    pub port_channels: Vec<PortChannelConfig>,
}
impl<T: Topology + Switch> Score<T> for BrocadeSwitchConfigurationScore {
    /// Human-readable score name shown in CLI/logs.
    fn name(&self) -> String {
        "BrocadeSwitchConfigurationScore".to_string()
    }
    /// Builds the interpret that applies this score to a topology.
    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
        Box::new(BrocadeSwitchConfigurationInterpret {
            score: self.clone(),
        })
    }
}
/// Interpret that pushes a `BrocadeSwitchConfigurationScore` to the switch.
#[derive(Debug)]
struct BrocadeSwitchConfigurationInterpret {
    /// Owned copy of the score being applied.
    score: BrocadeSwitchConfigurationScore,
}
#[async_trait]
impl<T: Topology + Switch> Interpret<T> for BrocadeSwitchConfigurationInterpret {
    /// Applies the score in dependency order: VLANs before the interfaces
    /// that reference them, and port-channel bundles before L2 configuration
    /// of their logical interfaces.
    async fn execute(
        &self,
        _inventory: &Inventory,
        topology: &T,
    ) -> Result<Outcome, InterpretError> {
        self.create_vlans(topology).await?;
        self.create_port_channels(topology).await?;
        self.configure_port_channel_interfaces(topology).await?;
        self.configure_standalone_interfaces(topology).await?;
        Ok(Outcome::success(
            "Switch configuration applied successfully".to_string(),
        ))
    }
    fn get_name(&self) -> InterpretName {
        InterpretName::Custom("BrocadeSwitchConfigurationInterpret")
    }
    // NOTE(review): the accessors below panic via todo!() if invoked —
    // confirm the harmony runtime never calls them for this interpret.
    fn get_version(&self) -> Version {
        todo!()
    }
    fn get_status(&self) -> InterpretStatus {
        todo!()
    }
    fn get_children(&self) -> Vec<Id> {
        todo!()
    }
}
impl BrocadeSwitchConfigurationInterpret {
    /// Creates every VLAN declared in the score, failing fast on the first
    /// VLAN the switch rejects.
    async fn create_vlans<T: Topology + Switch>(
        &self,
        topology: &T,
    ) -> Result<(), InterpretError> {
        for vlan in &self.score.vlans {
            info!("Creating VLAN {} ({})", vlan.id, vlan.name);
            topology
                .create_vlan(vlan)
                .await
                .map_err(|e| InterpretError::new(format!("Failed to create VLAN {}: {e}", vlan.id)))?;
        }
        Ok(())
    }
    /// Creates the port-channel bundles (id, name, member ports). L2 mode is
    /// intentionally NOT set here — see configure_port_channel_interfaces.
    async fn create_port_channels<T: Topology + Switch>(
        &self,
        topology: &T,
    ) -> Result<(), InterpretError> {
        for pc in &self.score.port_channels {
            info!(
                "Creating port-channel {} ({}) with ports: {:?}",
                pc.id, pc.name, pc.ports
            );
            topology
                .configure_port_channel_from_config(pc)
                .await
                .map_err(|e| {
                    InterpretError::new(format!(
                        "Failed to create port-channel {} ({}): {e}",
                        pc.id, pc.name
                    ))
                })?;
        }
        Ok(())
    }
    /// Applies mode/VLAN/speed settings on each logical port-channel
    /// interface (not on the member ports).
    async fn configure_port_channel_interfaces<T: Topology + Switch>(
        &self,
        topology: &T,
    ) -> Result<(), InterpretError> {
        // Project each PortChannelConfig onto an InterfaceConfig that targets
        // the logical PortChannel interface.
        let pc_interfaces: Vec<InterfaceConfig> = self
            .score
            .port_channels
            .iter()
            .map(|pc| InterfaceConfig {
                interface: brocade::SwitchInterface::PortChannel(pc.id),
                mode: pc.mode.clone(),
                access_vlan: pc.access_vlan.as_ref().map(|v| v.id),
                trunk_vlans: pc.trunk_vlans.clone(),
                speed: pc.speed.clone(),
            })
            .collect();
        if !pc_interfaces.is_empty() {
            info!(
                "Configuring L2 mode on {} port-channel interface(s)",
                pc_interfaces.len()
            );
            for pc in &self.score.port_channels {
                debug!(
                    " port-channel {} ({}): mode={:?}, vlans={:?}, speed={:?}",
                    pc.id, pc.name, pc.mode, pc.trunk_vlans, pc.speed
                );
            }
            topology
                .configure_interfaces(&pc_interfaces)
                .await
                .map_err(|e| {
                    InterpretError::new(format!(
                        "Failed to configure port-channel interfaces: {e}"
                    ))
                })?;
        }
        Ok(())
    }
    /// Configures the standalone (non-bundled) interfaces in one batch call.
    async fn configure_standalone_interfaces<T: Topology + Switch>(
        &self,
        topology: &T,
    ) -> Result<(), InterpretError> {
        if !self.score.interfaces.is_empty() {
            info!(
                "Configuring {} standalone interface(s)",
                self.score.interfaces.len()
            );
            for iface in &self.score.interfaces {
                debug!(
                    " {}: mode={:?}, speed={:?}",
                    iface.interface, iface.mode, iface.speed
                );
            }
            topology
                .configure_interfaces(&self.score.interfaces)
                .await
                .map_err(|e| {
                    InterpretError::new(format!("Failed to configure interfaces: {e}"))
                })?;
        }
        Ok(())
    }
}

View File

@@ -3,3 +3,6 @@ pub use brocade::*;
pub mod brocade_snmp;
pub use brocade_snmp::*;
pub mod brocade_switch_configuration;
pub use brocade_switch_configuration::*;

View File

@@ -1,6 +1,7 @@
use std::str::FromStr;
use async_trait::async_trait;
use brocade::{InterfaceConfig, PortChannelConfig, PortChannelId, Vlan};
use harmony_types::{id::Id, switch::PortLocation};
use log::{error, info, warn};
use serde::Serialize;
@@ -11,7 +12,10 @@ use crate::{
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::{HostNetworkConfig, NetworkInterface, NetworkManager, Switch, SwitchPort, Topology},
topology::{
HostNetworkConfig, NetworkInterface, NetworkManager, Switch, SwitchPort,
Topology,
},
};
/// Configures high-availability networking for a set of physical hosts.
@@ -152,8 +156,9 @@ impl HostNetworkConfigurationInterpret {
InterpretError::new(format!("Failed to configure host network: {e}"))
})?;
let channel_id = todo!("Determine port-channel ID for this host");
topology
.configure_port_channel(&config)
.configure_port_channel(channel_id, &config)
.await
.map_err(|e| {
InterpretError::new(format!("Failed to configure host network: {e}"))
@@ -389,7 +394,7 @@ mod tests {
use crate::{
hardware::HostCategory,
topology::{
HostNetworkConfig, NetworkError, PortConfig, PreparationError, PreparationOutcome,
HostNetworkConfig, NetworkError, PreparationError, PreparationOutcome,
SwitchError, SwitchPort,
},
};
@@ -836,6 +841,7 @@ mod tests {
async fn configure_port_channel(
&self,
_channel_id: PortChannelId,
config: &HostNetworkConfig,
) -> Result<(), SwitchError> {
let mut configured_port_channels = self.configured_port_channels.lock().unwrap();
@@ -843,14 +849,26 @@ mod tests {
Ok(())
}
async fn configure_port_channel_from_config(
&self,
_config: &PortChannelConfig,
) -> Result<(), SwitchError> {
todo!()
}
async fn clear_port_channel(&self, ids: &Vec<Id>) -> Result<(), SwitchError> {
todo!()
}
async fn configure_interface(
async fn configure_interfaces(
&self,
port_config: &Vec<PortConfig>,
_interfaces: &Vec<InterfaceConfig>,
) -> Result<(), SwitchError> {
todo!()
}
async fn create_vlan(&self, _vlan: &Vlan) -> Result<(), SwitchError> {
todo!()
}
async fn delete_vlan(&self, _vlan: &Vlan) -> Result<(), SwitchError> {
todo!()
}
}
}

View File

@@ -15,6 +15,9 @@ use crate::{
pub struct OpenbaoScore {
/// Host used for external access (ingress)
pub host: String,
/// Set to true when deploying to OpenShift. Defaults to false for k3d/Kubernetes.
#[serde(default)]
pub openshift: bool,
}
impl<T: Topology + K8sclient + HelmCommand> Score<T> for OpenbaoScore {
@@ -24,12 +27,12 @@ impl<T: Topology + K8sclient + HelmCommand> Score<T> for OpenbaoScore {
#[doc(hidden)]
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
// TODO exec pod commands to initialize secret store if not already done
let host = &self.host;
let openshift = self.openshift;
let values_yaml = Some(format!(
r#"global:
openshift: true
openshift: {openshift}
server:
standalone:
enabled: true

View File

@@ -1,3 +1,4 @@
use harmony_k8s::KubernetesDistribution;
use k8s_openapi::api::core::v1::Namespace;
use k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta;
use k8s_openapi::{ByteString, api::core::v1::Secret};
@@ -63,6 +64,20 @@ pub struct ZitadelScore {
/// External domain (e.g. `"auth.example.com"`).
pub host: String,
pub zitadel_version: String,
/// Set to false for local k3d development (uses HTTP instead of HTTPS).
/// Defaults to true for production deployments.
#[serde(default)]
pub external_secure: bool,
}
impl Default for ZitadelScore {
fn default() -> Self {
Self {
host: Default::default(),
zitadel_version: "v4.12.1".to_string(),
external_secure: true,
}
}
}
impl<T: Topology + K8sclient + HelmCommand + PostgreSQL> Score<T> for ZitadelScore {
@@ -75,6 +90,7 @@ impl<T: Topology + K8sclient + HelmCommand + PostgreSQL> Score<T> for ZitadelSco
Box::new(ZitadelInterpret {
host: self.host.clone(),
zitadel_version: self.zitadel_version.clone(),
external_secure: self.external_secure,
})
}
}
@@ -85,6 +101,7 @@ impl<T: Topology + K8sclient + HelmCommand + PostgreSQL> Score<T> for ZitadelSco
struct ZitadelInterpret {
host: String,
zitadel_version: String,
external_secure: bool,
}
#[async_trait]
@@ -222,7 +239,20 @@ impl<T: Topology + K8sclient + HelmCommand + PostgreSQL> Interpret<T> for Zitade
let admin_password = generate_secure_password(16);
// --- Step 3: Create masterkey secret ------------------------------------
// --- Step 3: Get k8s client and detect distribution -------------------
let k8s_client = topology
.k8s_client()
.await
.map_err(|e| InterpretError::new(format!("Failed to get k8s client: {e}")))?;
let distro = k8s_client.get_k8s_distribution().await.map_err(|e| {
InterpretError::new(format!("Failed to detect k8s distribution: {}", e))
})?;
info!("[Zitadel] Detected Kubernetes distribution: {:?}", distro);
// --- Step 4: Create masterkey secret ------------------------------------
debug!(
"[Zitadel] Creating masterkey secret '{}' in namespace '{}'",
@@ -254,13 +284,7 @@ impl<T: Topology + K8sclient + HelmCommand + PostgreSQL> Interpret<T> for Zitade
..Secret::default()
};
match topology
.k8s_client()
.await
.map_err(|e| InterpretError::new(format!("Failed to get k8s client : {e}")))?
.create(&masterkey_secret, Some(NAMESPACE))
.await
{
match k8s_client.create(&masterkey_secret, Some(NAMESPACE)).await {
Ok(_) => {
info!(
"[Zitadel] Masterkey secret '{}' created",
@@ -288,16 +312,15 @@ impl<T: Topology + K8sclient + HelmCommand + PostgreSQL> Interpret<T> for Zitade
MASTERKEY_SECRET_NAME
);
// --- Step 4: Build Helm values ------------------------------------
// --- Step 5: Build Helm values ------------------------------------
warn!(
"[Zitadel] Applying TLS-enabled ingress defaults for OKD/OpenShift. \
cert-manager annotations are included as optional hints and are \
ignored on clusters without cert-manager."
);
let values_yaml = format!(
r#"image:
let values_yaml = match distro {
KubernetesDistribution::OpenshiftFamily => {
warn!(
"[Zitadel] Applying OpenShift-specific ingress with TLS and cert-manager annotations."
);
format!(
r#"image:
tag: {zitadel_version}
zitadel:
masterkeySecretName: "{MASTERKEY_SECRET_NAME}"
@@ -330,8 +353,6 @@ zitadel:
Username: postgres
SSL:
Mode: require
# Directly import credentials from the postgres secret
# TODO : use a less privileged postgres user
env:
- name: ZITADEL_DATABASE_POSTGRES_USER_USERNAME
valueFrom:
@@ -353,7 +374,6 @@ env:
secretKeyRef:
name: "{pg_superuser_secret}"
key: password
# Security context for OpenShift restricted PSA compliance
podSecurityContext:
runAsNonRoot: true
runAsUser: null
@@ -370,7 +390,6 @@ securityContext:
fsGroup: null
seccompProfile:
type: RuntimeDefault
# Init job security context (runs before main deployment)
initJob:
podSecurityContext:
runAsNonRoot: true
@@ -388,7 +407,6 @@ initJob:
fsGroup: null
seccompProfile:
type: RuntimeDefault
# Setup job security context
setupJob:
podSecurityContext:
runAsNonRoot: true
@@ -417,10 +435,9 @@ ingress:
- path: /
pathType: Prefix
tls:
- hosts:
- secretName: zitadel-tls
hosts:
- "{host}"
secretName: "{host}-tls"
login:
enabled: true
podSecurityContext:
@@ -450,15 +467,177 @@ login:
- path: /ui/v2/login
pathType: Prefix
tls:
- hosts:
- "{host}"
secretName: "{host}-tls""#,
zitadel_version = self.zitadel_version
);
- secretName: zitadel-login-tls
hosts:
- "{host}""#,
zitadel_version = self.zitadel_version,
host = host,
db_host = db_host,
db_port = db_port,
admin_password = admin_password,
pg_superuser_secret = pg_superuser_secret
)
}
KubernetesDistribution::K3sFamily | KubernetesDistribution::Default => {
warn!("[Zitadel] Applying k3d/generic ingress without TLS (HTTP only).");
format!(
r#"image:
tag: {zitadel_version}
zitadel:
masterkeySecretName: "{MASTERKEY_SECRET_NAME}"
configmapConfig:
ExternalDomain: "{host}"
ExternalSecure: false
FirstInstance:
Org:
Human:
UserName: "admin"
Password: "{admin_password}"
FirstName: "Zitadel"
LastName: "Admin"
Email: "admin@zitadel.example.com"
PasswordChangeRequired: true
TLS:
Enabled: false
Database:
Postgres:
Host: "{db_host}"
Port: {db_port}
Database: zitadel
MaxOpenConns: 20
MaxIdleConns: 10
User:
Username: postgres
SSL:
Mode: require
Admin:
Username: postgres
SSL:
Mode: require
env:
- name: ZITADEL_DATABASE_POSTGRES_USER_USERNAME
valueFrom:
secretKeyRef:
name: "{pg_superuser_secret}"
key: user
- name: ZITADEL_DATABASE_POSTGRES_USER_PASSWORD
valueFrom:
secretKeyRef:
name: "{pg_superuser_secret}"
key: password
- name: ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME
valueFrom:
secretKeyRef:
name: "{pg_superuser_secret}"
key: user
- name: ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
name: "{pg_superuser_secret}"
key: password
podSecurityContext:
runAsNonRoot: true
runAsUser: null
fsGroup: null
seccompProfile:
type: RuntimeDefault
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
runAsNonRoot: true
runAsUser: null
fsGroup: null
seccompProfile:
type: RuntimeDefault
initJob:
podSecurityContext:
runAsNonRoot: true
runAsUser: null
fsGroup: null
seccompProfile:
type: RuntimeDefault
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
runAsNonRoot: true
runAsUser: null
fsGroup: null
seccompProfile:
type: RuntimeDefault
setupJob:
podSecurityContext:
runAsNonRoot: true
runAsUser: null
fsGroup: null
seccompProfile:
type: RuntimeDefault
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
runAsNonRoot: true
runAsUser: null
fsGroup: null
seccompProfile:
type: RuntimeDefault
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/proxy-body-size: "50m"
hosts:
- host: "{host}"
paths:
- path: /
pathType: Prefix
tls: []
login:
enabled: true
podSecurityContext:
runAsNonRoot: true
runAsUser: null
fsGroup: null
seccompProfile:
type: RuntimeDefault
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
runAsNonRoot: true
runAsUser: null
fsGroup: null
seccompProfile:
type: RuntimeDefault
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/proxy-body-size: "50m"
hosts:
- host: "{host}"
paths:
- path: /ui/v2/login
pathType: Prefix
tls: []"#,
zitadel_version = self.zitadel_version,
host = host,
db_host = db_host,
db_port = db_port,
admin_password = admin_password,
pg_superuser_secret = pg_superuser_secret
)
}
};
trace!("[Zitadel] Helm values YAML:\n{values_yaml}");
// --- Step 5: Deploy Helm chart ------------------------------------
// --- Step 6: Deploy Helm chart ------------------------------------
info!(
"[Zitadel] Deploying Helm chart 'zitadel/zitadel' as release 'zitadel' in namespace '{NAMESPACE}'"
@@ -482,17 +661,25 @@ login:
.interpret(inventory, topology)
.await;
let protocol = if self.external_secure {
"https"
} else {
"http"
};
match &result {
Ok(_) => info!(
"[Zitadel] Helm chart deployed successfully\n\n\
===== ZITADEL DEPLOYMENT COMPLETE =====\n\
Login URL: https://{host}\n\
Username: admin@zitadel.{host}\n\
Login URL: {protocol}://{host}\n\
Username: admin\n\
Password: {admin_password}\n\n\
IMPORTANT: The password is saved in ConfigMap 'zitadel-config-yaml'\n\
and must be changed on first login. Save the credentials in a\n\
secure location after changing them.\n\
========================================="
=========================================",
protocol = protocol,
host = self.host,
admin_password = admin_password
),
Err(e) => error!("[Zitadel] Helm chart deployment failed: {e}"),
}

View File

@@ -6,7 +6,7 @@ readme.workspace = true
license.workspace = true
[dependencies]
clap = { version = "4.5.35", features = ["derive"] }
clap.workspace = true
tokio.workspace = true
env_logger.workspace = true
log.workspace = true

View File

@@ -18,6 +18,9 @@ interactive-parse = "0.1.5"
log.workspace = true
directories.workspace = true
inquire.workspace = true
sqlx = { workspace = true, features = ["runtime-tokio", "sqlite"] }
anyhow.workspace = true
env_logger.workspace = true
[dev-dependencies]
pretty_assertions.workspace = true

View File

@@ -0,0 +1,88 @@
//! Basic example showing harmony_config with SQLite backend
//!
//! This example demonstrates:
//! - Zero-setup SQLite backend (no configuration needed)
//! - Using the `#[derive(Config)]` macro
//! - Environment variable override (HARMONY_CONFIG_TestConfig overrides SQLite)
//! - Direct set/get operations (prompting requires a TTY)
//!
//! Run with:
//! - `cargo run --example basic` - creates/reads config from SQLite
//! - `HARMONY_CONFIG_TestConfig='{"name":"from_env","count":42}' cargo run --example basic` - uses env var
use std::sync::Arc;
use harmony_config::{Config, ConfigManager, EnvSource, SqliteSource};
use log::info;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
/// Demo config persisted by this example; the `Config` derive supplies the
/// storage key and interactive-prompt support.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Config)]
struct TestConfig {
    // Arbitrary demo fields exercised by the set/get round-trips below.
    name: String,
    count: u32,
}
impl Default for TestConfig {
fn default() -> Self {
Self {
name: "default_name".to_string(),
count: 0,
}
}
}
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    env_logger::init();
    // Zero-setup backend: opens (or creates) the default SQLite database.
    let sqlite = SqliteSource::default().await?;
    // Resolution chain: environment variables override the SQLite store.
    let manager = ConfigManager::new(vec![Arc::new(EnvSource), Arc::new(sqlite)]);
    info!("1. Attempting to get TestConfig (expect NotFound on first run)...");
    match manager.get::<TestConfig>().await {
        Ok(config) => {
            info!("   Found config: {:?}", config);
        }
        Err(harmony_config::ConfigError::NotFound { .. }) => {
            info!("   NotFound - as expected on first run");
        }
        Err(e) => {
            info!("   Error: {:?}", e);
        }
    }
    info!("\n2. Setting config directly...");
    let config = TestConfig {
        name: "from_code".to_string(),
        count: 42,
    };
    manager.set(&config).await?;
    info!("   Set config: {:?}", config);
    info!("\n3. Getting config back from SQLite...");
    let retrieved: TestConfig = manager.get().await?;
    info!("   Retrieved: {:?}", retrieved);
    info!("\n4. Using env override...");
    info!("   Env var HARMONY_CONFIG_TestConfig overrides SQLite");
    let env_config = TestConfig {
        name: "from_env".to_string(),
        count: 99,
    };
    // set_var is an unsafe fn here: mutating the process environment can race
    // with concurrent reads. Acceptable for this linear demo sequence.
    unsafe {
        std::env::set_var(
            "HARMONY_CONFIG_TestConfig",
            serde_json::to_string(&env_config)?,
        );
    }
    // EnvSource sits first in the chain, so this read returns env_config.
    let from_env: TestConfig = manager.get().await?;
    info!("   Got from env: {:?}", from_env);
    unsafe {
        std::env::remove_var("HARMONY_CONFIG_TestConfig");
    }
    info!("\nDone! Config persisted at ~/.local/share/harmony/config/config.db");
    Ok(())
}

View File

@@ -0,0 +1,190 @@
//! End-to-end example: harmony_config with OpenBao as a ConfigSource
//!
//! This example demonstrates the full config resolution chain:
//! EnvSource → SqliteSource → StoreSource<OpenbaoSecretStore>
//!
//! When OpenBao is unreachable, the chain gracefully falls through to SQLite.
//!
//! **Prerequisites**:
//! - OpenBao must be initialized and unsealed
//! - KV v2 engine must be enabled at the `OPENBAO_KV_MOUNT` path (default: `secret`)
//! - Auth method must be enabled at the `OPENBAO_AUTH_MOUNT` path (default: `userpass`)
//!
//! **Environment variables**:
//! - `OPENBAO_URL` (required for OpenBao): URL of the OpenBao server
//! - `OPENBAO_TOKEN` (optional): Use token auth instead of userpass
//! - `OPENBAO_USERNAME` + `OPENBAO_PASSWORD` (optional): Userpass auth
//! - `OPENBAO_KV_MOUNT` (default: `secret`): KV v2 engine mount path
//! - `OPENBAO_AUTH_MOUNT` (default: `userpass`): Auth method mount path
//! - `OPENBAO_SKIP_TLS` (default: `false`): Skip TLS verification
//! - `HARMONY_SSO_URL` + `HARMONY_SSO_CLIENT_ID` (optional): Zitadel OIDC device flow (RFC 8628)
//!
//! **Run**:
//! ```bash
//! # Without OpenBao (SqliteSource only):
//! cargo run --example openbao_chain
//!
//! # With OpenBao (full chain):
//! export OPENBAO_URL="http://127.0.0.1:8200"
//! export OPENBAO_TOKEN="<your-token>"
//! cargo run --example openbao_chain
//! ```
//!
//! **Setup OpenBao** (if needed):
//! ```bash
//! # Port-forward to local OpenBao
//! kubectl port-forward svc/openbao -n openbao 8200:8200 &
//!
//! # Initialize (one-time)
//! kubectl exec -n openbao openbao-0 -- bao operator init
//!
//! # Enable KV and userpass (one-time)
//! kubectl exec -n openbao openbao-0 -- bao secrets enable -path=secret kv-v2
//! kubectl exec -n openbao openbao-0 -- bao auth enable userpass
//!
//! # Create test user
//! kubectl exec -n openbao openbao-0 -- bao write auth/userpass/users/testuser \
//! password="testpass" policies="default"
//! ```
use std::sync::Arc;
use harmony_config::{Config, ConfigManager, ConfigSource, EnvSource, SqliteSource, StoreSource};
use harmony_secret::OpenbaoSecretStore;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
/// Demo application config resolved through the env → sqlite → openbao chain.
/// `PartialEq` enables the round-trip assertions in `main`.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, PartialEq)]
struct AppConfig {
    host: String,
    port: u16,
}
impl Default for AppConfig {
fn default() -> Self {
Self {
host: "localhost".to_string(),
port: 8080,
}
}
}
impl Config for AppConfig {
    // Storage key used by every ConfigSource in the chain (manual impl here
    // instead of the derive, to show what the macro generates).
    const KEY: &'static str = "AppConfig";
}
/// Builds the config resolution chain: env → sqlite, plus an OpenBao-backed
/// `StoreSource` when `OPENBAO_URL` (or `VAULT_ADDR`) is set and the store
/// can be constructed. Any OpenBao failure falls back to the local chain.
async fn build_manager() -> ConfigManager {
    let sqlite = Arc::new(
        SqliteSource::default()
            .await
            .expect("Failed to open SQLite database"),
    );
    let env_source: Arc<dyn ConfigSource> = Arc::new(EnvSource);
    // OPENBAO_URL wins; VAULT_ADDR is accepted for Vault-compatible setups.
    let openbao_url = std::env::var("OPENBAO_URL")
        .or_else(|_| std::env::var("VAULT_ADDR"))
        .ok();
    match openbao_url {
        Some(url) => {
            // Mount paths default to the standard `secret` / `userpass`.
            let kv_mount =
                std::env::var("OPENBAO_KV_MOUNT").unwrap_or_else(|_| "secret".to_string());
            let auth_mount =
                std::env::var("OPENBAO_AUTH_MOUNT").unwrap_or_else(|_| "userpass".to_string());
            // Anything other than the literal "true" keeps TLS verification on.
            let skip_tls = std::env::var("OPENBAO_SKIP_TLS")
                .map(|v| v == "true")
                .unwrap_or(false);
            // All credentials are optional; presumably the store picks an auth
            // method from whichever are present — see OpenbaoSecretStore::new.
            match OpenbaoSecretStore::new(
                url,
                kv_mount,
                auth_mount,
                skip_tls,
                std::env::var("OPENBAO_TOKEN").ok(),
                std::env::var("OPENBAO_USERNAME").ok(),
                std::env::var("OPENBAO_PASSWORD").ok(),
                std::env::var("HARMONY_SSO_URL").ok(),
                std::env::var("HARMONY_SSO_CLIENT_ID").ok(),
            )
            .await
            {
                Ok(store) => {
                    let store_source: Arc<dyn ConfigSource> =
                        Arc::new(StoreSource::new("harmony".to_string(), store));
                    println!("OpenBao connected. Full chain: env → sqlite → openbao");
                    ConfigManager::new(vec![env_source, Arc::clone(&sqlite) as _, store_source])
                }
                Err(e) => {
                    // Graceful degradation: warn and continue without OpenBao.
                    eprintln!(
                        "Warning: OpenBao unavailable ({e}), using local chain: env → sqlite"
                    );
                    ConfigManager::new(vec![env_source, sqlite])
                }
            }
        }
        None => {
            println!("OPENBAO_URL not set. Using local chain: env → sqlite");
            ConfigManager::new(vec![env_source, sqlite])
        }
    }
}
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    env_logger::init();
    // Chain depends on OPENBAO_URL; see build_manager above.
    let manager = build_manager().await;
    println!("\n=== harmony_config OpenBao Chain Demo ===\n");
    println!("1. Attempting to get AppConfig (expect NotFound on first run)...");
    match manager.get::<AppConfig>().await {
        Ok(config) => {
            println!("   Found: {:?}", config);
        }
        Err(harmony_config::ConfigError::NotFound { .. }) => {
            println!("   NotFound - no config stored yet");
        }
        Err(e) => {
            println!("   Error: {:?}", e);
        }
    }
    println!("\n2. Setting AppConfig via set()...");
    let config = AppConfig {
        host: "production.example.com".to_string(),
        port: 443,
    };
    manager.set(&config).await?;
    println!("   Set: {:?}", config);
    println!("\n3. Getting AppConfig back...");
    let retrieved: AppConfig = manager.get().await?;
    println!("   Retrieved: {:?}", retrieved);
    assert_eq!(config, retrieved);
    println!("\n4. Demonstrating env override...");
    println!("   HARMONY_CONFIG_AppConfig env var overrides all backends");
    let env_config = AppConfig {
        host: "env-override.example.com".to_string(),
        port: 9090,
    };
    // set_var is unsafe (may race with concurrent env reads) — fine for this
    // linear demo sequence.
    unsafe {
        std::env::set_var(
            "HARMONY_CONFIG_AppConfig",
            serde_json::to_string(&env_config)?,
        );
    }
    let from_env: AppConfig = manager.get().await?;
    println!("   Got from env: {:?}", from_env);
    assert_eq!(env_config.host, "env-override.example.com");
    unsafe {
        std::env::remove_var("HARMONY_CONFIG_AppConfig");
    }
    println!("\n=== Done! ===");
    println!("Config persisted at ~/.local/share/harmony/config/config.db");
    Ok(())
}

View File

@@ -0,0 +1,69 @@
//! Example demonstrating configuration prompting with harmony_config
//!
//! This example shows how to use `get_or_prompt()` to interactively
//! ask the user for configuration values when none are found.
//!
//! **Note**: This example requires a TTY to work properly since it uses
//! interactive prompting via `inquire`. Run in a terminal.
//!
//! Run with:
//! - `cargo run --example prompting` - will prompt for values interactively
//! - If config exists in SQLite, it will be used directly without prompting
use std::sync::Arc;
use harmony_config::{Config, ConfigManager, EnvSource, PromptSource, SqliteSource};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
/// Interactive demo config; fields are requested from the user when no stored
/// value is found (see `get_or_prompt` in `main`).
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Config)]
struct UserConfig {
    username: String,
    email: String,
    theme: String,
}
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    env_logger::init();
    let sqlite = SqliteSource::default().await?;
    // Chain: env overrides sqlite; PromptSource is the last-resort fallback.
    let manager = ConfigManager::new(vec![
        Arc::new(EnvSource),
        Arc::new(sqlite),
        Arc::new(PromptSource::new()),
    ]);
    println!("UserConfig Setup");
    println!("=================\n");
    println!("Attempting to get UserConfig (env > sqlite > prompt)...\n");
    match manager.get::<UserConfig>().await {
        Ok(config) => {
            println!("Found existing config:");
            println!("  Username: {}", config.username);
            println!("  Email: {}", config.email);
            println!("  Theme: {}", config.theme);
            println!("\nNo prompting needed - using stored config.");
        }
        Err(harmony_config::ConfigError::NotFound { .. }) => {
            println!("No config found in env or SQLite.");
            println!("Calling get_or_prompt() to interactively request config...\n");
            // Requires a TTY: prompts interactively, then persists the answers.
            let config: UserConfig = manager.get_or_prompt().await?;
            println!("\nConfig received and saved to SQLite:");
            println!("  Username: {}", config.username);
            println!("  Email: {}", config.email);
            println!("  Theme: {}", config.theme);
        }
        Err(e) => {
            println!("Error: {:?}", e);
        }
    }
    println!("\nConfig is persisted at ~/.local/share/harmony/config/config.db");
    println!("On next run, the stored config will be used without prompting.");
    Ok(())
}

View File

@@ -15,6 +15,7 @@ pub use harmony_config_derive::Config;
pub use source::env::EnvSource;
pub use source::local_file::LocalFileSource;
pub use source::prompt::PromptSource;
pub use source::sqlite::SqliteSource;
pub use source::store::StoreSource;
#[derive(Debug, Error)]
@@ -51,6 +52,9 @@ pub enum ConfigError {
#[error("I/O error: {0}")]
IoError(#[from] std::io::Error),
#[error("SQLite error: {0}")]
SqliteError(String),
}
pub trait Config: Serialize + DeserializeOwned + JsonSchema + InteractiveParseObj + Sized {
@@ -62,6 +66,10 @@ pub trait ConfigSource: Send + Sync {
async fn get(&self, key: &str) -> Result<Option<serde_json::Value>, ConfigError>;
async fn set(&self, key: &str, value: &serde_json::Value) -> Result<(), ConfigError>;
fn should_persist(&self) -> bool {
true
}
}
pub struct ConfigManager {
@@ -97,20 +105,18 @@ impl ConfigManager {
let config =
T::parse_to_obj().map_err(|e| ConfigError::PromptError(e.to_string()))?;
let value =
serde_json::to_value(&config).map_err(|e| ConfigError::Serialization {
key: T::KEY.to_string(),
source: e,
})?;
for source in &self.sources {
if let Err(e) = source
.set(
T::KEY,
&serde_json::to_value(&config).map_err(|e| {
ConfigError::Serialization {
key: T::KEY.to_string(),
source: e,
}
})?,
)
.await
{
debug!("Failed to save config to source: {e}");
if !source.should_persist() {
continue;
}
if source.set(T::KEY, &value).await.is_ok() {
break;
}
}
@@ -175,6 +181,7 @@ pub fn default_config_dir() -> Option<PathBuf> {
#[cfg(test)]
mod tests {
use super::*;
use harmony_secret::{SecretStore, SecretStoreError};
use pretty_assertions::assert_eq;
use serde::{Deserialize, Serialize};
use std::sync::Mutex;
@@ -460,4 +467,354 @@ mod tests {
assert_eq!(parsed, config);
}
#[tokio::test]
async fn test_sqlite_set_and_get() {
use tempfile::NamedTempFile;
let temp_file = NamedTempFile::new().unwrap();
let path = temp_file.path().to_path_buf();
let source = SqliteSource::open(path).await.unwrap();
let config = TestConfig {
name: "sqlite_test".to_string(),
count: 42,
};
source
.set("TestConfig", &serde_json::to_value(&config).unwrap())
.await
.unwrap();
let result = source.get("TestConfig").await.unwrap().unwrap();
let parsed: TestConfig = serde_json::from_value(result).unwrap();
assert_eq!(parsed, config);
}
#[tokio::test]
async fn test_sqlite_get_returns_none_when_missing() {
use tempfile::NamedTempFile;
let temp_file = NamedTempFile::new().unwrap();
let path = temp_file.path().to_path_buf();
let source = SqliteSource::open(path).await.unwrap();
let result = source.get("NonExistentConfig").await.unwrap();
assert!(result.is_none());
}
#[tokio::test]
async fn test_sqlite_overwrites_on_set() {
use tempfile::NamedTempFile;
let temp_file = NamedTempFile::new().unwrap();
let path = temp_file.path().to_path_buf();
let source = SqliteSource::open(path).await.unwrap();
let config1 = TestConfig {
name: "first".to_string(),
count: 1,
};
let config2 = TestConfig {
name: "second".to_string(),
count: 2,
};
source
.set("TestConfig", &serde_json::to_value(&config1).unwrap())
.await
.unwrap();
source
.set("TestConfig", &serde_json::to_value(&config2).unwrap())
.await
.unwrap();
let result = source.get("TestConfig").await.unwrap().unwrap();
let parsed: TestConfig = serde_json::from_value(result).unwrap();
assert_eq!(parsed, config2);
}
#[tokio::test]
async fn test_sqlite_concurrent_access() {
use tempfile::NamedTempFile;
let temp_file = NamedTempFile::new().unwrap();
let path = temp_file.path().to_path_buf();
let source = SqliteSource::open(path).await.unwrap();
let source = Arc::new(source);
let config1 = TestConfig {
name: "task1".to_string(),
count: 100,
};
let config2 = TestConfig {
name: "task2".to_string(),
count: 200,
};
let (r1, r2) = tokio::join!(
async {
source
.set("key1", &serde_json::to_value(&config1).unwrap())
.await
.unwrap();
source.get("key1").await.unwrap().unwrap()
},
async {
source
.set("key2", &serde_json::to_value(&config2).unwrap())
.await
.unwrap();
source.get("key2").await.unwrap().unwrap()
}
);
let parsed1: TestConfig = serde_json::from_value(r1).unwrap();
let parsed2: TestConfig = serde_json::from_value(r2).unwrap();
assert_eq!(parsed1, config1);
assert_eq!(parsed2, config2);
}
#[tokio::test]
async fn test_get_or_prompt_persists_to_first_writable_source() {
let source1 = Arc::new(MockSource::new());
let source2 = Arc::new(MockSource::new());
let manager = ConfigManager::new(vec![source1.clone(), source2.clone()]);
let result: Result<TestConfig, ConfigError> = manager.get_or_prompt().await;
assert!(result.is_err());
assert_eq!(source1.set_call_count(), 0);
assert_eq!(source2.set_call_count(), 0);
}
#[tokio::test]
async fn test_full_resolution_chain_sqlite_fallback() {
use tempfile::NamedTempFile;
let temp_file = NamedTempFile::new().unwrap();
let sqlite = SqliteSource::open(temp_file.path().to_path_buf())
.await
.unwrap();
let sqlite = Arc::new(sqlite);
let manager = ConfigManager::new(vec![sqlite.clone()]);
let config = TestConfig {
name: "from_sqlite".to_string(),
count: 42,
};
sqlite
.set("TestConfig", &serde_json::to_value(&config).unwrap())
.await
.unwrap();
let result: TestConfig = manager.get().await.unwrap();
assert_eq!(result, config);
}
#[tokio::test]
async fn test_full_resolution_chain_env_overrides_sqlite() {
use tempfile::NamedTempFile;
let temp_file = NamedTempFile::new().unwrap();
let sqlite = SqliteSource::open(temp_file.path().to_path_buf())
.await
.unwrap();
let sqlite = Arc::new(sqlite);
let env_source = Arc::new(EnvSource);
let manager = ConfigManager::new(vec![env_source.clone(), sqlite.clone()]);
let sqlite_config = TestConfig {
name: "from_sqlite".to_string(),
count: 42,
};
let env_config = TestConfig {
name: "from_env".to_string(),
count: 99,
};
sqlite
.set("TestConfig", &serde_json::to_value(&sqlite_config).unwrap())
.await
.unwrap();
let env_key = format!("HARMONY_CONFIG_{}", "TestConfig");
unsafe {
std::env::set_var(&env_key, serde_json::to_string(&env_config).unwrap());
}
let result: TestConfig = manager.get().await.unwrap();
assert_eq!(result.name, "from_env");
assert_eq!(result.count, 99);
unsafe {
std::env::remove_var(&env_key);
}
}
#[tokio::test]
async fn test_branch_switching_scenario_deserialization_error() {
use tempfile::NamedTempFile;
let temp_file = NamedTempFile::new().unwrap();
let sqlite = SqliteSource::open(temp_file.path().to_path_buf())
.await
.unwrap();
let sqlite = Arc::new(sqlite);
let manager = ConfigManager::new(vec![sqlite.clone()]);
let old_config = serde_json::json!({
"name": "old_config",
"count": "not_a_number"
});
sqlite.set("TestConfig", &old_config).await.unwrap();
let result: Result<TestConfig, ConfigError> = manager.get().await;
assert!(matches!(result, Err(ConfigError::Deserialization { .. })));
}
#[tokio::test]
async fn test_prompt_source_always_returns_none() {
let source = PromptSource::new();
let result = source.get("AnyKey").await.unwrap();
assert!(result.is_none());
}
#[tokio::test]
async fn test_prompt_source_set_is_noop() {
let source = PromptSource::new();
let result = source
.set("AnyKey", &serde_json::json!({"test": "value"}))
.await;
assert!(result.is_ok());
}
#[tokio::test]
async fn test_prompt_source_does_not_persist() {
let source = PromptSource::new();
source
.set(
"TestConfig",
&serde_json::json!({"name": "test", "count": 42}),
)
.await
.unwrap();
let result = source.get("TestConfig").await.unwrap();
assert!(result.is_none());
}
#[tokio::test]
async fn test_full_chain_with_prompt_source_falls_through_to_prompt() {
use tempfile::NamedTempFile;
let temp_file = NamedTempFile::new().unwrap();
let sqlite = SqliteSource::open(temp_file.path().to_path_buf())
.await
.unwrap();
let sqlite = Arc::new(sqlite);
let source1 = Arc::new(MockSource::new());
let prompt_source = Arc::new(PromptSource::new());
let manager =
ConfigManager::new(vec![source1.clone(), sqlite.clone(), prompt_source.clone()]);
let result: Result<TestConfig, ConfigError> = manager.get().await;
assert!(matches!(result, Err(ConfigError::NotFound { .. })));
}
#[derive(Debug)]
struct AlwaysErrorStore;
#[async_trait]
impl SecretStore for AlwaysErrorStore {
async fn get_raw(&self, _: &str, _: &str) -> Result<Vec<u8>, SecretStoreError> {
Err(SecretStoreError::Store("Connection refused".into()))
}
async fn set_raw(&self, _: &str, _: &str, _: &[u8]) -> Result<(), SecretStoreError> {
Err(SecretStoreError::Store("Connection refused".into()))
}
}
#[tokio::test]
async fn test_store_source_error_falls_through_to_sqlite() {
    use tempfile::NamedTempFile;
    // A StoreSource backed by an always-failing store must not abort the
    // chain: the manager should fall through and read from sqlite.
    let temp_file = NamedTempFile::new().unwrap();
    let sqlite = Arc::new(
        SqliteSource::open(temp_file.path().to_path_buf())
            .await
            .unwrap(),
    );
    let failing = Arc::new(StoreSource::new("test".to_string(), AlwaysErrorStore));
    let manager = ConfigManager::new(vec![failing, sqlite.clone()]);
    let expected = TestConfig {
        name: "from_sqlite".to_string(),
        count: 42,
    };
    sqlite
        .set("TestConfig", &serde_json::to_value(&expected).unwrap())
        .await
        .unwrap();
    let loaded: TestConfig = manager.get().await.unwrap();
    assert_eq!(loaded.name, "from_sqlite");
    assert_eq!(loaded.count, 42);
}
/// Test double: a SecretStore whose reads always report NotFound and whose
/// writes always succeed (but store nothing).
#[derive(Debug)]
struct NeverFindsStore;
#[async_trait]
impl SecretStore for NeverFindsStore {
    // Every lookup misses, regardless of the requested namespace/key.
    async fn get_raw(&self, _: &str, _: &str) -> Result<Vec<u8>, SecretStoreError> {
        Err(SecretStoreError::NotFound {
            namespace: "test".to_string(),
            key: "test".to_string(),
        })
    }
    // Writes are accepted and discarded.
    async fn set_raw(&self, _: &str, _: &str, _: &[u8]) -> Result<(), SecretStoreError> {
        Ok(())
    }
}
#[tokio::test]
async fn test_store_source_not_found_falls_through_to_sqlite() {
    use tempfile::NamedTempFile;
    // A StoreSource that reports NotFound for every key must let the chain
    // continue, so the value seeded into sqlite is returned.
    let temp_file = NamedTempFile::new().unwrap();
    let sqlite = Arc::new(
        SqliteSource::open(temp_file.path().to_path_buf())
            .await
            .unwrap(),
    );
    let missing = Arc::new(StoreSource::new("test".to_string(), NeverFindsStore));
    let manager = ConfigManager::new(vec![missing, sqlite.clone()]);
    let expected = TestConfig {
        name: "from_sqlite".to_string(),
        count: 99,
    };
    sqlite
        .set("TestConfig", &serde_json::to_value(&expected).unwrap())
        .await
        .unwrap();
    let loaded: TestConfig = manager.get().await.unwrap();
    assert_eq!(loaded.name, "from_sqlite");
    assert_eq!(loaded.count, 99);
}
}

View File

@@ -42,4 +42,8 @@ impl ConfigSource for EnvSource {
}
Ok(())
}
fn should_persist(&self) -> bool {
false
}
}

View File

@@ -1,4 +1,5 @@
pub mod env;
pub mod local_file;
pub mod prompt;
pub mod sqlite;
pub mod store;

View File

@@ -36,4 +36,8 @@ impl ConfigSource for PromptSource {
async fn set(&self, _key: &str, _value: &serde_json::Value) -> Result<(), ConfigError> {
Ok(())
}
fn should_persist(&self) -> bool {
false
}
}

View File

@@ -0,0 +1,91 @@
use async_trait::async_trait;
use sqlx::{SqlitePool, sqlite::SqlitePoolOptions};
use std::path::PathBuf;
use tokio::fs;
use crate::{ConfigError, ConfigSource};
/// ConfigSource backed by a local SQLite database file (via sqlx).
pub struct SqliteSource {
    // Connection pool to the underlying SQLite database.
    pool: SqlitePool,
}
impl SqliteSource {
    /// Open (or create) the configuration database at `path`.
    ///
    /// Creates the parent directory if missing, connects with `mode=rwc` so
    /// the database file is created on first use, and creates the `config`
    /// table when absent.
    pub async fn open(path: PathBuf) -> Result<Self, ConfigError> {
        if let Some(dir) = path.parent() {
            fs::create_dir_all(dir).await.map_err(|e| {
                ConfigError::SqliteError(format!("Failed to create config directory: {}", e))
            })?;
        }
        let url = format!("sqlite:{}?mode=rwc", path.display());
        let pool = SqlitePoolOptions::new()
            .connect(&url)
            .await
            .map_err(|e| ConfigError::SqliteError(format!("Failed to open database: {}", e)))?;
        sqlx::query(
            "CREATE TABLE IF NOT EXISTS config (
                key TEXT PRIMARY KEY,
                value TEXT NOT NULL,
                updated_at TEXT NOT NULL DEFAULT (datetime('now'))
            )",
        )
        .execute(&pool)
        .await
        .map_err(|e| ConfigError::SqliteError(format!("Failed to create table: {}", e)))?;
        Ok(Self { pool })
    }
    /// Open the database at its default location, `<config dir>/config.db`.
    pub async fn default() -> Result<Self, ConfigError> {
        let base = crate::default_config_dir().ok_or_else(|| {
            ConfigError::SqliteError("Could not determine default config directory".into())
        })?;
        Self::open(base.join("config.db")).await
    }
}
#[async_trait]
impl ConfigSource for SqliteSource {
    /// Look up `key`; returns `Ok(None)` when no row exists.
    async fn get(&self, key: &str) -> Result<Option<serde_json::Value>, ConfigError> {
        let row: Option<(String,)> = sqlx::query_as("SELECT value FROM config WHERE key = ?")
            .bind(key)
            .fetch_optional(&self.pool)
            .await
            .map_err(|e| ConfigError::SqliteError(format!("Failed to query database: {}", e)))?;
        // Parse the stored JSON text, surfacing parse failures per key.
        row.map(|(raw,)| {
            serde_json::from_str(&raw).map_err(|e| ConfigError::Deserialization {
                key: key.to_string(),
                source: e,
            })
        })
        .transpose()
    }
    /// Serialize `value` as JSON text and upsert it under `key`,
    /// refreshing the `updated_at` timestamp.
    async fn set(&self, key: &str, value: &serde_json::Value) -> Result<(), ConfigError> {
        let serialized = serde_json::to_string(value).map_err(|e| ConfigError::Serialization {
            key: key.to_string(),
            source: e,
        })?;
        sqlx::query(
            "INSERT OR REPLACE INTO config (key, value, updated_at) VALUES (?, ?, datetime('now'))",
        )
        .bind(key)
        .bind(&serialized)
        .execute(&self.pool)
        .await
        .map_err(|e| {
            ConfigError::SqliteError(format!("Failed to insert/update database: {}", e))
        })?;
        Ok(())
    }
}

View File

@@ -27,7 +27,7 @@ impl<S: SecretStore + 'static> ConfigSource for StoreSource<S> {
Ok(Some(value))
}
Err(harmony_secret::SecretStoreError::NotFound { .. }) => Ok(None),
Err(e) => Err(ConfigError::StoreError(e)),
Err(_) => Ok(None),
}
}

View File

@@ -22,6 +22,8 @@ inquire.workspace = true
interactive-parse = "0.1.5"
schemars = "0.8"
vaultrs = "0.7.4"
reqwest = { workspace = true, features = ["json"] }
url.workspace = true
[dev-dependencies]
pretty_assertions.workspace = true

View File

@@ -29,4 +29,18 @@ lazy_static! {
env::var("OPENBAO_SKIP_TLS").map(|v| v == "true").unwrap_or(false);
pub static ref OPENBAO_KV_MOUNT: String =
env::var("OPENBAO_KV_MOUNT").unwrap_or_else(|_| "secret".to_string());
pub static ref OPENBAO_AUTH_MOUNT: String =
env::var("OPENBAO_AUTH_MOUNT").unwrap_or_else(|_| "userpass".to_string());
}
lazy_static! {
// Zitadel OIDC configuration (for JWT auth flow)
pub static ref HARMONY_SSO_URL: Option<String> =
env::var("HARMONY_SSO_URL").ok();
pub static ref HARMONY_SSO_CLIENT_ID: Option<String> =
env::var("HARMONY_SSO_CLIENT_ID").ok();
pub static ref HARMONY_SSO_CLIENT_SECRET: Option<String> =
env::var("HARMONY_SSO_CLIENT_SECRET").ok();
pub static ref HARMONY_SECRETS_URL: Option<String> =
env::var("HARMONY_SECRETS_URL").ok();
}

View File

@@ -1,13 +1,16 @@
pub mod config;
mod store;
pub mod store;
use crate::config::SECRET_NAMESPACE;
use async_trait::async_trait;
use config::HARMONY_SSO_CLIENT_ID;
use config::HARMONY_SSO_URL;
use config::INFISICAL_CLIENT_ID;
use config::INFISICAL_CLIENT_SECRET;
use config::INFISICAL_ENVIRONMENT;
use config::INFISICAL_PROJECT_ID;
use config::INFISICAL_URL;
use config::OPENBAO_AUTH_MOUNT;
use config::OPENBAO_KV_MOUNT;
use config::OPENBAO_PASSWORD;
use config::OPENBAO_SKIP_TLS;
@@ -23,7 +26,7 @@ use serde::{Serialize, de::DeserializeOwned};
use std::fmt;
use store::InfisicalSecretStore;
use store::LocalFileSecretStore;
use store::OpenbaoSecretStore;
pub use store::OpenbaoSecretStore;
use thiserror::Error;
use tokio::sync::OnceCell;
@@ -85,10 +88,13 @@ async fn init_secret_manager() -> SecretManager {
let store = OpenbaoSecretStore::new(
OPENBAO_URL.clone().expect("Openbao/Vault URL must be set, see harmony_secret config for ways to provide it. You can try with OPENBAO_URL or VAULT_ADDR"),
OPENBAO_KV_MOUNT.clone(),
OPENBAO_AUTH_MOUNT.clone(),
*OPENBAO_SKIP_TLS,
OPENBAO_TOKEN.clone(),
OPENBAO_USERNAME.clone(),
OPENBAO_PASSWORD.clone(),
HARMONY_SSO_URL.clone(),
HARMONY_SSO_CLIENT_ID.clone(),
)
.await
.expect("Failed to initialize Openbao/Vault secret store");

View File

@@ -1,9 +1,8 @@
mod infisical;
mod local_file;
mod openbao;
pub mod zitadel;
pub use infisical::InfisicalSecretStore;
pub use infisical::*;
pub use local_file::LocalFileSecretStore;
pub use local_file::*;
pub use openbao::OpenbaoSecretStore;

View File

@@ -6,14 +6,10 @@ use std::fmt::Debug;
use std::fs;
use std::path::PathBuf;
use vaultrs::auth;
use vaultrs::client::{Client, VaultClient, VaultClientSettingsBuilder};
use vaultrs::client::{VaultClient, VaultClientSettingsBuilder};
use vaultrs::kv2;
/// Token response from Vault/Openbao auth endpoints
#[derive(Debug, Deserialize)]
struct TokenResponse {
auth: AuthInfo,
}
use super::zitadel::ZitadelOidcAuth;
#[derive(Debug, Serialize, Deserialize)]
struct AuthInfo {
@@ -36,6 +32,7 @@ impl From<vaultrs::api::AuthInfo> for AuthInfo {
pub struct OpenbaoSecretStore {
client: VaultClient,
kv_mount: String,
auth_mount: String,
}
impl Debug for OpenbaoSecretStore {
@@ -43,26 +40,35 @@ impl Debug for OpenbaoSecretStore {
f.debug_struct("OpenbaoSecretStore")
.field("client", &self.client.settings)
.field("kv_mount", &self.kv_mount)
.field("auth_mount", &self.auth_mount)
.finish()
}
}
impl OpenbaoSecretStore {
/// Creates a new Openbao/Vault secret store with authentication
/// Creates a new Openbao/Vault secret store with authentication.
///
/// - `kv_mount`: The KV v2 engine mount path (e.g., `"secret"`). Used for secret storage.
/// - `auth_mount`: The auth method mount path (e.g., `"userpass"`). Used for userpass authentication.
/// - `zitadel_sso_url`: Zitadel OIDC server URL (e.g., `"https://sso.example.com"`). If provided along with `zitadel_client_id`, Zitadel OIDC device flow will be attempted.
/// - `zitadel_client_id`: OIDC client ID registered in Zitadel.
pub async fn new(
base_url: String,
kv_mount: String,
auth_mount: String,
skip_tls: bool,
token: Option<String>,
username: Option<String>,
password: Option<String>,
zitadel_sso_url: Option<String>,
zitadel_client_id: Option<String>,
) -> Result<Self, SecretStoreError> {
info!("OPENBAO_STORE: Initializing client for URL: {base_url}");
// 1. If token is provided via env var, use it directly
if let Some(t) = token {
debug!("OPENBAO_STORE: Using token from environment variable");
return Self::with_token(&base_url, skip_tls, &t, &kv_mount);
return Self::with_token(&base_url, skip_tls, &t, &kv_mount, &auth_mount);
}
// 2. Try to load cached token
@@ -76,32 +82,81 @@ impl OpenbaoSecretStore {
skip_tls,
&cached_token.client_token,
&kv_mount,
&auth_mount,
);
}
warn!("OPENBAO_STORE: Cached token is invalid or expired");
}
// 3. Authenticate with username/password
// 3. Try Zitadel OIDC device flow if configured
if let (Some(sso_url), Some(client_id)) = (zitadel_sso_url, zitadel_client_id) {
info!("OPENBAO_STORE: Attempting Zitadel OIDC device flow");
match Self::authenticate_zitadel_oidc(&sso_url, &client_id, skip_tls).await {
Ok(oidc_session) => {
info!("OPENBAO_STORE: Zitadel OIDC authentication successful");
// Cache the OIDC session token
let auth_info = AuthInfo {
client_token: oidc_session.openbao_token,
lease_duration: Some(oidc_session.openbao_token_ttl),
token_type: "oidc".to_string(),
};
if let Err(e) = Self::cache_token(&cache_path, &auth_info) {
warn!("OPENBAO_STORE: Failed to cache OIDC token: {e}");
}
return Self::with_token(
&base_url,
skip_tls,
&auth_info.client_token,
&kv_mount,
&auth_mount,
);
}
Err(e) => {
warn!("OPENBAO_STORE: Zitadel OIDC failed: {e}, trying other auth methods");
}
}
}
// 4. Authenticate with username/password
let (user, pass) = match (username, password) {
(Some(u), Some(p)) => (u, p),
_ => {
return Err(SecretStoreError::Store(
"No valid token found and username/password not provided. \
Set OPENBAO_TOKEN or OPENBAO_USERNAME/OPENBAO_PASSWORD environment variables."
Set OPENBAO_TOKEN, OPENBAO_USERNAME/OPENBAO_PASSWORD, or \
HARMONY_SSO_URL/HARMONY_SSO_CLIENT_ID for Zitadel OIDC."
.into(),
));
}
};
let token =
Self::authenticate_userpass(&base_url, &kv_mount, skip_tls, &user, &pass).await?;
Self::authenticate_userpass(&base_url, &auth_mount, skip_tls, &user, &pass).await?;
// Cache the token
if let Err(e) = Self::cache_token(&cache_path, &token) {
warn!("OPENBAO_STORE: Failed to cache token: {e}");
}
Self::with_token(&base_url, skip_tls, &token.client_token, &kv_mount)
Self::with_token(
&base_url,
skip_tls,
&token.client_token,
&kv_mount,
&auth_mount,
)
}
async fn authenticate_zitadel_oidc(
sso_url: &str,
client_id: &str,
skip_tls: bool,
) -> Result<super::zitadel::OidcSession, SecretStoreError> {
let oidc_auth = ZitadelOidcAuth::new(sso_url.to_string(), client_id.to_string(), skip_tls);
oidc_auth
.authenticate()
.await
.map_err(|e| SecretStoreError::Store(e.into()))
}
/// Create a client with an existing token
@@ -110,6 +165,7 @@ impl OpenbaoSecretStore {
skip_tls: bool,
token: &str,
kv_mount: &str,
auth_mount: &str,
) -> Result<Self, SecretStoreError> {
let mut settings = VaultClientSettingsBuilder::default();
settings.address(base_url).token(token);
@@ -129,6 +185,7 @@ impl OpenbaoSecretStore {
Ok(Self {
client,
kv_mount: kv_mount.to_string(),
auth_mount: auth_mount.to_string(),
})
}
@@ -168,7 +225,6 @@ impl OpenbaoSecretStore {
if let Some(parent) = path.parent() {
fs::create_dir_all(parent)?;
}
// Set file permissions to 0600 (owner read/write only)
#[cfg(unix)]
{
use std::os::unix::fs::OpenOptionsExt;
@@ -209,14 +265,13 @@ impl OpenbaoSecretStore {
/// Authenticate using username/password (userpass auth method)
async fn authenticate_userpass(
base_url: &str,
kv_mount: &str,
auth_mount: &str,
skip_tls: bool,
username: &str,
password: &str,
) -> Result<AuthInfo, SecretStoreError> {
info!("OPENBAO_STORE: Authenticating with username/password");
// Create a client without a token for authentication
let mut settings = VaultClientSettingsBuilder::default();
settings.address(base_url);
if skip_tls {
@@ -230,8 +285,7 @@ impl OpenbaoSecretStore {
)
.map_err(|e| SecretStoreError::Store(Box::new(e)))?;
// Authenticate using userpass method
let token = auth::userpass::login(&client, kv_mount, username, password)
let token = auth::userpass::login(&client, auth_mount, username, password)
.await
.map_err(|e| SecretStoreError::Store(Box::new(e)))?;
@@ -249,7 +303,6 @@ impl SecretStore for OpenbaoSecretStore {
let data: serde_json::Value = kv2::read(&self.client, &self.kv_mount, &path)
.await
.map_err(|e| {
// Check for not found error
if e.to_string().contains("does not exist") || e.to_string().contains("404") {
SecretStoreError::NotFound {
namespace: namespace.to_string(),
@@ -260,7 +313,6 @@ impl SecretStore for OpenbaoSecretStore {
}
})?;
// Extract the actual secret value stored under the "value" key
let value = data.get("value").and_then(|v| v.as_str()).ok_or_else(|| {
SecretStoreError::Store("Secret does not contain expected 'value' field".into())
})?;
@@ -281,7 +333,6 @@ impl SecretStore for OpenbaoSecretStore {
let value_str =
String::from_utf8(val.to_vec()).map_err(|e| SecretStoreError::Store(Box::new(e)))?;
// Create the data structure expected by our format
let data = serde_json::json!({
"value": value_str
});

View File

@@ -0,0 +1,335 @@
use log::{debug, info};
use serde::{Deserialize, Serialize};
use std::fs;
use std::path::PathBuf;
use std::time::Duration;
/// Cached authentication session produced by the Zitadel OIDC device flow.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OidcSession {
    // Access token used as the Openbao/Vault client token.
    pub openbao_token: String,
    // Token lifetime in seconds, as reported by the token endpoint.
    pub openbao_token_ttl: u64,
    pub openbao_renewable: bool,
    // OAuth refresh token, when the server granted one.
    pub refresh_token: Option<String>,
    pub id_token: Option<String>,
    // Absolute expiry as a unix timestamp in seconds; None means unknown.
    pub expires_at: Option<i64>,
}
impl OidcSession {
    /// Returns true when the session's absolute expiry timestamp has passed.
    /// A session without an `expires_at` is treated as never expiring.
    pub fn is_expired(&self) -> bool {
        match self.expires_at {
            Some(expires_at) => {
                let now = std::time::SystemTime::now()
                    .duration_since(std::time::UNIX_EPOCH)
                    .map(|d| d.as_secs() as i64)
                    .unwrap_or(0);
                expires_at <= now
            }
            None => false,
        }
    }
    /// Whether the Openbao token derived from this session has expired.
    ///
    /// The `_ttl` argument is unused: expiry is tracked solely via
    /// `expires_at`, so this delegates to [`Self::is_expired`]. (The original
    /// duplicated the body of `is_expired` verbatim; the delegation keeps the
    /// two checks from drifting apart.)
    pub fn is_openbao_token_expired(&self, _ttl: u64) -> bool {
        self.is_expired()
    }
}
/// Response from the OAuth 2.0 device authorization endpoint (RFC 8628 §3.2).
///
/// Note: the original carried `#[serde(rename = "...")]` attributes that
/// renamed fields to their own names — no-ops, removed here.
#[derive(Debug, Deserialize)]
struct DeviceAuthorizationResponse {
    device_code: String,
    user_code: String,
    verification_uri: String,
    // Optional convenience URL with the user code already embedded.
    verification_uri_complete: Option<String>,
    // Lifetime of the device code, in seconds.
    expires_in: u64,
    // Minimum polling interval requested by the server, in seconds.
    interval: u64,
}
/// Successful response from the token endpoint.
#[derive(Debug, Deserialize)]
struct TokenResponse {
    access_token: String,
    #[serde(default)]
    expires_in: Option<u64>,
    #[serde(default)]
    refresh_token: Option<String>,
    #[serde(default)]
    id_token: Option<String>,
}
/// Error payload from the token endpoint (RFC 8628 §3.5).
#[derive(Debug, Deserialize)]
struct TokenErrorResponse {
    error: String,
    error_description: Option<String>,
}
/// Path of the on-disk OIDC session cache file.
///
/// The filename embeds a hash of the literal "zitadel-oidc", so the location
/// is the same on every call within a run. Falls back to /tmp when the
/// platform data directory cannot be resolved.
fn get_session_cache_path() -> PathBuf {
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};
    let mut hasher = DefaultHasher::new();
    "zitadel-oidc".hash(&mut hasher);
    let hash = format!("{:016x}", hasher.finish());
    let file_name = format!("oidc_session_{hash}");
    match directories::BaseDirs::new() {
        Some(dirs) => dirs
            .data_dir()
            .join("harmony")
            .join("secrets")
            .join(file_name),
        None => PathBuf::from("/tmp").join(file_name),
    }
}
/// Read and deserialize a cached OIDC session from disk.
fn load_session() -> Result<OidcSession, String> {
    let path = get_session_cache_path();
    let contents = fs::read_to_string(&path)
        .map_err(|e| format!("Could not load session from {path:?}: {e}"))?;
    serde_json::from_str(&contents)
        .map_err(|e| format!("Could not deserialize session from {path:?}: {e}"))
}
/// Persist an OIDC session to the cache file.
///
/// On Unix the file is created with mode 0600 so the token material is only
/// readable by the owner. The session is serialized before the file is
/// opened, so a serialization failure never truncates an existing cache file
/// (the original opened-and-truncated first on Unix, and serialized twice).
fn save_session(session: &OidcSession) -> Result<(), String> {
    let path = get_session_cache_path();
    if let Some(parent) = path.parent() {
        fs::create_dir_all(parent)
            .map_err(|e| format!("Could not create session directory: {e}"))?;
    }
    let contents = serde_json::to_string_pretty(session)
        .map_err(|e| format!("Could not serialize session: {e}"))?;
    #[cfg(unix)]
    {
        use std::io::Write;
        use std::os::unix::fs::OpenOptionsExt;
        let mut file = fs::OpenOptions::new()
            .write(true)
            .create(true)
            .truncate(true)
            .mode(0o600)
            .open(&path)
            .map_err(|e| format!("Could not open session file: {e}"))?;
        file.write_all(contents.as_bytes())
            .map_err(|e| format!("Could not write session file: {e}"))?;
    }
    #[cfg(not(unix))]
    {
        fs::write(&path, contents).map_err(|e| format!("Could not write session file: {e}"))?;
    }
    Ok(())
}
/// Driver for the OAuth 2.0 device authorization flow against a Zitadel
/// OIDC server.
pub struct ZitadelOidcAuth {
    // Base URL of the Zitadel instance, e.g. "https://sso.example.com".
    sso_url: String,
    // OIDC client ID registered in Zitadel.
    client_id: String,
    // When true, TLS certificate validation is disabled.
    skip_tls: bool,
}
impl ZitadelOidcAuth {
    /// Create a new device-flow authenticator against `sso_url` for `client_id`.
    pub fn new(sso_url: String, client_id: String, skip_tls: bool) -> Self {
        Self {
            sso_url,
            client_id,
            skip_tls,
        }
    }
    /// Run the OAuth 2.0 device authorization flow (RFC 8628).
    ///
    /// A cached, unexpired session is reused when available. Otherwise the
    /// user is shown a verification URL + code and the token endpoint is
    /// polled until the flow completes, fails, or times out. The resulting
    /// session is cached best-effort (a cache write failure is ignored).
    pub async fn authenticate(&self) -> Result<OidcSession, String> {
        if let Ok(session) = load_session() {
            if !session.is_expired() {
                info!("ZITADEL_OIDC: Using cached session");
                return Ok(session);
            }
        }
        info!("ZITADEL_OIDC: Starting device authorization flow");
        let device_code = self.request_device_code().await?;
        self.print_verification_instructions(&device_code);
        let token_response = self
            .poll_for_token(&device_code, device_code.interval)
            .await?;
        let session = self.process_token_response(token_response).await?;
        let _ = save_session(&session);
        Ok(session)
    }
    /// Build a reqwest client, optionally accepting invalid TLS certificates.
    fn http_client(&self) -> Result<reqwest::Client, String> {
        let mut builder = reqwest::Client::builder();
        if self.skip_tls {
            builder = builder.danger_accept_invalid_certs(true);
        }
        builder
            .build()
            .map_err(|e| format!("Failed to build HTTP client: {e}"))
    }
    /// POST to the device authorization endpoint to obtain a device/user code pair.
    async fn request_device_code(&self) -> Result<DeviceAuthorizationResponse, String> {
        let client = self.http_client()?;
        let params = [
            ("client_id", self.client_id.as_str()),
            ("scope", "openid email profile offline_access"),
        ];
        let response = client
            .post(format!("{}/oauth/v2/device_authorization", self.sso_url))
            .form(&params)
            .send()
            .await
            .map_err(|e| format!("Device authorization request failed: {e}"))?;
        response
            .json::<DeviceAuthorizationResponse>()
            .await
            .map_err(|e| format!("Failed to parse device authorization response: {e}"))
    }
    /// Print the verification URL and user code for the operator to follow.
    fn print_verification_instructions(&self, code: &DeviceAuthorizationResponse) {
        println!();
        println!("=================================================");
        println!("[Harmony] To authenticate with Zitadel, open your browser:");
        println!(" {}", code.verification_uri);
        println!();
        println!(" and enter code: {}", code.user_code);
        if let Some(ref complete_url) = code.verification_uri_complete {
            println!();
            println!(" Or visit this direct link (code is pre-filled):");
            println!(" {}", complete_url);
        }
        println!("=================================================");
        println!();
    }
    /// Poll the token endpoint until authorization completes, fails, or the
    /// device code expires.
    ///
    /// FIX: the polling interval is clamped to at least 5 seconds *before*
    /// computing `max_attempts`. The original divided `code.expires_in` by the
    /// raw `interval_secs`, which panics with a division by zero when the
    /// server reports `interval = 0`.
    async fn poll_for_token(
        &self,
        code: &DeviceAuthorizationResponse,
        interval_secs: u64,
    ) -> Result<TokenResponse, String> {
        let client = self.http_client()?;
        let params = [
            ("grant_type", "urn:ietf:params:oauth:grant-type:device_code"),
            ("device_code", code.device_code.as_str()),
            ("client_id", self.client_id.as_str()),
        ];
        let interval_secs = interval_secs.max(5);
        let interval = Duration::from_secs(interval_secs);
        let max_attempts = (code.expires_in / interval_secs).max(60) as usize;
        for attempt in 0..max_attempts {
            tokio::time::sleep(interval).await;
            let response = client
                .post(format!("{}/oauth/v2/token", self.sso_url))
                .form(&params)
                .send()
                .await
                .map_err(|e| format!("Token request failed: {e}"))?;
            let status = response.status();
            let body = response
                .text()
                .await
                .map_err(|e| format!("Failed to read response body: {e}"))?;
            if status == 400 {
                if let Ok(error) = serde_json::from_str::<TokenErrorResponse>(&body) {
                    match error.error.as_str() {
                        "authorization_pending" => {
                            // User has not finished the browser step yet; keep polling.
                            debug!("ZITADEL_OIDC: authorization_pending (attempt {})", attempt);
                            continue;
                        }
                        "slow_down" => {
                            // RFC 8628: back off by an extra delay before the next poll.
                            debug!("ZITADEL_OIDC: slow_down, increasing interval");
                            tokio::time::sleep(Duration::from_secs(5)).await;
                            continue;
                        }
                        "expired_token" => {
                            return Err(
                                "Device code expired. Please restart authentication.".to_string()
                            );
                        }
                        "access_denied" => {
                            return Err("Access denied by user.".to_string());
                        }
                        _ => {
                            return Err(format!(
                                "OAuth error: {} - {}",
                                error.error,
                                error.error_description.unwrap_or_default()
                            ));
                        }
                    }
                }
            }
            // Any other response (or an unparseable 400) is treated as the
            // token payload.
            return serde_json::from_str(&body)
                .map_err(|e| format!("Failed to parse token response: {e}"));
        }
        Err("Token polling timed out".to_string())
    }
    /// Convert a token endpoint response into a cached session, computing an
    /// absolute expiry timestamp from `expires_in` when present.
    async fn process_token_response(&self, response: TokenResponse) -> Result<OidcSession, String> {
        let expires_at = response.expires_in.map(|ttl| {
            let now = std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .map(|d| d.as_secs() as i64)
                .unwrap_or(0);
            now + ttl as i64
        });
        Ok(OidcSession {
            openbao_token: response.access_token,
            openbao_token_ttl: response.expires_in.unwrap_or(3600),
            openbao_renewable: true,
            refresh_token: response.refresh_token,
            id_token: response.id_token,
            expires_at,
        })
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Current unix time in seconds (0 on clock error, mirroring production code).
    fn unix_now() -> i64 {
        std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .map(|d| d.as_secs() as i64)
            .unwrap_or(0)
    }
    // Build a session differing only in its expiry timestamp.
    fn session_expiring_at(expires_at: Option<i64>) -> OidcSession {
        OidcSession {
            openbao_token: "test".to_string(),
            openbao_token_ttl: 3600,
            openbao_renewable: true,
            refresh_token: None,
            id_token: None,
            expires_at,
        }
    }
    #[test]
    fn test_oidc_session_is_expired() {
        // An epoch expiry is always in the past.
        assert!(session_expiring_at(Some(0)).is_expired());
        // An expiry one hour from now has not been reached.
        assert!(!session_expiring_at(Some(unix_now() + 3600)).is_expired());
    }
}

View File

@@ -10,6 +10,31 @@ const K3D_BIN_FILE_NAME: &str = "k3d";
pub struct K3d {
base_dir: PathBuf,
cluster_name: Option<String>,
port_mappings: Vec<PortMapping>,
}
/// Host-to-container port mapping passed to `k3d cluster create -p`.
#[derive(Debug, Clone)]
pub struct PortMapping {
    pub host_port: u32,
    pub container_port: u32,
    // k3d node filter the mapping applies to (defaults to "loadbalancer").
    pub loadbalancer: String,
}
impl PortMapping {
    /// Create a mapping targeting the default "loadbalancer" node filter.
    pub fn new(host_port: u32, container_port: u32) -> Self {
        Self {
            host_port,
            container_port,
            loadbalancer: "loadbalancer".to_string(),
        }
    }
    /// Render as a k3d `-p` argument: `<host>:<container>@<node-filter>`.
    pub fn to_arg(&self) -> String {
        let Self {
            host_port,
            container_port,
            loadbalancer,
        } = self;
        format!("{host_port}:{container_port}@{loadbalancer}")
    }
}
impl K3d {
@@ -17,9 +42,15 @@ impl K3d {
Self {
base_dir,
cluster_name,
port_mappings: Vec::new(),
}
}
pub fn with_port_mappings(mut self, mappings: Vec<PortMapping>) -> Self {
self.port_mappings = mappings;
self
}
async fn get_binary_for_current_platform(
&self,
latest_release: octocrab::models::repos::Release,
@@ -329,14 +360,29 @@ impl K3d {
}
fn create_cluster(&self, cluster_name: &str) -> Result<(), String> {
let output = self.run_k3d_command(["cluster", "create", cluster_name])?;
let mut args = vec![
"cluster".to_string(),
"create".to_string(),
cluster_name.to_string(),
];
for mapping in &self.port_mappings {
args.push("-p".to_string());
args.push(mapping.to_arg());
}
let output = self.run_k3d_command(args)?;
if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
return Err(format!("Failed to create cluster: {}", stderr));
}
info!("Successfully created k3d cluster '{}'", cluster_name);
info!(
"Successfully created k3d cluster '{}' with {} port mappings",
cluster_name,
self.port_mappings.len()
);
Ok(())
}

22
opnsense-api/Cargo.toml Normal file
View File

@@ -0,0 +1,22 @@
[package]
name = "opnsense-api"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
[dependencies]
reqwest.workspace = true
serde.workspace = true
serde_json.workspace = true
tokio.workspace = true
thiserror.workspace = true
log.workspace = true
env_logger.workspace = true
inquire.workspace = true
http.workspace = true
base64.workspace = true
[dev-dependencies]
tokio-test.workspace = true
pretty_assertions.workspace = true

View File

@@ -0,0 +1,173 @@
//! Example: check for firmware updates and apply them.
//!
//! ```text
//! cargo run --example firmware_update
//! ```
//!
//! This runs a full firmware update workflow:
//! 1. POST /api/core/firmware/check — triggers a background check for updates
//! 2. POST /api/core/firmware/status — retrieves the check results
//! 3. If updates are available, POST /api/core/firmware/update — applies them
//!
//! The check can take a while since it connects to the update mirror.
use std::env;
use opnsense_api::client::OpnsenseClient;
use serde::{Deserialize, Serialize};
/// Response of `POST /api/core/firmware/check`.
#[derive(Debug, Deserialize)]
pub struct FirmwareCheckResponse {
    pub status: String,
    // UUID identifying the background check job.
    pub msg_uuid: String,
}
/// Response of `POST /api/core/firmware/status`.
#[derive(Debug, Deserialize, Serialize)]
pub struct FirmwareStatusResponse {
    // Observed values handled below in main(): "none", "update", "upgrade", "error".
    pub status: String,
    #[serde(default)]
    pub status_msg: Option<String>,
    // Map of package name -> info; main() reads "reason"/"old"/"new" keys from each entry.
    #[serde(default)]
    pub all_packages: serde_json::Value,
    #[serde(default)]
    pub product: serde_json::Value,
    // "1" when the pending update requires a reboot.
    #[serde(default)]
    pub status_reboot: Option<String>,
}
/// Response of firmware action endpoints (e.g. `POST /api/core/firmware/update`).
#[derive(Debug, Deserialize)]
pub struct FirmwareActionResponse {
    pub status: String,
    #[serde(default)]
    pub msg_uuid: String,
    #[serde(default)]
    pub status_msg: Option<String>,
}
/// Build an OpnsenseClient from OPNSENSE_* environment variables, exiting
/// with usage hints when the API credentials are missing.
fn build_client() -> OpnsenseClient {
    let base_url =
        env::var("OPNSENSE_BASE_URL").unwrap_or_else(|_| "https://192.168.1.1/api".to_string());
    let credentials = (
        env::var("OPNSENSE_API_KEY").ok(),
        env::var("OPNSENSE_API_SECRET").ok(),
    );
    let (Some(key), Some(secret)) = credentials else {
        eprintln!("ERROR: OPNSENSE_API_KEY and OPNSENSE_API_SECRET must be set.");
        eprintln!(" export OPNSENSE_API_KEY=your_key");
        eprintln!(" export OPNSENSE_API_SECRET=your_secret");
        eprintln!(" export OPNSENSE_BASE_URL=https://your-firewall/api");
        std::process::exit(1);
    };
    OpnsenseClient::builder()
        .base_url(&base_url)
        .auth_from_key_secret(&key, &secret)
        .skip_tls_verify()
        .timeout_secs(120)
        .build()
        .expect("failed to build HTTP client")
}
/// Firmware update workflow: trigger a check, read its status, and — only
/// when explicitly opted in via OPNSENSE_FIRMWARE_UPDATE=1 — apply updates.
#[tokio::main]
async fn main() {
    env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init();
    let client = build_client();
    println!();
    println!("╔═══════════════════════════════════════════════════════════╗");
    println!("║ OPNsense Firmware Update ║");
    println!("╚═══════════════════════════════════════════════════════════╝");
    println!();
    println!(" [1/2] Checking for updates (this may take a moment) ...");
    println!();
    // Step 1: trigger the update check; the firewall runs it in the background.
    log::info!("POST /api/core/firmware/check");
    let check: FirmwareCheckResponse = client
        .post_typed("core", "firmware", "check", None::<&()>)
        .await
        .expect("check request failed");
    println!(" Check triggered, msg_uuid: {}", check.msg_uuid);
    println!();
    println!(" Waiting for check to complete ...");
    // Fixed delay — completion is not polled. NOTE(review): 5s may be too
    // short on slow update mirrors; confirm against real hardware.
    tokio::time::sleep(tokio::time::Duration::from_secs(5)).await;
    println!();
    println!(" [2/2] Fetching update status ...");
    // Step 2: read back the result of the background check.
    log::info!("POST /api/core/firmware/status");
    let status: FirmwareStatusResponse = client
        .post_typed("core", "firmware", "status", None::<&()>)
        .await
        .expect("status request failed");
    println!();
    match status.status.as_str() {
        "none" => {
            println!("{}", status.status_msg.as_deref().unwrap_or("No updates available."));
        }
        "update" | "upgrade" => {
            println!("{}", status.status_msg.as_deref().unwrap_or("Updates available."));
            if let Some(reboot) = status.status_reboot {
                if reboot == "1" {
                    println!(" ⚠ This update requires a reboot.");
                }
            }
            // all_packages is a name -> info object map; anything else counts as 0.
            let pkg_count = if let serde_json::Value::Object(ref map) = status.all_packages {
                map.len()
            } else {
                0
            };
            if pkg_count > 0 {
                println!(" {} package(s) to update:", pkg_count);
                if let serde_json::Value::Object(ref packages) = status.all_packages {
                    // Show at most 10 packages to keep the output readable.
                    for (name, info) in packages.iter().take(10) {
                        let reason = info.get("reason").and_then(|v| v.as_str()).unwrap_or("?");
                        let old_ver = info.get("old").and_then(|v| v.as_str()).unwrap_or("N/A");
                        let new_ver = info.get("new").and_then(|v| v.as_str()).unwrap_or("?");
                        println!(" {:40} {} {}{}", name, reason, old_ver, new_ver);
                    }
                    if pkg_count > 10 {
                        println!(" ... and {} more", pkg_count - 10);
                    }
                }
            }
            println!();
            println!(" Run with environment variable OPNSENSE_FIRMWARE_UPDATE=1 to apply updates.");
            println!(" WARNING: firmware updates can cause connectivity interruptions.");
        }
        "error" => {
            println!(" ✗ Error: {}", status.status_msg.as_deref().unwrap_or("Unknown error"));
        }
        other => {
            println!(" ? Unexpected status: {other}");
            println!(" Full response: {}", serde_json::to_string_pretty(&status).unwrap());
        }
    }
    // Applying the update is opt-in via environment variable only.
    if env::var("OPNSENSE_FIRMWARE_UPDATE").as_deref() == Ok("1") {
        if status.status == "update" || status.status == "upgrade" {
            println!();
            println!(" Applying firmware update ...");
            log::info!("POST /api/core/firmware/update");
            let result: FirmwareActionResponse = client
                .post_typed("core", "firmware", "update", None::<&()>)
                .await
                .expect("update request failed");
            if result.status == "ok" {
                println!(" ✓ Update started, msg_uuid: {}", result.msg_uuid);
                println!(" The firewall is updating in the background.");
            } else {
                println!(" ✗ Update failed: {:?}", result.status_msg);
            }
        } else {
            println!();
            println!(" No updates to apply.");
        }
    }
}

View File

@@ -0,0 +1,86 @@
//! Example: install OPNsense packages (os-haproxy, os-caddy) via the firmware API.
//!
//! ```text
//! cargo run --example install_package -- os-haproxy
//! cargo run --example install_package -- os-caddy os-acme
//! ```
//!
//! Calls `POST /api/core/firmware/install/<pkg_name>` for each package.
//! These are the standard OPNsense plugin packages.
use std::env;
use opnsense_api::client::OpnsenseClient;
use serde::Deserialize;
/// Response of `POST /api/core/firmware/install/<pkg>`.
#[derive(Debug, Deserialize)]
pub struct FirmwareActionResponse {
    pub status: String,
    // UUID of the background job; empty string when absent (serde default).
    #[serde(default)]
    pub msg_uuid: String,
    #[serde(default)]
    pub status_msg: Option<String>,
}
/// Build an OpnsenseClient from OPNSENSE_* environment variables, exiting
/// with usage hints when the API credentials are missing.
fn build_client() -> OpnsenseClient {
    let base_url =
        env::var("OPNSENSE_BASE_URL").unwrap_or_else(|_| "https://192.168.1.1/api".to_string());
    let credentials = (
        env::var("OPNSENSE_API_KEY").ok(),
        env::var("OPNSENSE_API_SECRET").ok(),
    );
    let (Some(key), Some(secret)) = credentials else {
        eprintln!("ERROR: OPNSENSE_API_KEY and OPNSENSE_API_SECRET must be set.");
        eprintln!(" export OPNSENSE_API_KEY=your_key");
        eprintln!(" export OPNSENSE_API_SECRET=your_secret");
        eprintln!(" export OPNSENSE_BASE_URL=https://your-firewall/api");
        std::process::exit(1);
    };
    OpnsenseClient::builder()
        .base_url(&base_url)
        .auth_from_key_secret(&key, &secret)
        .skip_tls_verify()
        .timeout_secs(300)
        .build()
        .expect("failed to build HTTP client")
}
/// Install each package named on the command line via the firmware API.
#[tokio::main]
async fn main() {
    env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init();
    // Package names come from CLI arguments (e.g. "os-haproxy").
    let packages: Vec<String> = env::args().skip(1).collect();
    if packages.is_empty() {
        eprintln!("Usage: cargo run --example install_package -- <package1> [package2 ...]");
        eprintln!("Example: cargo run --example install_package -- os-haproxy os-caddy");
        std::process::exit(1);
    }
    let client = build_client();
    println!();
    println!("╔═══════════════════════════════════════════════════════════╗");
    println!("║ OPNsense Package Installer ║");
    println!("╚═══════════════════════════════════════════════════════════╝");
    println!();
    // Install sequentially; a failed request aborts the run (expect below).
    for pkg in &packages {
        println!(" Installing {pkg} ...");
        log::info!("POST /api/core/firmware/install/{pkg}");
        let response: FirmwareActionResponse = client
            .post_typed("core", "firmware", &format!("install/{pkg}"), None::<&()>)
            .await
            .expect("API call failed");
        // A status of "ok" indicates the install request was accepted.
        if response.status == "ok" {
            println!("{pkg} installed (msg_uuid: {})", response.msg_uuid);
        } else {
            println!("{pkg} failed: {:?}", response.status_msg);
        }
    }
    println!();
    println!(" Done.");
}

View File

@@ -0,0 +1,134 @@
//! Example: fetch and display OPNsense Dnsmasq DNS/DHCP settings.
//!
//! ```text
//! cargo run --example list_dnsmasq
//! ```
//!
//! This demonstrates the **full target DX** for the opnsense-api crate:
//!
//! 1. Build a typed [`OpnsenseClient`] — prompted for credentials if not set.
//! 2. Call `GET /api/dnsmasq/settings/get` with full type safety.
//! 3. Pretty-print the deserialized response.
//!
//! ## Credentials
//!
//! The client reads the `OPNSENSE_API_KEY` and `OPNSENSE_API_SECRET`
//! environment variables and exits with an error message if either is missing.
use std::env;
use opnsense_api::client::OpnsenseClient;
use opnsense_api::generated::dnsmasq::DnsmasqSettingsResponse;
/// Entry point: build the client, fetch the Dnsmasq settings, print a report.
#[tokio::main]
async fn main() {
    // Route `log::*` output through env_logger; RUST_LOG overrides the "info" default.
    env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init();
    // Fall back to a common LAN address so the example runs without any setup.
    let base_url = env::var("OPNSENSE_BASE_URL")
        .unwrap_or_else(|_| {
            eprintln!("OPNSENSE_BASE_URL not set, using https://192.168.1.1/api");
            "https://192.168.1.1/api".to_string()
        });
    // Both credential variables must be set; otherwise print usage help and exit.
    let client = match (env::var("OPNSENSE_API_KEY").ok(), env::var("OPNSENSE_API_SECRET").ok()) {
        (Some(key), Some(secret)) => {
            log::info!("Using credentials from environment variables");
            OpnsenseClient::builder()
                .base_url(&base_url)
                .auth_from_key_secret(&key, &secret)
                // Self-signed firewall certificates are the norm on home networks.
                .skip_tls_verify()
                .build()
                .expect("failed to build HTTP client")
        }
        _ => {
            eprintln!("ERROR: OPNSENSE_API_KEY and OPNSENSE_API_SECRET must be set.");
            eprintln!(" export OPNSENSE_API_KEY=your_key");
            eprintln!(" export OPNSENSE_API_SECRET=your_secret");
            eprintln!(" export OPNSENSE_BASE_URL=https://your-firewall/api");
            std::process::exit(1);
        }
    };
    log::info!("Fetching /api/dnsmasq/settings/get ...");
    let response: DnsmasqSettingsResponse = client
        .get_typed("dnsmasq", "settings", "get")
        .await
        .expect("API call failed");
    println!();
    println!("╔═══════════════════════════════════════════════════════════╗");
    println!("║ OPNsense Dnsmasq Settings ║");
    println!("╚═══════════════════════════════════════════════════════════╝");
    println!();
    let s = &response.dnsmasq;
    println!(" General");
    println!(" ─────────────────────────────────────────────────────────");
    println!(" DNS service enabled: {}", toggle(s.enable));
    println!(" DNSSEC validation: {}", toggle(s.dnssec));
    println!(" Log queries: {}", toggle(s.log_queries));
    println!();
    println!(" DNS Options");
    println!(" ─────────────────────────────────────────────────────────");
    println!(" Domain required: {}", toggle(s.domain_needed));
    // Fixed label typo: "revers" → "reverse".
    println!(" No private reverse: {}", toggle(s.no_private_reverse));
    println!(" Strict order: {}", toggle(s.strict_order));
    println!(" No /etc/hosts: {}", toggle(s.no_hosts));
    println!(" Strict bind: {}", toggle(s.strictbind));
    // Unset numeric options render as an empty string, matching the web UI.
    println!(" Cache size: {}", s.cache_size.map_or_else(String::new, |v| v.to_string()));
    println!(" Local TTL: {}", s.local_ttl.map_or_else(String::new, |v| v.to_string()));
    // 53 is the standard DNS port, shown when no override is configured.
    println!(" DNS port: {}", s.port.map_or_else(|| "53".to_string(), |v| v.to_string()));
    println!();
    println!(" DHCP Registration");
    println!(" ─────────────────────────────────────────────────────────");
    println!(" Register DHCP leases: {}", toggle(s.regdhcp));
    println!(" Register static DHCP: {}", toggle(s.regdhcpstatic));
    println!(" DHCP first: {}", toggle(s.dhcpfirst));
    println!(" No ident: {}", toggle(s.no_ident));
    if let Some(ref domain) = s.regdhcpdomain {
        println!(" Domain: {domain}");
    }
    println!();
    println!(" DHCP Options");
    println!(" ─────────────────────────────────────────────────────────");
    println!(" Add MAC address: {}", add_mac_label(&s.add_mac));
    println!(" Add subnet: {}", toggle(s.add_subnet));
    println!(" Strip subnet: {}", toggle(s.strip_subnet));
    println!(" No /etc/resolv: {}", toggle(s.no_resolv));
    if let Some(v) = s.dns_forward_max {
        println!(" DNS forward max: {v}");
    }
    println!();
    println!(" Full response (JSON)");
    println!(" ─────────────────────────────────────────────────────────");
    println!(" {}", serde_json::to_string_pretty(&response).expect("settings serialize to JSON"));
}
/// Render a boolean flag as a human-readable enabled/disabled marker.
fn toggle(b: bool) -> &'static str {
    match b {
        true => "enabled ✓",
        false => "disabled ✗",
    }
}
/// Human-readable label for OPNsense's `add_mac` option field.
///
/// The API encodes option fields as objects keyed by option token, each entry
/// carrying `selected` (0/1) and a display `value`. This returns the selected
/// key — or its `value` label when the key is the empty string — falling back
/// to `"not set"` for anything else.
fn add_mac_label(mac: &Option<serde_json::Value>) -> String {
    let Some(serde_json::Value::Object(map)) = mac else {
        return "not set".to_string();
    };
    let selected_entry = map.iter().find(|(_, entry)| {
        entry
            .get("selected")
            .and_then(serde_json::Value::as_i64)
            == Some(1)
    });
    let label = match selected_entry {
        Some((key, entry)) if key.is_empty() => {
            entry.get("value").and_then(|x| x.as_str()).unwrap_or("not set")
        }
        Some((key, _)) => key.as_str(),
        None => "not set",
    };
    label.to_string()
}

View File

@@ -0,0 +1,152 @@
//! Example: list all OPNsense packages (installed and available).
//!
//! ```text
//! cargo run --example list_packages
//! ```
//!
//! Calls `GET /api/core/firmware/info` which returns package listings
//! from OPNsense's firmware API.
use std::env;
use opnsense_api::client::OpnsenseClient;
use serde::Deserialize;
/// Top-level payload of `GET /api/core/firmware/info`.
#[derive(Debug, Deserialize)]
pub struct FirmwareInfoResponse {
    // Product identification block (name, version, architecture).
    pub product: ProductInfo,
    // Regular packages; a missing key deserializes to an empty list.
    #[serde(default)]
    pub package: Vec<PackageInfo>,
    // Installable plugins; a missing key deserializes to an empty list.
    #[serde(default)]
    pub plugin: Vec<PackageInfo>,
}
/// Product identification returned inside the firmware info payload.
#[derive(Debug, Deserialize)]
pub struct ProductInfo {
    pub product_name: String,
    pub product_version: String,
    pub product_arch: String,
    // Opaque update-check blob; kept as raw JSON since its shape is not used here.
    #[serde(default)]
    pub product_check: Option<serde_json::Value>,
}
/// One package/plugin entry from the firmware info payload.
///
/// OPNsense reports flag-like fields (`locked`, `installed`, `provided`, …) as
/// the strings `"1"`/`"0"`, not booleans — callers compare against `"1"`.
#[derive(Debug, Deserialize)]
pub struct PackageInfo {
    pub name: String,
    pub version: String,
    #[serde(default)]
    pub comment: String,
    #[serde(default)]
    pub flatsize: String,
    #[serde(default)]
    pub locked: String,
    #[serde(default)]
    pub automatic: String,
    #[serde(default)]
    pub license: String,
    #[serde(default)]
    pub repository: String,
    #[serde(default)]
    pub origin: String,
    #[serde(default)]
    pub provided: String,
    #[serde(default)]
    pub installed: String,
    #[serde(default)]
    pub configured: String,
    #[serde(default)]
    pub tier: String,
}
/// Entry point: fetch firmware info and print plugin/package listings.
#[tokio::main]
async fn main() {
    // Route `log::*` output through env_logger; RUST_LOG overrides the "info" default.
    env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init();
    // Fall back to a common LAN address so the example runs without any setup.
    let base_url = env::var("OPNSENSE_BASE_URL")
        .unwrap_or_else(|_| "https://192.168.1.1/api".to_string());
    // Both credential variables must be set; otherwise print usage help and exit.
    let client = match (env::var("OPNSENSE_API_KEY").ok(), env::var("OPNSENSE_API_SECRET").ok()) {
        (Some(key), Some(secret)) => {
            OpnsenseClient::builder()
                .base_url(&base_url)
                .auth_from_key_secret(&key, &secret)
                // Self-signed firewall certificates are the norm on home networks.
                .skip_tls_verify()
                .build()
                .expect("failed to build HTTP client")
        }
        _ => {
            eprintln!("ERROR: OPNSENSE_API_KEY and OPNSENSE_API_SECRET must be set.");
            eprintln!(" export OPNSENSE_API_KEY=your_key");
            eprintln!(" export OPNSENSE_API_SECRET=your_secret");
            eprintln!(" export OPNSENSE_BASE_URL=https://your-firewall/api");
            std::process::exit(1);
        }
    };
    log::info!("Fetching /api/core/firmware/info ...");
    let response: FirmwareInfoResponse = client
        .get_typed("core", "firmware", "info")
        .await
        .expect("API call failed");
    println!();
    println!("╔═══════════════════════════════════════════════════════════╗");
    println!("║ OPNsense Package Information ║");
    println!("╚═══════════════════════════════════════════════════════════╝");
    println!();
    println!(
        " {} {} ({})",
        response.product.product_name,
        response.product.product_version,
        response.product.product_arch
    );
    println!();
    // Install state is the string "1"/"0" on the wire, not a boolean.
    let installed: Vec<_> = response
        .plugin
        .iter()
        .filter(|p| p.installed == "1")
        .collect();
    // NOTE(review): `provided == "1"` presumably means "available from the
    // configured repository" — confirm against the firmware API docs.
    let available: Vec<_> = response
        .plugin
        .iter()
        .filter(|p| p.installed != "1" && p.provided == "1")
        .collect();
    println!(" Installed plugins: {} (showing first 30)", installed.len());
    println!(" ─────────────────────────────────────────────────────────");
    for pkg in installed.iter().take(30) {
        println!(" {:40} {}", pkg.name, pkg.version);
    }
    if installed.len() > 30 {
        println!(" ... and {} more", installed.len() - 30);
    }
    println!();
    println!(" Available plugins: {} (showing first 20)", available.len());
    println!(" ─────────────────────────────────────────────────────────");
    for pkg in available.iter().take(20) {
        println!(" {:40} {}", pkg.name, pkg.version);
    }
    if available.len() > 20 {
        println!(" ... and {} more", available.len() - 20);
    }
    println!();
    // Select entries whose name carries the "os-" prefix used by OPNsense add-ons.
    let os_packages: Vec<_> = response
        .package
        .iter()
        .filter(|p| p.name.starts_with("os-"))
        .collect();
    println!(" System packages (os-*): {} (showing first 20)", os_packages.len());
    println!(" ─────────────────────────────────────────────────────────");
    for pkg in os_packages.iter().take(20) {
        let installed = if pkg.installed == "1" { " [installed]" } else { "" };
        println!(" {:40} {}{}", pkg.name, pkg.version, installed);
    }
    if os_packages.len() > 20 {
        println!(" ... and {} more", os_packages.len() - 20);
    }
}

44
opnsense-api/src/auth.rs Normal file
View File

@@ -0,0 +1,44 @@
//! Authentication helpers for OPNsense API clients.
//!
//! OPNsense uses HTTP Basic Auth with an API key/secret pair. The key is used as
//! the username and the secret as the password.
use http::header::{HeaderMap, HeaderValue, AUTHORIZATION};
/// OPNsense API credentials: key (username) and secret (password).
#[derive(Debug, Clone)]
pub struct Credentials {
    // API key — sent as the Basic Auth username.
    pub key: String,
    // API secret — sent as the Basic Auth password.
    pub secret: String,
}
/// Interactively ask the operator for an API key/secret pair.
///
/// Backed by [`inquire`]: the key uses a plain text prompt, the secret uses a
/// hidden password prompt without a confirmation round-trip. Use this when a
/// CLI should ask the user directly instead of requiring environment variables.
pub fn prompt_credentials() -> Result<Credentials, inquire::InquireError> {
    use inquire::{Password, Text};
    let key_prompt = Text::new("OPNsense API key:")
        .with_help_message("Found in System → Access → Users → API Keys");
    let secret_prompt = Password::new("OPNsense API secret:").without_confirmation();
    // Struct fields are evaluated in source order, so the key is asked first.
    Ok(Credentials {
        key: key_prompt.prompt()?,
        secret: secret_prompt.prompt()?,
    })
}
/// Insert an `Authorization: Basic <base64(key:secret)>` header into `headers`.
///
/// Any previously present `Authorization` value is replaced.
pub fn add_auth_headers(headers: &mut HeaderMap, creds: &Credentials) {
    use base64::Engine;
    let raw = format!("{}:{}", creds.key, creds.secret);
    let token = base64::engine::general_purpose::STANDARD.encode(raw.as_bytes());
    let value = format!("Basic {token}");
    // Base64 output is always ASCII, so the header value cannot be invalid.
    headers.insert(
        AUTHORIZATION,
        HeaderValue::from_str(&value).expect("Basic auth header value is always valid ASCII"),
    );
}

265
opnsense-api/src/client.rs Normal file
View File

@@ -0,0 +1,265 @@
//! Typed OPNsense API client.
//!
//! ## Core pattern
//!
//! ```ignore
//! use opnsense_api::OpnsenseClient;
//!
//! let client = OpnsenseClient::builder()
//! .base_url("https://my-opnsense.local/api")
//! .auth_from_key_secret("mykey", "mysecret")
//! .build()?;
//!
//! // GET /api/interfaces/settings/get
//! let response = client.get_typed("interfaces", "settings", "get").await?;
//! ```
//!
//! ## Response wrapper keys
//!
//! OPNsense API responses wrap the model in a key that is the controller's
//! `internalModelName` (`settings`, `haproxy`, `dnsmasq`, …). Generated
//! response types in [`crate::generated`] use the correct key, so pass them
//! directly as the type parameter.
use std::sync::Arc;
use http::header::{HeaderMap, CONTENT_TYPE};
use log::{debug, trace, warn};
use serde::de::DeserializeOwned;
use crate::auth::{add_auth_headers, Credentials};
use crate::error::Error;
/// Builder for [`OpnsenseClient`].
#[derive(Debug, Clone)]
pub struct OpnsenseClientBuilder {
    // Base URL including the `/api` suffix; trailing slashes are trimmed in `build()`.
    base_url: String,
    // Key/secret pair; `build()` fails with `Error::NoCredentials` when unset.
    credentials: Option<Credentials>,
    // Request timeout in seconds; `OpnsenseClient::builder()` seeds this with 30.
    timeout_secs: Option<u64>,
    // When true, the HTTP client accepts invalid TLS certificates.
    skip_tls_verify: bool,
    // Sent as the `User-Agent` header; defaults to "<crate-name>/<version>".
    user_agent: String,
}
impl OpnsenseClientBuilder {
/// Set the OPNsense API base URL — include the `/api` suffix.
///
/// Example: `"https://192.168.1.1/api"`
pub fn base_url(mut self, url: impl Into<String>) -> Self {
self.base_url = url.into();
self
}
/// Provide credentials directly as key/secret.
pub fn auth_from_key_secret(mut self, key: impl Into<String>, secret: impl Into<String>) -> Self {
self.credentials = Some(Credentials {
key: key.into(),
secret: secret.into(),
});
self
}
/// Prompt the user for API key and secret using the terminal.
///
/// Uses [`crate::auth::prompt_credentials`] internally.
pub fn auth_interactive(self) -> Result<Self, inquire::InquireError> {
let creds = crate::auth::prompt_credentials()?;
Ok(self.auth_from_key_secret(creds.key, creds.secret))
}
/// Override the request timeout (in seconds). Default: 30 s.
pub fn timeout_secs(mut self, secs: u64) -> Self {
self.timeout_secs = Some(secs);
self
}
/// Skip TLS certificate verification. Useful for self-signed certs.
///
/// **Never use this in production.**
pub fn skip_tls_verify(mut self) -> Self {
self.skip_tls_verify = true;
self
}
/// Build the [`OpnsenseClient`].
pub fn build(self) -> Result<OpnsenseClient, Error> {
let credentials = self.credentials.ok_or(Error::NoCredentials)?;
let mut builder = reqwest::Client::builder();
if self.skip_tls_verify {
builder = builder.danger_accept_invalid_certs(true);
}
if let Some(secs) = self.timeout_secs {
builder = builder.timeout(std::time::Duration::from_secs(secs));
}
builder = builder.user_agent(&self.user_agent);
let http = builder.build().map_err(Error::Client)?;
Ok(OpnsenseClient {
base_url: self.base_url.trim_end_matches('/').to_string(),
credentials: Arc::new(credentials),
http,
})
}
}
/// A typed OPNsense API client.
///
/// Construct with [`OpnsenseClient::builder()`].
#[derive(Clone)]
pub struct OpnsenseClient {
    // Normalized base URL (trailing slashes trimmed), e.g. "https://host/api".
    base_url: String,
    // Shared so `Clone` does not copy the secret material.
    credentials: Arc<Credentials>,
    // `reqwest::Client` is cheap to clone (documented as an internal Arc).
    http: reqwest::Client,
}
impl std::fmt::Debug for OpnsenseClient {
    // Manual impl: omits `credentials` so secrets never reach debug logs.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut dbg = f.debug_struct("OpnsenseClient");
        dbg.field("base_url", &self.base_url);
        dbg.finish_non_exhaustive()
    }
}
impl OpnsenseClient {
/// Start configuring a client.
///
/// ```ignore
/// OpnsenseClient::builder()
/// .base_url("https://firewall.example.com/api")
/// .auth_from_key_secret("key", "secret")
/// .build()?
/// ```
pub fn builder() -> OpnsenseClientBuilder {
OpnsenseClientBuilder {
base_url: String::new(),
credentials: None,
timeout_secs: Some(30),
skip_tls_verify: false,
user_agent: concat!(env!("CARGO_PKG_NAME"), "/", env!("CARGO_PKG_VERSION")).to_string(),
}
}
/// Issue a GET request and deserialize the response into a concrete type.
///
/// This is the main entry point for typed model endpoints.
///
/// # Example
///
/// ```ignore
/// let resp: InterfacesSettingsResponse = client
/// .get_typed("interfaces", "settings", "get")
/// .await?;
/// ```
///
/// For endpoints that do not return JSON (or don't need typed parsing),
/// use [`Self::get_untyped`] instead.
pub async fn get_typed<R>(&self, module: &str, controller: &str, command: &str) -> Result<R, Error>
where
R: DeserializeOwned + std::fmt::Debug,
{
let url = format!(
"{}/{}/{}/{}",
self.base_url, module, controller, command
);
let mut headers = HeaderMap::new();
add_auth_headers(&mut headers, &self.credentials);
debug!(target: "opnsense-api", "GET {}", url);
trace!("headers: {:#?}", headers);
let response = self
.http
.get(&url)
.headers(headers)
.send()
.await
.map_err(Error::Client)?;
self.handle_response_typed(response, "GET", &url).await
}
/// Issue a POST request with an optional JSON body and deserialize the response.
///
/// Use this for action endpoints like `POST /api/core/firmware/install/os-haproxy`.
///
/// # Example
///
/// ```ignore
/// let resp: FirmwareInstallResponse = client
/// .post_typed("core", "firmware", "install", Some(&json!({"pkg_name": "os-haproxy"})))
/// .await?;
/// ```
pub async fn post_typed<R, B>(&self, module: &str, controller: &str, command: &str, body: Option<B>) -> Result<R, Error>
where
R: DeserializeOwned + std::fmt::Debug,
B: serde::Serialize,
{
let url = format!(
"{}/{}/{}/{}",
self.base_url, module, controller, command
);
let mut headers = HeaderMap::new();
add_auth_headers(&mut headers, &self.credentials);
headers.insert(CONTENT_TYPE, "application/json".parse().unwrap());
debug!(target: "opnsense-api", "POST {}", url);
let request = self.http.post(&url).headers(headers);
let request = match body {
Some(b) => {
let json = serde_json::to_string(&b).map_err(|e| Error::JsonDecode {
context: url.clone(),
source: Box::new(e),
})?;
trace!("body: {json}");
request.body(json)
}
None => request,
};
let response = request.send().await.map_err(Error::Client)?;
self.handle_response_typed(response, "POST", &url).await
}
async fn handle_response_typed<R>(
&self,
response: reqwest::Response,
method: &str,
url: &str,
) -> Result<R, Error>
where
R: DeserializeOwned + std::fmt::Debug,
{
let status = response.status();
if status.is_success() {
debug!("Reponse success, content : {response:#?}");
let body = response.text().await?;
debug!("Reponse success, body : {:#?}", body);
let json = serde_json::from_str(&body);
debug!("Reponse success, json : {:#?}", json);
let json: R = json.map_err(|e| Error::JsonDecode {
context: url.to_string(),
source: Box::new(e),
})?;
debug!(target: "opnsense-api", "{} {} → HTTP {status}", method, url);
Ok(json)
} else {
let body = response.text().await.unwrap_or_default();
warn!(target: "opnsense-api", "{} {} → HTTP {status}: {}", method, url, body);
Err(Error::Api {
status,
method: method.to_string(),
path: url.to_string(),
body,
})
}
}
}

37
opnsense-api/src/error.rs Normal file
View File

@@ -0,0 +1,37 @@
//! Typed error types for the opnsense-api client.
use thiserror::Error;
/// Errors that can occur when calling the OPNsense API.
#[derive(Debug, Error)]
pub enum Error {
    /// Failed to build or send the HTTP request.
    #[error("http client error: {0}")]
    Client(#[source] reqwest::Error),
    /// Server returned a non-2xx HTTP status.
    #[error("OPNsense returned HTTP {status} for {method} {path}: {body}")]
    Api {
        // HTTP status code returned by the server.
        status: reqwest::StatusCode,
        // Request method ("GET" or "POST").
        method: String,
        // Full request URL that produced the error.
        path: String,
        // Raw response body, kept for diagnostics.
        body: String,
    },
    /// Response body could not be decoded as JSON.
    #[error("json decode error for {context}: {source}")]
    JsonDecode {
        // The request URL whose response failed to decode.
        context: String,
        // Underlying serde error, boxed to keep the enum small.
        source: Box<dyn std::error::Error + Send + Sync>,
    },
    /// No API key or secret was provided.
    #[error("no credentials provided — call `.auth_from_key_secret()` or `.auth_interactive()`")]
    NoCredentials,
}
impl From<reqwest::Error> for Error {
fn from(value: reqwest::Error) -> Self {
Error::Client(value)
}
}

View File

@@ -0,0 +1,226 @@
//! Auto-generated types for the **Dnsmasq** OPNsense model.
//!
//! Source XML: `core/src/opnsense/mvc/app/models/OPNsense/Dnsmasq/Dnsmasq.xml`
//!
//! Mount: `//dnsmasq`
//!
//! API endpoint: `GET /api/dnsmasq/settings/get`
//!
//! **DO NOT EDIT** — produced by `opnsense-codegen`.
//!
//! ## Response wrapper key
//!
//! This model uses `"dnsmasq"` as the JSON response key (the controller's
//! `internalModelName`). The wrapper struct is [`DnsmasqSettingsResponse`].
use serde::{Deserialize, Serialize};
pub mod serde_helpers {
    //! Serde adapters for OPNsense's string-based wire encodings.
    //!
    //! OPNsense serializes booleans as `"1"`/`"0"` and unset optional scalars
    //! as the empty string; these modules translate between that wire format
    //! and idiomatic Rust `bool`/`Option` types.
    pub mod opn_bool {
        //! `Option<bool>` ⇄ `"1"` / `"0"` / `""` (None).
        use serde::{Deserialize, Deserializer, Serializer};
        /// Serialize `Some(true)`/`Some(false)`/`None` as `"1"`/`"0"`/`""`.
        pub fn serialize<S: Serializer>(
            value: &Option<bool>,
            serializer: S,
        ) -> Result<S::Ok, S::Error> {
            match value {
                Some(true) => serializer.serialize_str("1"),
                Some(false) => serializer.serialize_str("0"),
                None => serializer.serialize_str(""),
            }
        }
        /// Deserialize leniently: accepts `"1"`/`"0"`/`"true"`/`"false"`,
        /// native JSON booleans, the numbers 0/1, and `""`/null for `None`.
        pub fn deserialize<'de, D: Deserializer<'de>>(
            deserializer: D,
        ) -> Result<Option<bool>, D::Error> {
            // Buffer into a `Value` so one function covers every wire shape.
            let v = serde_json::Value::deserialize(deserializer)?;
            match &v {
                serde_json::Value::String(s) => match s.as_str() {
                    "1" | "true" => Ok(Some(true)),
                    "0" | "false" => Ok(Some(false)),
                    "" => Ok(None),
                    other => Err(serde::de::Error::custom(format!(
                        "invalid bool string: {other}"
                    ))),
                },
                serde_json::Value::Bool(b) => Ok(Some(*b)),
                serde_json::Value::Number(n) => match n.as_u64() {
                    Some(1) => Ok(Some(true)),
                    Some(0) => Ok(Some(false)),
                    _ => Err(serde::de::Error::custom(format!(
                        "invalid bool number: {n}"
                    ))),
                },
                serde_json::Value::Null => Ok(None),
                _ => Err(serde::de::Error::custom(
                    "expected string, bool, or number for bool field",
                )),
            }
        }
    }
    pub mod opn_bool_req {
        //! Required `bool` ⇄ `"1"` / `"0"` — no None case.
        use serde::{Deserialize, Deserializer, Serializer};
        /// Serialize `true`/`false` as `"1"`/`"0"`.
        pub fn serialize<S: Serializer>(value: &bool, serializer: S) -> Result<S::Ok, S::Error> {
            serializer.serialize_str(if *value { "1" } else { "0" })
        }
        /// Deserialize from string, native bool, or the numbers 0/1.
        /// The empty string is NOT accepted: the field is required.
        pub fn deserialize<'de, D: Deserializer<'de>>(deserializer: D) -> Result<bool, D::Error> {
            let v = serde_json::Value::deserialize(deserializer)?;
            match &v {
                serde_json::Value::String(s) => match s.as_str() {
                    "1" | "true" => Ok(true),
                    "0" | "false" => Ok(false),
                    other => Err(serde::de::Error::custom(format!(
                        "invalid required bool: {other}"
                    ))),
                },
                serde_json::Value::Bool(b) => Ok(*b),
                serde_json::Value::Number(n) => match n.as_u64() {
                    Some(1) => Ok(true),
                    Some(0) => Ok(false),
                    _ => Err(serde::de::Error::custom(format!(
                        "invalid required bool number: {n}"
                    ))),
                },
                _ => Err(serde::de::Error::custom(
                    "expected string, bool, or number for required bool",
                )),
            }
        }
    }
    pub mod opn_u32 {
        //! `Option<u32>` ⇄ decimal string, with `""`/null meaning `None`.
        use serde::{Deserialize, Deserializer, Serializer};
        /// Serialize the number as a decimal string; `None` becomes `""`.
        pub fn serialize<S: Serializer>(
            value: &Option<u32>,
            serializer: S,
        ) -> Result<S::Ok, S::Error> {
            match value {
                Some(v) => serializer.serialize_str(&v.to_string()),
                None => serializer.serialize_str(""),
            }
        }
        /// Deserialize from a decimal string, a JSON number, or null.
        pub fn deserialize<'de, D: Deserializer<'de>>(
            deserializer: D,
        ) -> Result<Option<u32>, D::Error> {
            let v = serde_json::Value::deserialize(deserializer)?;
            match &v {
                serde_json::Value::String(s) if s.is_empty() => Ok(None),
                serde_json::Value::String(s) => {
                    s.parse::<u32>().map(Some).map_err(serde::de::Error::custom)
                }
                // Negative, fractional, or > u32::MAX numbers are a hard error.
                serde_json::Value::Number(n) => n
                    .as_u64()
                    .and_then(|n| u32::try_from(n).ok())
                    .map(Some)
                    .ok_or_else(|| serde::de::Error::custom("number out of u32 range")),
                serde_json::Value::Null => Ok(None),
                _ => Err(serde::de::Error::custom(
                    "expected string or number for u32",
                )),
            }
        }
    }
}
/// Root model for `GET /api/dnsmasq/settings/get`.
///
/// All boolean fields use OPNsense's `"1"`/`"0"` wire encoding.
/// Array fields (`hosts`, `domainoverrides`, etc.) are represented as
/// `serde_json::Value` — inspect them as raw JSON if you need their contents.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DnsmasqSettings {
    // ── Required booleans — wire format "1"/"0" ──────────────────────────
    // Master switch for the Dnsmasq service.
    #[serde(with = "crate::generated::dnsmasq::serde_helpers::opn_bool_req")]
    pub enable: bool,
    #[serde(with = "crate::generated::dnsmasq::serde_helpers::opn_bool_req")]
    pub regdhcp: bool,
    #[serde(with = "crate::generated::dnsmasq::serde_helpers::opn_bool_req")]
    pub regdhcpstatic: bool,
    #[serde(with = "crate::generated::dnsmasq::serde_helpers::opn_bool_req")]
    pub dhcpfirst: bool,
    #[serde(with = "crate::generated::dnsmasq::serde_helpers::opn_bool_req")]
    pub strict_order: bool,
    #[serde(with = "crate::generated::dnsmasq::serde_helpers::opn_bool_req")]
    pub domain_needed: bool,
    #[serde(with = "crate::generated::dnsmasq::serde_helpers::opn_bool_req")]
    pub no_private_reverse: bool,
    #[serde(with = "crate::generated::dnsmasq::serde_helpers::opn_bool_req")]
    pub no_resolv: bool,
    #[serde(with = "crate::generated::dnsmasq::serde_helpers::opn_bool_req")]
    pub log_queries: bool,
    #[serde(with = "crate::generated::dnsmasq::serde_helpers::opn_bool_req")]
    pub no_hosts: bool,
    #[serde(with = "crate::generated::dnsmasq::serde_helpers::opn_bool_req")]
    pub strictbind: bool,
    #[serde(with = "crate::generated::dnsmasq::serde_helpers::opn_bool_req")]
    pub dnssec: bool,
    // ── Optional scalars — empty string on the wire means None ───────────
    #[serde(default)]
    pub regdhcpdomain: Option<String>,
    // Opaque option-field object; shape not modeled here.
    #[serde(default)]
    pub interface: Option<serde_json::Value>,
    #[serde(default, with = "crate::generated::dnsmasq::serde_helpers::opn_u32")]
    pub port: Option<u32>,
    #[serde(default, with = "crate::generated::dnsmasq::serde_helpers::opn_u32")]
    pub dns_forward_max: Option<u32>,
    #[serde(default, with = "crate::generated::dnsmasq::serde_helpers::opn_u32")]
    pub cache_size: Option<u32>,
    #[serde(default, with = "crate::generated::dnsmasq::serde_helpers::opn_u32")]
    pub local_ttl: Option<u32>,
    // Option-field object: keys with {"selected": 0|1, "value": label}.
    #[serde(default)]
    pub add_mac: Option<serde_json::Value>,
    #[serde(with = "crate::generated::dnsmasq::serde_helpers::opn_bool_req")]
    pub add_subnet: bool,
    #[serde(with = "crate::generated::dnsmasq::serde_helpers::opn_bool_req")]
    pub strip_subnet: bool,
    #[serde(default)]
    pub dhcp: Option<serde_json::Value>,
    #[serde(with = "crate::generated::dnsmasq::serde_helpers::opn_bool_req")]
    pub no_ident: bool,
    // ── Array-like sub-collections, kept as raw JSON ─────────────────────
    #[serde(default)]
    pub hosts: Option<serde_json::Value>,
    #[serde(default)]
    pub domainoverrides: Option<serde_json::Value>,
    #[serde(default)]
    pub dhcp_tags: Option<serde_json::Value>,
    #[serde(default)]
    pub dhcp_ranges: Option<serde_json::Value>,
    #[serde(default)]
    pub dhcp_options: Option<serde_json::Value>,
    #[serde(default)]
    pub dhcp_boot: Option<serde_json::Value>,
}
/// Response wrapper for `GET /api/dnsmasq/settings/get`.
///
/// OPNsense returns `{ "dnsmasq": { ... } }` where the inner object is a
/// [`DnsmasqSettings`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DnsmasqSettingsResponse {
    // Deserialized settings found under the "dnsmasq" wrapper key.
    pub dnsmasq: DnsmasqSettings,
}

View File

@@ -0,0 +1,227 @@
//! Auto-generated types for the **Interfaces/Settings** OPNsense model.
//!
//! Source XML: `core/src/opnsense/mvc/app/models/OPNsense/Interfaces/Settings.xml`
//!
//! Mount: `//OPNsense/Interfaces/settings`
//!
//! API endpoint: `GET /api/interfaces/settings/get`
//!
//! **DO NOT EDIT** — produced by `opnsense-codegen`.
//!
//! ## Response wrapper key
//!
//! This model uses `"settings"` as the JSON response key (the controller's
//! `internalModelName`). The wrapper struct is [`InterfacesSettingsResponse`].
use serde::{Deserialize, Serialize};
/// VLAN hardware-filtering preference.
///
/// Serialized by OPNsense as `"opt0"` / `"opt1"` / `"opt2"`.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum Disablevlanhwfilter {
    /// Wire value `"opt0"`.
    EnableVlanHardwareFiltering,
    /// Wire value `"opt1"`.
    DisableVlanHardwareFiltering,
    /// Wire value `"opt2"`.
    LeaveDefault,
}
/// Per-variant serde for [`Disablevlanhwfilter`]: `"opt0"`/`"opt1"`/`"opt2"`
/// on the wire, with `""` and null mapping to `None`.
pub(crate) mod serde_disablevlanhwfilter {
    use super::Disablevlanhwfilter;
    use log::debug;
    use serde::{Deserialize, Deserializer, Serializer};

    /// Encode the option as its OPNsense wire token; `None` becomes `""`.
    pub fn serialize<S: Serializer>(
        value: &Option<Disablevlanhwfilter>,
        serializer: S,
    ) -> Result<S::Ok, S::Error> {
        let token = match value {
            Some(Disablevlanhwfilter::EnableVlanHardwareFiltering) => "opt0",
            Some(Disablevlanhwfilter::DisableVlanHardwareFiltering) => "opt1",
            Some(Disablevlanhwfilter::LeaveDefault) => "opt2",
            None => "",
        };
        serializer.serialize_str(token)
    }

    /// Decode `"opt0"`/`"opt1"`/`"opt2"`; `""` and null yield `None`.
    pub fn deserialize<'de, D: Deserializer<'de>>(
        deserializer: D,
    ) -> Result<Option<Disablevlanhwfilter>, D::Error> {
        let raw = serde_json::Value::deserialize(deserializer)?;
        debug!("Disablevlanhwfilter deserializing {raw}");
        match raw {
            serde_json::Value::String(token) => match token.as_str() {
                "opt0" => Ok(Some(Disablevlanhwfilter::EnableVlanHardwareFiltering)),
                "opt1" => Ok(Some(Disablevlanhwfilter::DisableVlanHardwareFiltering)),
                "opt2" => Ok(Some(Disablevlanhwfilter::LeaveDefault)),
                "" => Ok(None),
                other => Err(serde::de::Error::custom(format!(
                    "unknown Disablevlanhwfilter variant: {other}"
                ))),
            },
            serde_json::Value::Null => Ok(None),
            _ => Err(serde::de::Error::custom(
                "expected string for Disablevlanhwfilter",
            )),
        }
    }
}
// ── OPNsense serde helpers (same helpers used across all generated models) ───
pub mod opn_bool {
    //! `Option<bool>` ⇄ OPNsense's `"1"` / `"0"` / `""` wire encoding.
    use serde::{Deserialize, Deserializer, Serializer};

    /// Write `Some(true)` as `"1"`, `Some(false)` as `"0"`, `None` as `""`.
    pub fn serialize<S: Serializer>(
        value: &Option<bool>,
        serializer: S,
    ) -> Result<S::Ok, S::Error> {
        let text = match value {
            Some(true) => "1",
            Some(false) => "0",
            None => "",
        };
        serializer.serialize_str(text)
    }

    /// Accept string, native bool, the numbers 0/1, or null; `""`/null → `None`.
    pub fn deserialize<'de, D: Deserializer<'de>>(
        deserializer: D,
    ) -> Result<Option<bool>, D::Error> {
        let raw = serde_json::Value::deserialize(deserializer)?;
        match raw {
            serde_json::Value::Bool(b) => Ok(Some(b)),
            serde_json::Value::Null => Ok(None),
            serde_json::Value::String(s) => match s.as_str() {
                "1" | "true" => Ok(Some(true)),
                "0" | "false" => Ok(Some(false)),
                "" => Ok(None),
                other => Err(serde::de::Error::custom(format!(
                    "invalid bool string: {other}"
                ))),
            },
            serde_json::Value::Number(n) => match n.as_u64() {
                Some(1) => Ok(Some(true)),
                Some(0) => Ok(Some(false)),
                _ => Err(serde::de::Error::custom(format!(
                    "invalid bool number: {n}"
                ))),
            },
            _ => Err(serde::de::Error::custom(
                "expected string, bool, or number for bool field",
            )),
        }
    }
}
pub mod opn_bool_req {
    //! Required `bool` ⇄ `"1"` / `"0"` — no empty-string/None case.
    use serde::{Deserialize, Deserializer, Serializer};

    /// Write `true` as `"1"` and `false` as `"0"`.
    pub fn serialize<S: Serializer>(value: &bool, serializer: S) -> Result<S::Ok, S::Error> {
        let text = if *value { "1" } else { "0" };
        serializer.serialize_str(text)
    }

    /// Accept string, native bool, or the numbers 0/1; anything else errors.
    pub fn deserialize<'de, D: Deserializer<'de>>(deserializer: D) -> Result<bool, D::Error> {
        let raw = serde_json::Value::deserialize(deserializer)?;
        match raw {
            serde_json::Value::Bool(b) => Ok(b),
            serde_json::Value::String(s) => match s.as_str() {
                "1" | "true" => Ok(true),
                "0" | "false" => Ok(false),
                other => Err(serde::de::Error::custom(format!(
                    "invalid required bool: {other}"
                ))),
            },
            serde_json::Value::Number(n) => match n.as_u64() {
                Some(1) => Ok(true),
                Some(0) => Ok(false),
                _ => Err(serde::de::Error::custom(format!(
                    "invalid required bool number: {n}"
                ))),
            },
            _ => Err(serde::de::Error::custom(
                "expected string, bool, or number for required bool",
            )),
        }
    }
}
pub mod opn_u32 {
    //! `Option<u32>` ⇄ decimal string, with `""`/null meaning `None`.
    use serde::{Deserialize, Deserializer, Serializer};

    /// Write the number as a decimal string; `None` becomes `""`.
    pub fn serialize<S: Serializer>(value: &Option<u32>, serializer: S) -> Result<S::Ok, S::Error> {
        let text = match value {
            Some(v) => v.to_string(),
            None => String::new(),
        };
        serializer.serialize_str(&text)
    }

    /// Accept a decimal string, a JSON number, or null; `""`/null → `None`.
    pub fn deserialize<'de, D: Deserializer<'de>>(
        deserializer: D,
    ) -> Result<Option<u32>, D::Error> {
        let raw = serde_json::Value::deserialize(deserializer)?;
        match raw {
            serde_json::Value::Null => Ok(None),
            serde_json::Value::String(s) if s.is_empty() => Ok(None),
            serde_json::Value::String(s) => {
                s.parse::<u32>().map(Some).map_err(serde::de::Error::custom)
            }
            // Negative, fractional, or > u32::MAX numbers are a hard error.
            serde_json::Value::Number(n) => n
                .as_u64()
                .and_then(|n| u32::try_from(n).ok())
                .map(Some)
                .ok_or_else(|| serde::de::Error::custom("number out of u32 range")),
            _ => Err(serde::de::Error::custom(
                "expected string or number for u32",
            )),
        }
    }
}
// ── Root model ────────────────────────────────────────────────────────────────
/// Root model for `GET /api/interfaces/settings/get`.
///
/// All boolean fields use OPNsense's `"1"`/`"0"` wire encoding.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct InterfacesSettings {
    /// BooleanField — disable checksum offloading
    #[serde(with = "crate::generated::interfaces::opn_bool_req")]
    pub disablechecksumoffloading: bool,
    /// BooleanField — disable segmentation offloading
    #[serde(with = "crate::generated::interfaces::opn_bool_req")]
    pub disablesegmentationoffloading: bool,
    /// BooleanField — disable large receive offloading
    #[serde(with = "crate::generated::interfaces::opn_bool_req")]
    pub disablelargereceiveoffloading: bool,
    /// OptionField: opt0|opt1|opt2 — VLAN hardware filtering.
    /// An empty string on the wire deserializes to `None`.
    #[serde(
        default,
        with = "crate::generated::interfaces::serde_disablevlanhwfilter"
    )]
    pub disablevlanhwfilter: Option<Disablevlanhwfilter>,
    /// BooleanField — disable IPv6
    #[serde(with = "crate::generated::interfaces::opn_bool_req")]
    pub disableipv6: bool,
    /// BooleanField — DHCPv6 no-release
    #[serde(with = "crate::generated::interfaces::opn_bool_req")]
    pub dhcp6_norelease: bool,
    /// BooleanField — DHCPv6 debug
    #[serde(with = "crate::generated::interfaces::opn_bool_req")]
    pub dhcp6_debug: bool,
    /// DUIDField — DHCPv6 DUID (optional)
    #[serde(default)]
    pub dhcp6_duid: Option<String>,
    /// IntegerField — DHCPv6 release timeout (seconds);
    /// empty string on the wire deserializes to `None`.
    #[serde(default, with = "crate::generated::interfaces::opn_u32")]
    pub dhcp6_ratimeout: Option<u32>,
}
/// Response wrapper for `GET /api/interfaces/settings/get`.
///
/// OPNsense returns `{ "settings": { ... } }` where the inner object is an
/// [`InterfacesSettings`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct InterfacesSettingsResponse {
    /// The controller's `internalModelName` is `"settings"`; this field holds
    /// the deserialized payload found under that wrapper key.
    pub settings: InterfacesSettings,
}

73
opnsense-api/src/lib.rs Normal file
View File

@@ -0,0 +1,73 @@
//! OPNsense typed API client.
//!
//! ## Design goals
//!
//! - **Generated types**: Model structs and enums are produced by `opnsense-codegen`
//!   from OPNsense XML model files and placed in [`generated`].
//!
//! - **Hand-written runtime**: HTTP client, auth, and error handling live in this crate
//!   and are never auto-generated.
//!
//! ## Crate layout
//!
//! ```text
//! opnsense-api/
//!   src/
//!     lib.rs      — public re-exports
//!     error.rs    — Error type
//!     client.rs   — OpnsenseClient
//!     auth.rs     — credentials helpers
//!     generated/
//!       dnsmasq.rs     — types from Dnsmasq.xml
//!       interfaces.rs  — types from Interfaces/Settings.xml
//!       ...
//!   examples/
//!     list_dnsmasq.rs
//!     list_packages.rs
//!     install_package.rs
//!     firmware_update.rs
//! ```
//!
//! ## Usage
//!
//! ```ignore
//! use opnsense_api::OpnsenseClient;
//!
//! let client = OpnsenseClient::builder()
//!     .base_url("https://my-firewall.local/api")
//!     .auth_from_key_secret("key", "secret")
//!     .build()?;
//!
//! // GET a typed model response
//! let resp = client.get_typed::<dnsmasq::DnsmasqSettingsResponse>("dnsmasq", "settings", "get").await?;
//! println!("{:#?}", resp);
//! ```

// Hand-written runtime modules (never generated).
pub mod error;
pub mod auth;
pub mod client;

// Convenience re-exports so callers can `use opnsense_api::{OpnsenseClient, Error}`.
pub use error::Error;
pub use client::OpnsenseClient;

pub mod generated {
    //! Auto-generated model types.
    //!
    //! Each module corresponds to one OPNsense model (e.g. `interfaces`, `haproxy`,
    //! `dnsmasq`). These files are produced by `opnsense-codegen` — do not edit
    //! by hand.
    //!
    //! ## Standard module structure
    //!
    //! Every generated module exposes:
    //!
    //! ```ignore
    //! pub mod serde_helpers { /* per-enum serde modules */ }
    //! pub struct RootModel { ... }
    //! pub struct RootModelResponse { pub model_key: RootModel }
    //! pub enum SomeEnum { ... }
    //! ```
    pub mod dnsmasq;
    pub mod interfaces;
}

View File

@@ -0,0 +1,24 @@
# Code generator: turns OPNsense XML model files into typed Rust modules.
# Run via the `opnsense-codegen` binary (see src/main.rs for the CLI).
[package]
name = "opnsense-codegen"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true

[[bin]]
name = "opnsense-codegen"
path = "src/main.rs"

[dependencies]
serde.workspace = true
serde_json.workspace = true
clap.workspace = true
thiserror.workspace = true
log.workspace = true
env_logger.workspace = true
# XML model parsing; the `serialize` feature enables serde integration.
quick-xml = { version = "0.37", features = ["serialize"] }
# Case conversion (CamelCase <-> snake_case) for generated identifiers.
heck = "0.5"
toml = "0.8"

[dev-dependencies]
pretty_assertions.workspace = true

View File

@@ -0,0 +1,92 @@
<!-- Test fixture for opnsense-codegen.
     Exercises every supported field type (Boolean/Text/Integer/Option/
     Network/Interface/Hostname), a nested container (`upstream`),
     UUID-keyed ArrayFields (`hosts`, `tags`), a ModelRelationField,
     masks, and constraints. -->
<model>
  <mount>/example/service</mount>
  <description>Example Service for Codegen Testing</description>
  <version>1.0.0</version>
  <items>
    <enable type="BooleanField"/>
    <name type="TextField">
      <Required>Y</Required>
    </name>
    <!-- IntegerField with explicit bounds and a default. -->
    <port type="IntegerField">
      <MinimumValue>1</MinimumValue>
      <MaximumValue>65535</MaximumValue>
      <Default>8080</Default>
    </port>
    <!-- OptionField: becomes a Rust enum in the generated code. -->
    <log_level type="OptionField">
      <OptionValues>
        <debug>Debug</debug>
        <info>Info</info>
        <warn>Warning</warn>
        <error>Error</error>
      </OptionValues>
      <Default>info</Default>
    </log_level>
    <listen_address type="NetworkField">
      <NetMaskAllowed>N</NetMaskAllowed>
    </listen_address>
    <!-- Multiple=Y: serialized as a comma-separated list on the wire. -->
    <interface type="InterfaceField">
      <Multiple>Y</Multiple>
    </interface>
    <domain type="HostnameField">
      <IsDNSName>Y</IsDNSName>
    </domain>
    <cache_size type="IntegerField">
      <MinimumValue>0</MinimumValue>
    </cache_size>
    <!-- Untyped element = nested container struct. -->
    <upstream>
      <dns_servers type="NetworkField">
        <NetMaskAllowed>N</NetMaskAllowed>
        <AsList>Y</AsList>
      </dns_servers>
      <use_system_dns type="BooleanField">
        <Required>Y</Required>
        <Default>1</Default>
      </use_system_dns>
    </upstream>
    <!-- ArrayField: collection of items keyed by UUID on the wire. -->
    <hosts type="ArrayField">
      <enabled type="BooleanField">
        <Default>1</Default>
      </enabled>
      <hostname type="HostnameField">
        <Required>Y</Required>
      </hostname>
      <ip type="NetworkField">
        <NetMaskAllowed>N</NetMaskAllowed>
        <Required>Y</Required>
      </ip>
      <!-- Cross-references an entry in the `tags` ArrayField below. -->
      <tag type="ModelRelationField">
        <Model>
          <tag>
            <source>OPNsense.Example.ExampleService</source>
            <items>tags</items>
            <display>name</display>
          </tag>
        </Model>
      </tag>
      <aliases type="HostnameField">
        <AsList>Y</AsList>
      </aliases>
      <description type="TextField"/>
    </hosts>
    <tags type="ArrayField">
      <name type="TextField">
        <Required>Y</Required>
        <Mask>/^[a-zA-Z0-9_]{1,64}$/</Mask>
        <Constraints>
          <check001>
            <type>UniqueConstraint</type>
            <ValidationMessage>Tag names must be unique.</ValidationMessage>
          </check001>
        </Constraints>
      </name>
      <color type="OptionField">
        <OptionValues>
          <red>Red</red>
          <green>Green</green>
          <blue>Blue</blue>
        </OptionValues>
      </color>
      <description type="TextField"/>
    </tags>
  </items>
</model>

View File

@@ -0,0 +1,41 @@
{
"exampleservice": {
"enable": "1",
"name": "myservice",
"port": "8080",
"log_level": "info",
"listen_address": "192.168.1.1",
"interface": "lan,wan",
"domain": "example.local",
"cache_size": "1000",
"upstream": {
"dns_servers": "8.8.8.8,8.8.4.4",
"use_system_dns": "1"
},
"hosts": {
"c9a1b2d3-e4f5-6789-abcd-ef0123456789": {
"enabled": "1",
"hostname": "server1",
"ip": "10.0.0.1",
"tag": "d1e2f3a4-b5c6-7890-abcd-ef0123456789",
"aliases": "srv1,server1.lan",
"description": "Primary server"
},
"a1b2c3d4-e5f6-7890-abcd-ef0123456789": {
"enabled": "0",
"hostname": "server2",
"ip": "10.0.0.2",
"tag": "",
"aliases": "",
"description": ""
}
},
"tags": {
"d1e2f3a4-b5c6-7890-abcd-ef0123456789": {
"name": "production",
"color": "green",
"description": "Production servers"
}
}
}
}

View File

@@ -0,0 +1,254 @@
{
"mount": "/example/service",
"description": "Example Service for Codegen Testing",
"version": "1.0.0",
"api_key": "exampleservice",
"root_struct_name": "ExampleService",
"enums": [
{
"name": "LogLevel",
"variants": [
{ "rust_name": "Debug", "wire_value": "debug" },
{ "rust_name": "Info", "wire_value": "info" },
{ "rust_name": "Warn", "wire_value": "warn" },
{ "rust_name": "Error", "wire_value": "error" }
]
},
{
"name": "TagColor",
"variants": [
{ "rust_name": "Red", "wire_value": "red" },
{ "rust_name": "Green", "wire_value": "green" },
{ "rust_name": "Blue", "wire_value": "blue" }
]
}
],
"structs": [
{
"name": "ExampleService",
"kind": "root",
"fields": [
{
"name": "enable",
"rust_type": "Option<bool>",
"serde_with": "crate::serde_helpers::opn_bool",
"opn_type": "BooleanField",
"required": false,
"doc": "Enable service"
},
{
"name": "name",
"rust_type": "String",
"opn_type": "TextField",
"required": true,
"doc": "Service name (required)"
},
{
"name": "port",
"rust_type": "Option<u16>",
"serde_with": "crate::serde_helpers::opn_u16",
"opn_type": "IntegerField",
"required": false,
"default": "8080",
"min": 1,
"max": 65535,
"doc": "Port number [1-65535] default: 8080"
},
{
"name": "log_level",
"rust_type": "Option<LogLevel>",
"serde_with": "serde_log_level",
"opn_type": "OptionField",
"required": false,
"default": "info",
"enum_ref": "LogLevel",
"doc": "Log level: debug|info|warn|error, default: info"
},
{
"name": "listen_address",
"rust_type": "Option<String>",
"serde_with": "crate::serde_helpers::opn_string",
"opn_type": "NetworkField",
"required": false,
"doc": "Listen IP address"
},
{
"name": "interface",
"rust_type": "Option<Vec<String>>",
"serde_with": "crate::serde_helpers::opn_csv",
"opn_type": "InterfaceField",
"required": false,
"multiple": true,
"doc": "Bind interfaces (comma-separated)"
},
{
"name": "domain",
"rust_type": "Option<String>",
"serde_with": "crate::serde_helpers::opn_string",
"opn_type": "HostnameField",
"required": false,
"doc": "DNS domain name"
},
{
"name": "cache_size",
"rust_type": "Option<u32>",
"serde_with": "crate::serde_helpers::opn_u32",
"opn_type": "IntegerField",
"required": false,
"min": 0,
"doc": "Cache size [0-∞)"
},
{
"name": "upstream",
"rust_type": "ExampleServiceUpstream",
"opn_type": "Container",
"required": true,
"field_kind": "container",
"struct_ref": "ExampleServiceUpstream",
"doc": "Upstream DNS settings"
},
{
"name": "hosts",
"rust_type": "HashMap<String, ExampleServiceHost>",
"opn_type": "ArrayField",
"required": false,
"field_kind": "array_field",
"struct_ref": "ExampleServiceHost",
"doc": "Host override entries (keyed by UUID)"
},
{
"name": "tags",
"rust_type": "HashMap<String, ExampleServiceTag>",
"opn_type": "ArrayField",
"required": false,
"field_kind": "array_field",
"struct_ref": "ExampleServiceTag",
"doc": "Tag definitions (keyed by UUID)"
}
]
},
{
"name": "ExampleServiceUpstream",
"kind": "container",
"json_key": "upstream",
"fields": [
{
"name": "dns_servers",
"rust_type": "Option<Vec<String>>",
"serde_with": "crate::serde_helpers::opn_csv",
"opn_type": "NetworkField",
"required": false,
"as_list": true,
"doc": "Upstream DNS servers (comma-separated IPs)"
},
{
"name": "use_system_dns",
"rust_type": "bool",
"serde_with": "crate::serde_helpers::opn_bool_req",
"opn_type": "BooleanField",
"required": true,
"default": "1",
"doc": "Use system DNS servers (required, default: true)"
}
]
},
{
"name": "ExampleServiceHost",
"kind": "array_item",
"json_key": "hosts",
"fields": [
{
"name": "enabled",
"rust_type": "Option<bool>",
"serde_with": "crate::serde_helpers::opn_bool",
"opn_type": "BooleanField",
"required": false,
"default": "1",
"doc": "Entry enabled (default: true)"
},
{
"name": "hostname",
"rust_type": "String",
"opn_type": "HostnameField",
"required": true,
"doc": "Hostname (required)"
},
{
"name": "ip",
"rust_type": "String",
"opn_type": "NetworkField",
"required": true,
"doc": "IP address (required)"
},
{
"name": "tag",
"rust_type": "Option<String>",
"serde_with": "crate::serde_helpers::opn_string",
"opn_type": "ModelRelationField",
"required": false,
"relation": {
"source": "OPNsense.Example.ExampleService",
"items": "tags",
"display": "name"
},
"doc": "Associated tag UUID"
},
{
"name": "aliases",
"rust_type": "Option<Vec<String>>",
"serde_with": "crate::serde_helpers::opn_csv",
"opn_type": "HostnameField",
"required": false,
"as_list": true,
"doc": "Hostname aliases (comma-separated)"
},
{
"name": "description",
"rust_type": "Option<String>",
"serde_with": "crate::serde_helpers::opn_string",
"opn_type": "TextField",
"required": false,
"doc": "Description"
}
]
},
{
"name": "ExampleServiceTag",
"kind": "array_item",
"json_key": "tags",
"fields": [
{
"name": "name",
"rust_type": "String",
"opn_type": "TextField",
"required": true,
"mask": "/^[a-zA-Z0-9_]{1,64}$/",
"constraints": [
{
"type": "UniqueConstraint",
"message": "Tag names must be unique."
}
],
"doc": "Tag name (required, unique, alphanumeric+underscore, 1-64 chars)"
},
{
"name": "color",
"rust_type": "Option<TagColor>",
"serde_with": "serde_tag_color",
"opn_type": "OptionField",
"required": false,
"enum_ref": "TagColor",
"doc": "Tag color: red|green|blue"
},
{
"name": "description",
"rust_type": "Option<String>",
"serde_with": "crate::serde_helpers::opn_string",
"opn_type": "TextField",
"required": false,
"doc": "Description"
}
]
}
]
}

View File

@@ -0,0 +1,304 @@
use crate::ir::{EnumIR, FieldIR, ModelIR, StructIR, StructKind};
use std::fmt::{Result as FmtResult, Write};
/// Accumulates generated Rust source text for one OPNsense model.
///
/// Drive it with [`CodeGenerator::generate`], then take the result with
/// [`CodeGenerator::into_output`].
pub struct CodeGenerator {
    // Generated Rust source, built line-by-line via `std::fmt::Write`.
    output: String,
}
impl CodeGenerator {
    /// Creates a generator with an empty output buffer.
    pub fn new() -> Self {
        Self {
            output: String::new(),
        }
    }

    /// Emits a complete generated Rust module for `model` into the buffer:
    /// provenance header, enums (each with a serde helper module), structs,
    /// and finally the `{Root}Response` API envelope wrapper, in that order.
    ///
    /// Returns `fmt::Result` because output goes through `std::fmt::Write`;
    /// writing into a `String` does not fail in practice.
    pub fn generate(&mut self, model: &ModelIR) -> FmtResult {
        // NOTE(review): `module_name` is computed but never used in this
        // method — looks like a leftover (callers use `derive_module_name`
        // directly); confirm and remove.
        let module_name = derive_module_name(&model.root_struct_name);
        // Provenance header for the generated file.
        writeln!(self.output, "//! Auto-generated from OPNsense model XML")?;
        writeln!(
            self.output,
            "//! Mount: `{}` — Version: `{}`",
            model.mount, model.version
        )?;
        writeln!(self.output, "//!")?;
        writeln!(
            self.output,
            "//! **DO NOT EDIT** — produced by opnsense-codegen"
        )?;
        writeln!(self.output)?;
        writeln!(self.output, "use serde::{{Deserialize, Serialize}};")?;
        writeln!(self.output, "use std::collections::HashMap;")?;
        writeln!(self.output)?;
        // "Enums" section banner.
        writeln!(
            self.output,
            "// ═══════════════════════════════════════════════════════════════════════════"
        )?;
        writeln!(self.output, "// Enums")?;
        writeln!(
            self.output,
            "// ═══════════════════════════════════════════════════════════════════════════"
        )?;
        writeln!(self.output)?;
        for enum_ir in &model.enums {
            self.generate_enum(enum_ir)?;
        }
        writeln!(self.output)?;
        // "Structs" section banner.
        writeln!(
            self.output,
            "// ═══════════════════════════════════════════════════════════════════════════"
        )?;
        writeln!(self.output, "// Structs")?;
        writeln!(
            self.output,
            "// ═══════════════════════════════════════════════════════════════════════════"
        )?;
        writeln!(self.output)?;
        for struct_ir in &model.structs {
            self.generate_struct(struct_ir, model)?;
        }
        writeln!(self.output)?;
        // "API Wrapper" section banner.
        writeln!(
            self.output,
            "// ═══════════════════════════════════════════════════════════════════════════"
        )?;
        writeln!(self.output, "// API Wrapper")?;
        writeln!(
            self.output,
            "// ═══════════════════════════════════════════════════════════════════════════"
        )?;
        writeln!(self.output)?;
        let response_name = format!("{}Response", model.root_struct_name);
        // Envelope key: the manifest's explicit `api_key`, or derived from
        // the mount path with slashes stripped
        // ("/example/service" -> "exampleservice").
        let api_key = if model.api_key.is_empty() {
            model.mount.trim_start_matches('/').replace('/', "")
        } else {
            model.api_key.clone()
        };
        writeln!(
            self.output,
            "/// Wrapper matching the OPNsense GET response envelope."
        )?;
        writeln!(
            self.output,
            "/// `GET /api/{}/get` returns {{ \"{}\": {{ ... }} }}",
            api_key, api_key
        )?;
        writeln!(
            self.output,
            "#[derive(Debug, Clone, Serialize, Deserialize)]"
        )?;
        writeln!(self.output, "pub struct {} {{", response_name)?;
        writeln!(
            self.output,
            " pub {}: {},",
            api_key, model.root_struct_name
        )?;
        writeln!(self.output, "}}")?;
        Ok(())
    }

    /// Emits one enum plus its `serde_<snake_name>` helper module.
    ///
    /// The helper (de)serializes `Option<Enum>` to/from the wire strings,
    /// mapping the empty string (and JSON null) to `None` — OPNsense uses
    /// "" for unset option fields.
    fn generate_enum(&mut self, enum_ir: &EnumIR) -> FmtResult {
        let snake_name = to_snake_case(&enum_ir.name);
        writeln!(self.output, "/// {}", enum_ir.name)?;
        writeln!(self.output, "#[derive(Debug, Clone, PartialEq, Eq, Hash)]")?;
        writeln!(self.output, "pub enum {} {{", enum_ir.name)?;
        for variant in &enum_ir.variants {
            writeln!(self.output, " {},", variant.rust_name)?;
        }
        writeln!(self.output, "}}")?;
        writeln!(self.output)?;
        // Helper module, referenced from fields via #[serde(with = "...")].
        writeln!(self.output, "pub(crate) mod serde_{} {{", snake_name)?;
        writeln!(self.output, " use super::{};", enum_ir.name)?;
        writeln!(
            self.output,
            " use serde::{{Deserialize, Deserializer, Serializer}};"
        )?;
        writeln!(self.output)?;
        // serialize: Some(variant) -> wire string, None -> "".
        writeln!(self.output, " pub fn serialize<S: Serializer>(")?;
        writeln!(self.output, " value: &Option<{}>,", enum_ir.name)?;
        writeln!(self.output, " serializer: S,")?;
        writeln!(self.output, " ) -> Result<S::Ok, S::Error> {{")?;
        writeln!(
            self.output,
            " serializer.serialize_str(match value {{"
        )?;
        for variant in &enum_ir.variants {
            writeln!(
                self.output,
                " Some({}::{}) => \"{}\",",
                enum_ir.name, variant.rust_name, variant.wire_value
            )?;
        }
        writeln!(self.output, " None => \"\",")?;
        writeln!(self.output, " }})")?;
        writeln!(self.output, " }}")?;
        writeln!(self.output)?;
        // deserialize: wire string -> Some(variant); "" or null -> None;
        // any other value is a custom error naming the offending input.
        writeln!(
            self.output,
            " pub fn deserialize<'de, D: Deserializer<'de>>("
        )?;
        writeln!(self.output, " deserializer: D,")?;
        writeln!(
            self.output,
            " ) -> Result<Option<{}>, D::Error> {{",
            enum_ir.name
        )?;
        // Goes through serde_json::Value so both String and Null inputs can
        // be handled; the consuming crate must depend on serde_json.
        writeln!(
            self.output,
            " let v = serde_json::Value::deserialize(deserializer)?;"
        )?;
        writeln!(self.output, " match v {{")?;
        writeln!(
            self.output,
            " serde_json::Value::String(s) => match s.as_str() {{"
        )?;
        for variant in &enum_ir.variants {
            writeln!(
                self.output,
                " \"{}\" => Ok(Some({}::{})),",
                variant.wire_value, enum_ir.name, variant.rust_name
            )?;
        }
        writeln!(self.output, " \"\" => Ok(None),")?;
        writeln!(
            self.output,
            " other => Err(serde::de::Error::custom(format!("
        )?;
        writeln!(
            self.output,
            " \"unknown {} variant: {{}}\", other",
            enum_ir.name
        )?;
        writeln!(self.output, " ))),")?;
        writeln!(self.output, " }},")?;
        writeln!(
            self.output,
            " serde_json::Value::Null => Ok(None),"
        )?;
        writeln!(
            self.output,
            " _ => Err(serde::de::Error::custom(\"expected string for {}\")),",
            enum_ir.name
        )?;
        writeln!(self.output, " }}")?;
        writeln!(self.output, " }}")?;
        writeln!(self.output, "}}")?;
        writeln!(self.output)?;
        Ok(())
    }

    /// Emits one struct definition for any of the three kinds.
    ///
    /// NOTE(review): the three arms are identical except for the doc
    /// comment — candidate for deduplication (compute the doc string in a
    /// `match`, then emit derives/fields once).
    fn generate_struct(&mut self, struct_ir: &StructIR, model: &ModelIR) -> FmtResult {
        match struct_ir.kind {
            StructKind::Root => {
                writeln!(self.output, "/// Root model for `{}`", model.mount)?;
                writeln!(
                    self.output,
                    "#[derive(Debug, Clone, Serialize, Deserialize)]"
                )?;
                writeln!(self.output, "pub struct {} {{", struct_ir.name)?;
                for field in &struct_ir.fields {
                    self.generate_field(field)?;
                }
                writeln!(self.output, "}}")?;
            }
            StructKind::Container => {
                // Containers are documented by their JSON key when known.
                let doc = struct_ir
                    .json_key
                    .as_ref()
                    .map(|k| format!("Container for `{}`", k))
                    .unwrap_or_else(|| "Container".to_string());
                writeln!(self.output, "/// {}", doc)?;
                writeln!(
                    self.output,
                    "#[derive(Debug, Clone, Serialize, Deserialize)]"
                )?;
                writeln!(self.output, "pub struct {} {{", struct_ir.name)?;
                for field in &struct_ir.fields {
                    self.generate_field(field)?;
                }
                writeln!(self.output, "}}")?;
            }
            StructKind::ArrayItem => {
                writeln!(
                    self.output,
                    "/// Array item for `{}`",
                    struct_ir.json_key.as_deref().unwrap_or("items")
                )?;
                writeln!(
                    self.output,
                    "#[derive(Debug, Clone, Serialize, Deserialize)]"
                )?;
                writeln!(self.output, "pub struct {} {{", struct_ir.name)?;
                for field in &struct_ir.fields {
                    self.generate_field(field)?;
                }
                writeln!(self.output, "}}")?;
            }
        }
        writeln!(self.output)?;
        Ok(())
    }

    /// Emits one field: optional doc comment, serde attribute, declaration.
    fn generate_field(&mut self, field: &FieldIR) -> FmtResult {
        if let Some(ref doc) = field.doc {
            writeln!(self.output, " /// {}", doc)?;
        }
        if let Some(ref serde_with) = field.serde_with {
            // Custom (de)serializer; optional fields also get `default` so a
            // missing key does not fail deserialization.
            if field.required {
                writeln!(self.output, " #[serde(with = \"{}\")]", serde_with)?;
            } else {
                writeln!(
                    self.output,
                    " #[serde(default, with = \"{}\")]",
                    serde_with
                )?;
            }
        } else if field.field_kind.as_deref() == Some("array_field") {
            // ArrayFields deserialize into maps; default to empty when absent.
            writeln!(self.output, " #[serde(default)]")?;
        } else if !field.required {
            writeln!(self.output, " #[serde(default)]")?;
        }
        writeln!(self.output, " pub {}: {},", field.name, field.rust_type)?;
        writeln!(self.output)?;
        Ok(())
    }

    /// Consumes the generator, returning the accumulated source text.
    pub fn into_output(self) -> String {
        self.output
    }
}
/// Converts an UpperCamelCase identifier to snake_case,
/// e.g. `LogLevel` -> `log_level`, `ExampleService` -> `example_service`.
///
/// Runs of consecutive capitals are treated as one acronym word:
/// `DNSServer` -> `dns_server` (the previous per-capital split produced
/// `d_n_s_server`). Inputs with no uppercase letters pass through unchanged.
fn to_snake_case(s: &str) -> String {
    let chars: Vec<char> = s.chars().collect();
    let mut result = String::with_capacity(s.len() + 4);
    for (i, &c) in chars.iter().enumerate() {
        if c.is_uppercase() && i > 0 {
            // Word boundary: either the previous char was not uppercase
            // (camelCase hump), or this capital starts a new word right
            // after an acronym run (next char is lowercase).
            let prev_upper = chars[i - 1].is_uppercase();
            let next_lower = chars.get(i + 1).is_some_and(|n| n.is_lowercase());
            if !prev_upper || next_lower {
                result.push('_');
            }
        }
        result.push(c.to_ascii_lowercase());
    }
    result
}
/// Derives the generated module (file) name from the model's root struct
/// name, e.g. `ExampleService` -> `example_service` (used as `<name>.rs`).
pub fn derive_module_name(struct_name: &str) -> String {
    to_snake_case(struct_name)
}
/// Convenience entry point: runs a [`CodeGenerator`] over `model` and
/// returns the generated Rust source as a `String`.
///
/// Panics only if formatting into a `String` fails, which cannot happen.
pub fn generate(model: &ModelIR) -> String {
    let mut codegen = CodeGenerator::new();
    codegen.generate(model).expect("generation should not fail");
    codegen.into_output()
}

1718
opnsense-codegen/src/lib.rs Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,105 @@
use clap::{Parser, Subcommand};
use std::path::PathBuf;
// Top-level CLI definition. The #[command] attributes set the binary name
// and the short help banner shown by `--help`.
#[derive(Parser)]
#[command(name = "opnsense-codegen")]
#[command(about = "OPNsense SDK code generator", long_about = None)]
struct Cli {
    // Selected subcommand: Parse, Generate, or Build.
    #[command(subcommand)]
    command: Commands,
}
// Subcommands. The `///` doc comments below double as clap help text shown
// to the user, so they are runtime-visible strings and are left untouched.
#[derive(Subcommand)]
enum Commands {
    /// Parse an XML model file and output JSON IR
    Parse {
        /// Path to the XML model file
        #[arg(long)]
        xml: PathBuf,
        /// Output the IR as JSON to stdout
        #[arg(long, default_value_t = false)]
        ir_only: bool,
    },
    /// Generate Rust code from an XML model file
    Generate {
        /// Path to the XML model file
        #[arg(long)]
        xml: PathBuf,
        /// Path to the TOML manifest (optional)
        #[arg(long)]
        manifest: Option<PathBuf>,
        /// Output directory for generated files
        #[arg(long)]
        output_dir: Option<PathBuf>,
    },
    /// Build all models from manifests and generate the full opnsense-client
    Build {
        /// Path to the manifests directory
        #[arg(long, default_value = "manifests")]
        manifests_dir: PathBuf,
        /// Output directory for generated files
        #[arg(long, default_value = "../opnsense-client/src")]
        output_dir: PathBuf,
    },
}
/// CLI entry point: initializes logging, parses arguments, and dispatches
/// to the selected subcommand. Errors (I/O, parse, JSON) propagate via `?`
/// and are printed by the runtime with a non-zero exit status.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    env_logger::init();
    let cli = Cli::parse();
    match cli.command {
        Commands::Parse { xml, ir_only } => {
            let xml_data = std::fs::read(&xml)?;
            let model = opnsense_codegen::parser::parse_xml(&xml_data)
                .map_err(|e| format!("parse error: {}", e))?;
            if ir_only {
                // Machine-readable mode: dump the IR as pretty JSON.
                let json = serde_json::to_string_pretty(&model)?;
                println!("{}", json);
            } else {
                // Human-readable summary of the parsed model.
                println!(
                    "Parsed model: {} (struct: {})",
                    model.mount, model.root_struct_name
                );
                println!(" {} enums", model.enums.len());
                println!(" {} structs", model.structs.len());
                for s in &model.structs {
                    println!(" - {} ({:?}): {} fields", s.name, s.kind, s.fields.len());
                }
            }
        }
        Commands::Generate {
            xml,
            manifest,
            output_dir,
        } => {
            // `--manifest` is accepted by the CLI but not wired up yet.
            // Warn instead of silently ignoring user input (the binding was
            // previously destructured and never used).
            if let Some(path) = &manifest {
                eprintln!(
                    "warning: --manifest {} is not yet supported and will be ignored",
                    path.display()
                );
            }
            let xml_data = std::fs::read(&xml)?;
            let model = opnsense_codegen::parser::parse_xml(&xml_data)
                .map_err(|e| format!("parse error: {}", e))?;
            let rust_code = opnsense_codegen::codegen::generate(&model);
            if let Some(dir) = output_dir {
                // Write `<module_name>.rs` into the output directory,
                // creating it if needed; otherwise print to stdout.
                std::fs::create_dir_all(&dir)?;
                let module_name =
                    opnsense_codegen::codegen::derive_module_name(&model.root_struct_name);
                let out_file = dir.join(format!("{}.rs", module_name));
                std::fs::write(&out_file, &rust_code)?;
                println!("Generated: {}", out_file.display());
            } else {
                println!("{}", rust_code);
            }
        }
        Commands::Build {
            manifests_dir,
            output_dir,
        } => {
            // Stub: batch generation from a manifests directory is planned
            // but not implemented yet.
            println!("Build command not yet implemented");
            println!("Manifests dir: {}", manifests_dir.display());
            println!("Output dir: {}", output_dir.display());
        }
    }
    Ok(())
}

File diff suppressed because it is too large Load Diff

1
opnsense-codegen/vendor/core vendored Submodule