Compare commits

..

1 Commits

464 changed files with 7056 additions and 38229 deletions

View File

@@ -1,6 +1,2 @@
target/
Dockerfile
.git
data
target
demos
Dockerfile

5
.gitignore vendored
View File

@@ -24,8 +24,3 @@ Cargo.lock
# MSVC Windows builds of rustc generate these, which store debugging information
*.pdb
.harmony_generated
# Useful to create ignore folders for temp files and notes
ignore

View File

@@ -1,26 +0,0 @@
{
"db_name": "SQLite",
"query": "SELECT host_id, installation_device FROM host_role_mapping WHERE role = ?",
"describe": {
"columns": [
{
"name": "host_id",
"ordinal": 0,
"type_info": "Text"
},
{
"name": "installation_device",
"ordinal": 1,
"type_info": "Text"
}
],
"parameters": {
"Right": 1
},
"nullable": [
false,
true
]
},
"hash": "24f719d57144ecf4daa55f0aa5836c165872d70164401c0388e8d625f1b72d7b"
}

View File

@@ -0,0 +1,20 @@
{
"db_name": "SQLite",
"query": "SELECT host_id FROM host_role_mapping WHERE role = ?",
"describe": {
"columns": [
{
"name": "host_id",
"ordinal": 0,
"type_info": "Text"
}
],
"parameters": {
"Right": 1
},
"nullable": [
false
]
},
"hash": "2ea29df2326f7c84bd4100ad510a3fd4878dc2e217dc83f9bf45a402dfd62a91"
}

View File

@@ -1,12 +1,12 @@
{
"db_name": "SQLite",
"query": "\n INSERT INTO host_role_mapping (host_id, role, installation_device)\n VALUES (?, ?, ?)\n ",
"query": "\n INSERT INTO host_role_mapping (host_id, role)\n VALUES (?, ?)\n ",
"describe": {
"columns": [],
"parameters": {
"Right": 3
"Right": 2
},
"nullable": []
},
"hash": "6fcc29cfdbdf3b2cee94a4844e227f09b245dd8f079832a9a7b774151cb03af6"
"hash": "df7a7c9cfdd0972e2e0ce7ea444ba8bc9d708a4fb89d5593a0be2bbebde62aff"
}

View File

@@ -1,548 +0,0 @@
# CI and Testing Strategy for Harmony
## Executive Summary
Harmony aims to become a CNCF project, requiring a robust CI pipeline that demonstrates real-world reliability. The goal is to run **all examples** in CI, from simple k3d deployments to full HA OKD clusters on bare metal. This document provides context for designing and implementing this testing infrastructure.
---
## Project Context
### What is Harmony?
Harmony is an infrastructure automation framework that is **code-first and code-only**. Operators write Rust programs to declare and drive infrastructure, rather than YAML files or DSL configs. Key differentiators:
1. **Compile-time safety**: The type system prevents "config-is-valid-but-platform-is-wrong" errors
2. **Topology abstraction**: Write once, deploy to any environment (local k3d, OKD, bare metal, cloud)
3. **Capability-based design**: Scores declare what they need; topologies provide what they have
### Core Abstractions
| Concept | Description |
|---------|-------------|
| **Score** | Declarative description of desired state (the "what") |
| **Topology** | Logical representation of infrastructure (the "where") |
| **Capability** | A feature a topology offers (the "how") |
| **Interpret** | Execution logic connecting Score to Topology |
### Compile-Time Verification
```rust
// This compiles only if K8sAnywhereTopology provides K8sclient + HelmCommand
impl<T: Topology + K8sclient + HelmCommand> Score<T> for MyScore { ... }
// This FAILS to compile - LinuxHostTopology doesn't provide K8sclient
// (intentionally broken example for testing)
impl<T: Topology + K8sclient> Score<T> for K8sResourceScore { ... }
// error: LinuxHostTopology does not implement K8sclient
```
---
## Current Examples Inventory
### Summary Statistics
| Category | Count | CI Complexity |
|----------|-------|---------------|
| k3d-compatible | 22 | Low - single k3d cluster |
| OKD-specific | 4 | Medium - requires OKD cluster |
| Bare metal | 5 | High - requires physical infra or nested virtualization |
| Multi-cluster | 3 | High - requires multiple K8s clusters |
| No infra needed | 4 | Trivial - local only |
### Detailed Example Classification
#### Tier 1: k3d-Compatible (22 examples)
Can run on a local k3d cluster with minimal setup:
| Example | Topology | Capabilities | Special Notes |
|---------|----------|--------------|---------------|
| zitadel | K8sAnywhereTopology | K8sClient, HelmCommand | SSO/Identity |
| node_health | K8sAnywhereTopology | K8sClient | Health checks |
| public_postgres | K8sAnywhereTopology | K8sClient, HelmCommand, TlsRouter | Needs ingress |
| openbao | K8sAnywhereTopology | K8sClient, HelmCommand | Vault alternative |
| rust | K8sAnywhereTopology | K8sClient, HelmCommand, TlsRouter | Webapp deployment |
| cert_manager | K8sAnywhereTopology | K8sClient, CertificateManagement | TLS certificates |
| try_rust_webapp | K8sAnywhereTopology | K8sClient, HelmCommand, TlsRouter | Full webapp |
| monitoring | K8sAnywhereTopology | K8sClient, HelmCommand, Observability | Prometheus |
| application_monitoring_with_tenant | K8sAnywhereTopology | K8sClient, HelmCommand, TenantManager, Observability | Multi-tenant |
| monitoring_with_tenant | K8sAnywhereTopology | K8sClient, HelmCommand, TenantManager, Observability | Multi-tenant |
| postgresql | K8sAnywhereTopology | K8sClient, HelmCommand | CloudNativePG |
| ntfy | K8sAnywhereTopology | K8sClient, HelmCommand | Notifications |
| tenant | K8sAnywhereTopology | K8sClient, TenantManager | Namespace isolation |
| lamp | K8sAnywhereTopology | K8sClient, HelmCommand, TlsRouter | LAMP stack |
| k8s_drain_node | K8sAnywhereTopology | K8sClient | Node operations |
| k8s_write_file_on_node | K8sAnywhereTopology | K8sClient | Node operations |
| remove_rook_osd | K8sAnywhereTopology | K8sClient | Ceph operations |
| validate_ceph_cluster_health | K8sAnywhereTopology | K8sClient | Ceph health |
| kube-rs | Direct kube | K8sClient | Raw kube-rs demo |
| brocade_snmp_server | K8sAnywhereTopology | K8sClient | SNMP collector |
| harmony_inventory_builder | LocalhostTopology | None | Network scanning |
| cli | LocalhostTopology | None | CLI demo |
#### Tier 2: OKD/OpenShift-Specific (4 examples)
Require OKD/OpenShift features not available in vanilla K8s:
| Example | Topology | OKD-Specific Feature |
|---------|----------|---------------------|
| okd_cluster_alerts | K8sAnywhereTopology | OpenShift Monitoring CRDs |
| operatorhub_catalog | K8sAnywhereTopology | OpenShift OperatorHub |
| rhob_application_monitoring | K8sAnywhereTopology | RHOB (Red Hat Observability) |
| nats-supercluster | K8sAnywhereTopology | OKD Routes (OpenShift Ingress) |
#### Tier 3: Bare Metal Infrastructure (5 examples)
Require physical hardware or full virtualization:
| Example | Topology | Physical Requirements |
|---------|----------|----------------------|
| okd_installation | HAClusterTopology | OPNSense, Brocade switch, PXE boot, 3+ nodes |
| okd_pxe | HAClusterTopology | OPNSense, Brocade switch, PXE infrastructure |
| sttest | HAClusterTopology | Full HA cluster with all network services |
| opnsense | OPNSenseFirewall | OPNSense firewall access |
| opnsense_node_exporter | Custom | OPNSense firewall |
#### Tier 4: Multi-Cluster (3 examples)
Require multiple K8s clusters:
| Example | Topology | Clusters Required |
|---------|----------|-------------------|
| nats | K8sAnywhereTopology × 2 | 2 clusters with NATS gateways |
| nats-module | DecentralizedTopology | 3 clusters for supercluster |
| multisite_postgres | FailoverTopology | 2 clusters for replication |
---
## Testing Categories
### 1. Compile-Time Tests
These tests verify that the type system correctly rejects invalid configurations:
```rust
// Should NOT compile - K8sResourceScore on LinuxHostTopology
#[test]
#[compile_fail]
fn test_k8s_score_on_linux_host() {
let score = K8sResourceScore::new();
let topology = LinuxHostTopology::new();
// This line should fail to compile
harmony_cli::run(Inventory::empty(), topology, vec![Box::new(score)], None);
}
// Should compile - K8sResourceScore on K8sAnywhereTopology
#[test]
fn test_k8s_score_on_k8s_topology() {
let score = K8sResourceScore::new();
let topology = K8sAnywhereTopology::from_env();
// This should compile
harmony_cli::run(Inventory::empty(), topology, vec![Box::new(score)], None);
}
```
**Implementation Options:**
- `trybuild` crate for compile-time failure tests
- Separate `tests/compile_fail/` directory with expected error messages
### 2. Unit Tests
Pure Rust logic without external dependencies:
- Score serialization/deserialization
- Inventory parsing
- Type conversions
- CRD generation
**Requirements:**
- No external services
- Sub-second execution
- Run on every PR
### 3. Integration Tests (k3d)
Deploy to a local k3d cluster:
**Setup:**
```bash
# Install k3d
curl -s https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | bash
# Create cluster
k3d cluster create harmony-test \
--agents 3 \
--k3s-arg "--disable=traefik@server:0"
# Wait for ready
kubectl wait --for=condition=Ready nodes --all --timeout=120s
```
**Test Matrix:**
| Example | k3d | Test Type |
|---------|-----|-----------|
| zitadel | ✅ | Deploy + health check |
| cert_manager | ✅ | Deploy + certificate issuance |
| monitoring | ✅ | Deploy + metric collection |
| postgresql | ✅ | Deploy + database connectivity |
| tenant | ✅ | Namespace creation + isolation |
### 4. Integration Tests (OKD)
Deploy to OKD/OpenShift cluster:
**Options:**
1. **Nested virtualization**: Run OKD in VMs (slow, expensive)
2. **CRC (CodeReady Containers)**: Single-node OKD (resource intensive)
3. **Managed OpenShift**: AWS/Azure/GCP (costly)
4. **Existing cluster**: Connect to pre-provisioned cluster (fastest)
**Test Matrix:**
| Example | OKD Required | Test Type |
|---------|--------------|-----------|
| okd_cluster_alerts | ✅ | Alert rule deployment |
| rhob_application_monitoring | ✅ | RHOB stack deployment |
| operatorhub_catalog | ✅ | Operator installation |
### 5. End-to-End Tests (Full Infrastructure)
Complete infrastructure deployment including bare metal:
**Options:**
1. **Libvirt + KVM**: Virtual machines on CI runner
2. **Nested KVM**: KVM inside KVM (for cloud CI)
3. **Dedicated hardware**: Physical test lab
4. **Mock/Hybrid**: Mock physical components, real K8s
---
## CI Environment Options
### Option A: GitHub Actions (Current Standard)
**Pros:**
- Native GitHub integration
- Large runner ecosystem
- Free for open source
**Cons:**
- Limited nested virtualization support
- 6-hour job timeout
- Resource constraints on free runners
**Matrix:**
```yaml
strategy:
matrix:
os: [ubuntu-latest]
rust: [stable, beta]
k8s: [k3d, kind]
tier: [unit, k3d-integration]
```
### Option B: Self-Hosted Runners
**Pros:**
- Full control over environment
- Can run nested virtualization
- No time limits
- Persistent state between runs
**Cons:**
- Maintenance overhead
- Cost of infrastructure
- Security considerations
**Setup:**
- Bare metal servers with KVM support
- Pre-installed k3d, kind, CRC
- OPNSense VM for network tests
### Option C: Hybrid (GitHub + Self-Hosted)
**Pros:**
- Fast unit tests on GitHub runners
- Heavy tests on self-hosted infrastructure
- Cost-effective
**Cons:**
- Two CI systems to maintain
- Complexity in test distribution
### Option D: Cloud CI (CircleCI, GitLab CI, etc.)
**Pros:**
- Often better resource options
- Docker-in-Docker support
- Better nested virtualization
**Cons:**
- Cost
- Less GitHub-native
---
## Performance Requirements
### Target Execution Times
| Test Category | Target Time | Current (est.) |
|---------------|-------------|----------------|
| Compile-time tests | < 30s | Unknown |
| Unit tests | < 60s | Unknown |
| k3d integration (per example) | < 120s | 60-300s |
| Full k3d matrix | < 15 min | 30-60 min |
| OKD integration | < 30 min | 1-2 hours |
| Full E2E | < 2 hours | 4-8 hours |
### Sub-Second Performance Strategies
1. **Parallel execution**: Run independent tests concurrently
2. **Incremental testing**: Only run affected tests on changes
3. **Cached clusters**: Pre-warm k3d clusters
4. **Layered testing**: Fail fast on cheaper tests
5. **Mock external services**: Fake Discord webhooks, etc.
---
## Test Data and Secrets Management
### Secrets Required
| Secret | Use | Storage |
|--------|-----|---------|
| Discord webhook URL | Alert receiver tests | GitHub Secrets |
| OPNSense credentials | Network tests | Self-hosted only |
| Cloud provider creds | Multi-cloud tests | Vault / GitHub Secrets |
| TLS certificates | Ingress tests | Generated on-the-fly |
### Test Data
| Data | Source | Strategy |
|------|--------|----------|
| Container images | Public registries | Cache locally |
| Helm charts | Public repos | Vendor in repo |
| K8s manifests | Generated | Dynamic |
---
## Proposed Test Architecture
```
┌─────────────────────────────────────────────────────────────────┐
│ harmony_e2e_tests Package │
│ (cargo run -p harmony_e2e_tests) │
├─────────────────────────────────────────────────────────────────┤
│ ┌─────────────┐ ┌─────────────┐ ┌─────────────────────────┐ │
│ │ Compile │ │ Unit │ │ Compile-Fail Tests │ │
│ │ Tests │ │ Tests │ │ (trybuild) │ │
│ │ < 30s │ │ < 60s │ │ < 30s │ │
│ └─────────────┘ └─────────────┘ └─────────────────────────┘ │
│ │
│ ┌───────────────────────────────────────────────────────────┐ │
│ │ k3d Integration Tests │ │
│ │ Self-provisions k3d cluster, runs 22 examples │ │
│ │ ┌─────────┐ ┌─────────┐ ┌─────────┐ ┌─────────┐ │ │
│ │ │ zitadel │ │ cert-mgr│ │ monitor │ │ postgres│ ... │ │
│ │ │ 60s │ │ 90s │ │ 120s │ │ 90s │ │ │
│ │ └─────────┘ └─────────┘ └─────────┘ └─────────┘ │ │
│ │ Parallel Execution │ │
│ └───────────────────────────────────────────────────────────┘ │
│ │
│ ┌───────────────────────────────────────────────────────────┐ │
│ │ OKD Integration Tests │ │
│ │ Connects to existing OKD cluster or provisions via KVM │ │
│ │ ┌─────────────────┐ ┌─────────────────────────────┐ │ │
│ │ │ okd_cluster_ │ │ rhob_application_ │ │ │
│ │ │ alerts (5 min) │ │ monitoring (10 min) │ │ │
│ │ └─────────────────┘ └─────────────────────────────┘ │ │
│ └───────────────────────────────────────────────────────────┘ │
│ │
│ ┌───────────────────────────────────────────────────────────┐ │
│ │ KVM-based E2E Tests │ │
│ │ Uses Harmony's KVM module to provision test VMs │ │
│ │ ┌─────────────────┐ ┌─────────────────────────────┐ │ │
│ │ │ okd_installation│ │ Full HA cluster deployment │ │ │
│ │ │ (30-60 min) │ │ (60-120 min) │ │ │
│ │ └─────────────────┘ └─────────────────────────────┘ │ │
│ └───────────────────────────────────────────────────────────┘ │
└─────────────────────────────────────────────────────────────────┘
Any CI system (GitHub Actions, GitLab CI, Jenkins, cron) just runs:
cargo run -p harmony_e2e_tests
```
┌─────────────────────────────────────────────────────────────────┐
GitHub Actions
├─────────────────────────────────────────────────────────────────┤
┌─────────────┐ ┌─────────────┐ ┌─────────────────────────┐
Compile Unit Compile-Fail Tests
Tests Tests (trybuild)
< 30s < 60s < 30s
└─────────────┘ └─────────────┘ └─────────────────────────┘
┌───────────────────────────────────────────────────────────┐
k3d Integration Tests
┌─────────┐ ┌─────────┐ ┌─────────┐ ┌─────────┐
zitadel cert-mgr monitor postgres ...
60s 90s 120s 90s
└─────────┘ └─────────┘ └─────────┘ └─────────┘
Parallel Execution
└───────────────────────────────────────────────────────────┘
└─────────────────────────────────────────────────────────────────┘
┌─────────────────────────────────────────────────────────────────┐
Self-Hosted Runners
├─────────────────────────────────────────────────────────────────┤
┌───────────────────────────────────────────────────────────┐
OKD Integration Tests
┌─────────────────┐ ┌─────────────────────────────┐
okd_cluster_ rhob_application_
alerts (5 min) monitoring (10 min)
└─────────────────┘ └─────────────────────────────┘
└───────────────────────────────────────────────────────────┘
┌───────────────────────────────────────────────────────────┐
KVM-based E2E Tests (Harmony provisions)
┌─────────────────────────────────────────────────────┐
Harmony KVM Module provisions test VMs
- OKD HA Cluster (3 control plane, 2 workers)
- OPNSense VM (router/firewall)
- Brocade simulator VM
└─────────────────────────────────────────────────────┘
└───────────────────────────────────────────────────────────┘
└─────────────────────────────────────────────────────────────────┘
```
---
## Questions for Researchers
### Critical Questions
1. **Self-contained test runner**: How to design `harmony_e2e_tests` package that runs all tests with a single `cargo run` command?
2. **Nested Virtualization**: What are the prerequisites for running KVM inside a test environment?
3. **Cost Optimization**: How to minimize cloud costs while running comprehensive E2E tests?
4. **Test Isolation**: How to ensure test isolation when running parallel k3d tests?
5. **State Management**: Should we persist k3d clusters between test runs, or create fresh each time?
6. **Mocking Strategy**: Which external services (Discord, OPNSense, etc.) should be mocked vs. real?
7. **Compile-Fail Tests**: Best practices for testing Rust compile-time errors?
8. **Multi-Cluster Tests**: How to efficiently provision and connect multiple K8s clusters in tests?
9. **Secrets Management**: How to handle secrets for test environments without external CI dependencies?
10. **Test Flakiness**: Strategies for reducing flakiness in infrastructure tests?
11. **Reporting**: How to present test results for complex multi-environment test matrices?
12. **Prerequisite Detection**: How to detect and validate prerequisites (Docker, k3d, KVM) before running tests?
### Research Areas
1. **CI/CD Tools**: Evaluate GitHub Actions, GitLab CI, CircleCI, Tekton, Prow for Harmony's needs
2. **K8s Test Tools**: Evaluate kind, k3d, minikube, microk8s for local testing
3. **Mock Frameworks**: Evaluate mock-server, wiremock, hoverfly for external service mocking
4. **Test Frameworks**: Evaluate built-in Rust test, nextest, cargo-tarpaulin for performance
---
## Success Criteria
### Week 1 (Agentic Velocity)
- [ ] Compile-time verification tests working
- [ ] Unit tests for monitoring module
- [ ] First 5 k3d examples running in CI
- [ ] Mock framework for Discord webhooks
### Week 2
- [ ] All 22 k3d-compatible examples in CI
- [ ] OKD self-hosted runner operational
- [ ] KVM module reviewed and ready for CI
### Week 3-4
- [ ] Full E2E tests with KVM infrastructure
- [ ] Multi-cluster tests automated
- [ ] All examples tested in CI
### Month 2
- [ ] Sub-15-minute total CI time
- [ ] Weekly E2E tests on bare metal
- [ ] Documentation complete
- [ ] Ready for CNCF submission
---
## Prerequisites
### Hardware Requirements
| Component | Minimum | Recommended |
|-----------|---------|------------|
| CPU | 4 cores | 8+ cores (for parallel tests) |
| RAM | 8 GB | 32 GB (for KVM E2E) |
| Disk | 50 GB SSD | 500 GB NVMe |
| Docker | Required | Latest |
| k3d | Required | v5.6.0 |
| Kubectl | Required | v1.28.0 |
| libvirt | Required | 9.0.0 (for KVM tests) |
### Software Requirements
| Tool | Version |
|------|---------|
| Rust | 1.75+ |
| Docker | 24.0+ |
| k3d | v5.6.0+ |
| kubectl | v1.28+ |
| libvirt | 9.0.0 |
### Installation (One-time)
```bash
# Install Rust
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
# Install Docker
curl -fsSL https://get.docker.com | sh
# Install k3d
curl -s https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | bash
# Install kubectl
curl -LO "https://dl.k8s.io/release/v1.28.0/bin/linux/amd64/kubectl"
sudo install -m 0755 kubectl /usr/local/bin/kubectl
```
---
## Reference Materials
### Existing Code
- Examples: `examples/*/src/main.rs`
- Topologies: `harmony/src/domain/topology/`
- Capabilities: `harmony/src/domain/topology/` (trait definitions)
- Scores: `harmony/src/modules/*/`
### Documentation
- [Coding Guide](docs/coding-guide.md)
- [Core Concepts](docs/concepts.md)
- [Monitoring Architecture](docs/monitoring.md)
- [ADR-020: Monitoring](adr/020-monitoring-alerting-architecture.md)
### Related Projects
- Crossplane (similar abstraction model)
- Pulumi (infrastructure as code)
- Terraform (state management patterns)
- Flux/ArgoCD (GitOps testing patterns)

View File

@@ -1,201 +0,0 @@
# Pragmatic CI and Testing Roadmap for Harmony
**Status**: Active implementation (March 2026)
**Core Principle**: Self-contained test runner — no dependency on centralized CI servers
All tests are executable via one command:
```bash
cargo run -p harmony_e2e_tests
```
The `harmony_e2e_tests` package:
- Provisions its own infrastructure when needed (k3d, KVM VMs)
- Runs all test tiers in sequence or selectively
- Reports results in text, JSON or JUnit XML
- Works identically on developer laptops, any Linux server, GitHub Actions, GitLab CI, Jenkins, cron jobs, etc.
- Is the single source of truth for what "passing CI" means
## Why This Approach
1. **Portability** — same command & behavior everywhere
2. **Harmony tests Harmony** — the framework validates itself
3. **No vendor lock-in** — GitHub Actions / GitLab CI are just triggers
4. **Perfect reproducibility** — developers reproduce any CI failure locally in seconds
5. **Offline capable** — after initial setup, most tiers run without internet
## Architecture: `harmony_e2e_tests` Package
```
harmony_e2e_tests/
├── Cargo.toml
├── src/
│ ├── main.rs # CLI entry point
│ ├── lib.rs # Test runner core logic
│ ├── tiers/
│ │ ├── mod.rs
│ │ ├── compile_fail.rs # trybuild-based compile-time checks
│ │ ├── unit.rs # cargo test --lib --workspace
│ │ ├── k3d.rs # k3d cluster + parallel example runs
│ │ ├── okd.rs # connect to existing OKD cluster
│ │ └── kvm.rs # full E2E via Harmony's own KVM module
│ ├── mocks/
│ │ ├── mod.rs
│ │ ├── discord.rs # mock Discord webhook receiver
│ │ └── opnsense.rs # mock OPNSense firewall API
│ └── infrastructure/
│ ├── mod.rs
│ ├── k3d.rs # k3d cluster lifecycle
│ └── kvm.rs # helper wrappers around KVM score
└── tests/
├── ui/ # trybuild compile-fail cases (*.rs + *.stderr)
└── fixtures/ # static test data / golden files
```
## CLI Interface ( clap-based )
```bash
# Run everything (default)
cargo run -p harmony_e2e_tests
# Specific tier
cargo run -p harmony_e2e_tests -- --tier k3d
cargo run -p harmony_e2e_tests -- --tier compile
# Filter to one example
cargo run -p harmony_e2e_tests -- --tier k3d --example monitoring
# Parallelism control (k3d tier)
cargo run -p harmony_e2e_tests -- --parallel 8
# Reporting
cargo run -p harmony_e2e_tests -- --report junit.xml
cargo run -p harmony_e2e_tests -- --format json
# Debug helpers
cargo run -p harmony_e2e_tests -- --verbose --dry-run
```
## Test Tiers Ordered by Speed & Cost
| Tier | Duration target | Runner type | What it tests | Isolation strategy |
|------------------|------------------|----------------------|----------------------------------------------------|-----------------------------|
| Compile-fail | < 20 s | Any (GitHub free) | Invalid configs don't compile | Per-file trybuild |
| Unit | < 60 s | Any | Pure Rust logic | cargo test |
| k3d              | 8–15 min         | GitHub / self-hosted | 22+ k3d-compatible examples                        | Fresh k3d cluster + ns-per-example |
| OKD              | 10–30 min        | Self-hosted / CRC    | OKD-specific features (Routes, Monitoring CRDs…)   | Existing cluster via KUBECONFIG |
| KVM Full E2E     | 60–180 min       | Self-hosted bare-metal | Full HA OKD install + bare-metal scenarios         | Harmony KVM score provisions VMs |
### Tier Details & Implementation Notes
1. **Compile-fail**
Uses **`trybuild`** crate (standard in Rust ecosystem).
Place intentional compile errors in `tests/ui/*.rs` with matching `*.stderr` expectation files.
One test function replaces the old custom loop:
```rust
#[test]
fn ui() {
let t = trybuild::TestCases::new();
t.compile_fail("tests/ui/*.rs");
}
```
2. **Unit**
Simple wrapper: `cargo test --lib --workspace -- --nocapture`
Consider `cargo-nextest` later for a 2–3× speedup if test count grows.
3. **k3d**
- Provisions isolated cluster once at start (`k3d cluster create --agents 3 --no-lb --k3s-arg "--disable=traefik@server:0"`)
- Discovers examples via `[package.metadata.harmony.test-tier = "k3d"]` in `Cargo.toml`
- Runs in parallel with tokio semaphore (default 5–8 slots)
- Each example gets its own namespace
- Uses `defer` / `scopeguard` for guaranteed cleanup
- Mocks Discord webhook and OPNSense API
4. **OKD**
Connects to pre-provisioned cluster via `KUBECONFIG`.
Validates it is actually OpenShift/OKD before proceeding.
5. **KVM**
Uses **Harmony's own KVM module** to provision test VMs (control-plane + workers + OPNSense).
→ True “dogfooding” — if the E2E fails, the KVM score itself is likely broken.
## CI Integration Patterns
### Fast PR validation (GitHub Actions)
```yaml
name: Fast Tests
on: [push, pull_request]
jobs:
fast:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
- name: Install Docker & k3d
uses: nolar/setup-k3d-k3s@v1
- run: cargo run -p harmony_e2e_tests -- --tier compile,unit,k3d --report junit.xml
- uses: actions/upload-artifact@v4
with: { name: test-results, path: junit.xml }
```
### Nightly / Merge heavy tests (self-hosted runner)
```yaml
name: Full E2E
on:
schedule: [{ cron: "0 3 * * *" }]
push: { branches: [main] }
jobs:
full:
runs-on: [self-hosted, linux, x64, kvm-capable]
steps:
- uses: actions/checkout@v4
- run: cargo run -p harmony_e2e_tests -- --tier okd,kvm --verbose --report junit.xml
```
## Prerequisites Auto-Check & Install
```rust
// in harmony_e2e_tests/src/infrastructure/prerequisites.rs
async fn ensure_k3d() -> Result<()> { … } // curl | bash if missing
async fn ensure_docker() -> Result<()> { … }
fn check_kvm_support() -> Result<()> { … } // /dev/kvm + libvirt
```
## Success Criteria
### Step 1
- [ ] `harmony_e2e_tests` package created & basic CLI working
- [ ] trybuild compile-fail suite passing
- [ ] First 8–10 k3d examples running reliably in CI
- [ ] Mock server for Discord webhook completed
### Step 2
- [ ] All 22 k3d-compatible examples green
- [ ] OKD tier running on dedicated self-hosted runner
- [ ] JUnit reporting + GitHub check integration
- [ ] Namespace isolation + automatic retry on transient k8s errors
### Step 3
- [ ] KVM full E2E green on bare-metal runner (nightly)
- [ ] Multi-cluster examples (nats, multisite-postgres) automated
- [ ] Total fast CI time < 12 minutes on GitHub runners
- [ ] Documentation: “How to add a new tested example”
## Quick Start for New Contributors
```bash
# One-time setup
rustup update stable
cargo install trybuild cargo-nextest # optional but recommended
# Run locally (most common)
cargo run -p harmony_e2e_tests -- --tier k3d --verbose
# Just compile checks + unit
cargo test -p harmony_e2e_tests
```

2741
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -7,7 +7,6 @@ members = [
"harmony_types",
"harmony_macros",
"harmony_tui",
"harmony_execution",
"opnsense-config",
"opnsense-config-xml",
"harmony_cli",
@@ -15,14 +14,7 @@ members = [
"harmony_composer",
"harmony_inventory_agent",
"harmony_secret_derive",
"harmony_secret",
"adr/agent_discovery/mdns",
"brocade",
"harmony_agent",
"harmony_agent/deploy",
"harmony_node_readiness",
"harmony-k8s",
"harmony_e2e_tests",
"harmony_secret", "adr/agent_discovery/mdns",
]
[workspace.package]
@@ -41,8 +33,6 @@ tokio = { version = "1.40", features = [
"macros",
"rt-multi-thread",
] }
tokio-retry = "0.3.0"
tokio-util = "0.7.15"
cidr = { features = ["serde"], version = "0.2" }
russh = "0.45"
russh-keys = "0.45"
@@ -57,7 +47,6 @@ kube = { version = "1.1.0", features = [
"jsonpatch",
] }
k8s-openapi = { version = "0.25", features = ["v1_30"] }
# TODO replace with https://github.com/bourumir-wyngs/serde-saphyr as serde_yaml is deprecated https://github.com/sebastienrousseau/serde_yml
serde_yaml = "0.9"
serde-value = "0.7"
http = "1.2"
@@ -77,12 +66,5 @@ thiserror = "2.0.14"
serde = { version = "1.0.209", features = ["derive", "rc"] }
serde_json = "1.0.127"
askama = "0.14"
sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite"] }
reqwest = { version = "0.12", features = [
"blocking",
"stream",
"rustls-tls",
"http2",
"json",
], default-features = false }
assertor = "0.0.4"
sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite" ] }
reqwest = { version = "0.12", features = ["blocking", "stream", "rustls-tls", "http2", "json"], default-features = false }

View File

@@ -1,8 +1,4 @@
# Harmony
Open-source infrastructure orchestration that treats your platform like first-class code.
In other words, Harmony is a **next-generation platform engineering framework**.
# Harmony : Open-source infrastructure orchestration that treats your platform like first-class code
_By [NationTech](https://nationtech.io)_
@@ -22,7 +18,9 @@ All in **one strongly-typed Rust codebase**.
From a **developer laptop** to a **global production cluster**, a single **source of truth** drives the **full software lifecycle.**
## The Harmony Philosophy
---
## 1 · The Harmony Philosophy
Infrastructure is essential, but it shouldn't be your core business. Harmony is built on three guiding principles that make modern platforms reliable, repeatable, and easy to reason about.
@@ -34,18 +32,9 @@ Infrastructure is essential, but it shouldnt be your core business. Harmony i
These principles surface as simple, ergonomic Rust APIs that let teams focus on their product while trusting the platform underneath.
## Where to Start
---
We have a comprehensive set of documentation right here in the repository.
| I want to... | Start Here |
| ----------------- | ------------------------------------------------------------------ |
| Get Started | [Getting Started Guide](./docs/guides/getting-started.md) |
| See an Example | [Use Case: Deploy a Rust Web App](./docs/use-cases/rust-webapp.md) |
| Explore | [Documentation Hub](./docs/README.md) |
| See Core Concepts | [Core Concepts Explained](./docs/concepts.md) |
## Quick Look: Deploy a Rust Webapp
## 2 · Quick Start
The snippet below spins up a complete **production-grade Rust + Leptos Webapp** with monitoring. Swap it for your own scores to deploy anything from microservices to machine-learning pipelines.
@@ -103,33 +92,63 @@ async fn main() {
}
```
To run this:
Run it:
- Clone the repository: `git clone https://git.nationtech.io/nationtech/harmony`
- Install dependencies: `cargo build --release`
- Run the example: `cargo run --example try_rust_webapp`
```bash
cargo run
```
## Documentation
Harmony analyses the code, shows an execution plan in a TUI, and applies it once you confirm. Same code, same binary—every environment.
All documentation is in the `/docs` directory.
---
- [Documentation Hub](./docs/README.md): The main entry point for all documentation.
- [Core Concepts](./docs/concepts.md): A detailed look at Score, Topology, Capability, Inventory, and Interpret.
- [Component Catalogs](./docs/catalogs/README.md): Discover all available Scores, Topologies, and Capabilities.
- [Developer Guide](./docs/guides/developer-guide.md): Learn how to write your own Scores and Topologies.
## 3 · Core Concepts
## Architectural Decision Records
| Term | One-liner |
| ---------------- | ---------------------------------------------------------------------------------------------------- |
| **Score<T>** | Declarative description of the desired state (e.g., `LAMPScore`). |
| **Interpret<T>** | Imperative logic that realises a `Score` on a specific environment. |
| **Topology** | An environment (local k3d, AWS, bare-metal) exposing verified _Capabilities_ (Kubernetes, DNS, …). |
| **Maestro** | Orchestrator that compiles Scores + Topology, ensuring all capabilities line up **at compile-time**. |
| **Inventory** | Optional catalogue of physical assets for bare-metal and edge deployments. |
- [ADR-001 · Why Rust](adr/001-rust.md)
- [ADR-003 · Infrastructure Abstractions](adr/003-infrastructure-abstractions.md)
- [ADR-006 · Secret Management](adr/006-secret-management.md)
- [ADR-011 · Multi-Tenant Cluster](adr/011-multi-tenant-cluster.md)
A visual overview is in the diagram below.
## Contribute
[Harmony Core Architecture](docs/diagrams/Harmony_Core_Architecture.drawio.svg)
Discussions and roadmap live in [Issues](https://git.nationtech.io/nationtech/harmony/-/issues). PRs, ideas, and feedback are welcome!
---
## License
## 4 · Install
Prerequisites:
- Rust
- Docker (if you deploy locally)
- `kubectl` / `helm` for Kubernetes-based topologies
```bash
git clone https://git.nationtech.io/nationtech/harmony
cd harmony
cargo build --release # builds the CLI, TUI and libraries
```
---
## 5 · Learning More
- **Architectural Decision Records** dive into the rationale
- [ADR-001 · Why Rust](adr/001-rust.md)
- [ADR-003 · Infrastructure Abstractions](adr/003-infrastructure-abstractions.md)
- [ADR-006 · Secret Management](adr/006-secret-management.md)
- [ADR-011 · Multi-Tenant Cluster](adr/011-multi-tenant-cluster.md)
- **Extending Harmony** write new Scores / Interprets, add hardware like OPNsense firewalls, or embed Harmony in your own tooling (`/docs`).
- **Community** discussions and roadmap live in [GitLab issues](https://git.nationtech.io/nationtech/harmony/-/issues). PRs, ideas, and feedback are welcome!
---
## 6 · License
Harmony is released under the **GNU AGPL v3**.

View File

@@ -1,114 +0,0 @@
# Architecture Decision Record: Higher-Order Topologies
**Initial Author:** Jean-Gabriel Gill-Couture
**Initial Date:** 2025-12-08
**Last Updated Date:** 2025-12-08
## Status
Implemented
## Context
Harmony models infrastructure as **Topologies** (deployment targets like `K8sAnywhereTopology`, `LinuxHostTopology`) implementing **Capabilities** (tech traits like `PostgreSQL`, `Docker`).
**Higher-Order Topologies** (e.g., `FailoverTopology<T>`) compose/orchestrate capabilities *across* multiple underlying topologies (e.g., primary+replica `T`).
Naive design requires manual `impl Capability for HigherOrderTopology<T>` *per T per capability*, causing:
- **Impl explosion**: N topologies × M capabilities = N×M boilerplate.
- **ISP violation**: Topologies forced to impl unrelated capabilities.
- **Maintenance hell**: New topology needs impls for *all* orchestrated capabilities; new capability needs impls for *all* topologies/higher-order.
- **Barrier to extension**: Users can't easily add topologies without todos/panics.
This makes scaling Harmony impractical as ecosystem grows.
## Decision
Use **blanket trait impls** on higher-order topologies to *automatically* derive orchestration:
````rust
/// Higher-Order Topology: Orchestrates capabilities across sub-topologies.
pub struct FailoverTopology<T> {
/// Primary sub-topology.
primary: T,
/// Replica sub-topology.
replica: T,
}
/// Automatically provides PostgreSQL failover for *any* `T: PostgreSQL`.
/// Delegates to primary for queries; orchestrates deploy across both.
#[async_trait]
impl<T: PostgreSQL> PostgreSQL for FailoverTopology<T> {
async fn deploy(&self, config: &PostgreSQLConfig) -> Result<String, String> {
// Deploy primary; extract certs/endpoint;
// deploy replica with pg_basebackup + TLS passthrough.
// (Full impl logged/elaborated.)
}
// Delegate queries to primary.
async fn get_replication_certs(&self, cluster_name: &str) -> Result<ReplicationCerts, String> {
self.primary.get_replication_certs(cluster_name).await
}
// ...
}
/// Similarly for other capabilities.
#[async_trait]
impl<T: Docker> Docker for FailoverTopology<T> {
// Failover Docker orchestration.
}
````
**Key properties:**
- **Auto-derivation**: `Failover<K8sAnywhere>` gets `PostgreSQL` iff `K8sAnywhere: PostgreSQL`.
- **No boilerplate**: One blanket impl per capability *per higher-order type*.
## Rationale
- **Composition via generics**: Rust trait solver auto-selects impls; zero runtime cost.
- **Compile-time safety**: Missing `T: Capability` → compile error (no panics).
- **Scalable**: O(capabilities) impls per higher-order; new `T` auto-works.
- **ISP-respecting**: Capabilities only surface if sub-topology provides.
- **Centralized logic**: Orchestration (e.g., cert propagation) in one place.
**Example usage:**
````rust
// ✅ Works: K8sAnywhere: PostgreSQL → Failover provides failover PG
let pg_failover: FailoverTopology<K8sAnywhereTopology> = ...;
pg_failover.deploy_pg(config).await;
// ✅ Works: LinuxHost: Docker → Failover provides failover Docker
let docker_failover: FailoverTopology<LinuxHostTopology> = ...;
docker_failover.deploy_docker(...).await;
// ❌ Compile fail: K8sAnywhere !: Docker
let invalid: FailoverTopology<K8sAnywhereTopology>;
invalid.deploy_docker(...); // `T: Docker` bound unsatisfied
````
## Consequences
**Pros:**
- **Extensible**: New topology `AWSTopology: PostgreSQL` → instant `Failover<AWSTopology>: PostgreSQL`.
- **Lean**: No useless impls (e.g., no `K8sAnywhere: Docker`).
- **Observable**: Logs trace every step.
**Cons:**
- **Monomorphization**: Generics generate code per T (mitigated: few Ts).
- **Delegation opacity**: Relies on rustdoc/logs for internals.
## Alternatives considered
| Approach | Pros | Cons |
|----------|------|------|
| **Manual per-T impls**<br>`impl PG for Failover<K8s> {..}`<br>`impl PG for Failover<Linux> {..}` | Explicit control | N×M explosion; violates ISP; hard to extend. |
| **Dynamic trait objects**<br>`Box<dyn AnyCapability>` | Runtime flex | Perf hit; type erasure; error-prone dispatch. |
| **Mega-topology trait**<br>All-in-one `OrchestratedTopology` | Simple wiring | Monolithic; poor composition. |
| **Registry dispatch**<br>Runtime capability lookup | Decoupled | Complex; no compile safety; perf/debug overhead. |
**Selected**: Blanket impls leverage Rust generics for safe, zero-cost composition.
## Additional Notes
- Applies to `MultisiteTopology<T>`, `ShardedTopology<T>`, etc.
- `FailoverTopology` in `failover.rs` is first implementation.

View File

@@ -1,153 +0,0 @@
//! Example of Higher-Order Topologies in Harmony.
//! Demonstrates how `FailoverTopology<T>` automatically provides failover for *any* capability
//! supported by a sub-topology `T` via blanket trait impls.
//!
//! Key insight: No manual impls per T or capability -- scales effortlessly.
//! Users can:
//! - Write new `Topology` (impl capabilities on a struct).
//! - Compose with `FailoverTopology` (gets capabilities if T has them).
//! - Compile fails if capability missing (safety).
use async_trait::async_trait;
use tokio;
/// Capability trait: Deploy and manage PostgreSQL.
#[async_trait]
pub trait PostgreSQL {
    /// Deploys a PostgreSQL instance onto this topology.
    /// Returns a human-readable status string on success, or an error message on failure.
    async fn deploy(&self, config: &PostgreSQLConfig) -> Result<String, String>;
    /// Fetches the replication certificates for the named cluster.
    async fn get_replication_certs(&self, cluster_name: &str) -> Result<ReplicationCerts, String>;
}
/// Capability trait: Deploy Docker.
#[async_trait]
pub trait Docker {
    /// Deploys Docker on this topology.
    /// Returns a human-readable status string on success, or an error message on failure.
    async fn deploy_docker(&self) -> Result<String, String>;
}
/// Configuration for PostgreSQL deployments.
/// Unit struct in this example; a real implementation would carry fields
/// (credentials, storage, replica counts, ...).
#[derive(Clone)]
pub struct PostgreSQLConfig;
/// Replication certificates.
/// Placeholder in this example; a real implementation would hold TLS material.
#[derive(Clone)]
pub struct ReplicationCerts;
/// Concrete topology: Kubernetes Anywhere (supports PostgreSQL).
/// Deliberately does NOT implement `Docker`, which is what makes the
/// compile-failure demonstration at the end of `main` work.
#[derive(Clone)]
pub struct K8sAnywhereTopology;
#[async_trait]
impl PostgreSQL for K8sAnywhereTopology {
    /// Stand-in deployment: a production version would drive a Helm chart,
    /// operator, or similar Kubernetes machinery.
    async fn deploy(&self, _config: &PostgreSQLConfig) -> Result<String, String> {
        let status = String::from("K8sAnywhere PostgreSQL deployed");
        Ok(status)
    }

    /// Hands back placeholder certificates; no real PKI is involved in this demo.
    async fn get_replication_certs(&self, _cluster_name: &str) -> Result<ReplicationCerts, String> {
        Ok(ReplicationCerts)
    }
}
/// Concrete topology: Linux Host (supports Docker).
/// Implements only the `Docker` capability, not `PostgreSQL`.
#[derive(Clone)]
pub struct LinuxHostTopology;
#[async_trait]
impl Docker for LinuxHostTopology {
    /// Stand-in deployment: a production version would install and configure
    /// Docker on the host.
    async fn deploy_docker(&self) -> Result<String, String> {
        let status = String::from("LinuxHost Docker deployed");
        Ok(status)
    }
}
/// Higher-Order Topology: Composes multiple sub-topologies (primary + replica).
/// Automatically derives *all* capabilities of `T` with failover orchestration.
///
/// - If `T: PostgreSQL`, then `FailoverTopology<T>: PostgreSQL` (blanket impl).
/// - Same for `Docker`, etc. No boilerplate!
/// - Compile-time safe: Missing `T: Capability` → error.
///
/// Note: no trait bounds on the struct itself — bounds live on the capability
/// `impl` blocks below, so the struct stays constructible for any `T`.
#[derive(Clone)]
pub struct FailoverTopology<T> {
    /// Primary sub-topology.
    pub primary: T,
    /// Replica sub-topology.
    pub replica: T,
}
/// Blanket impl: Failover PostgreSQL if T provides PostgreSQL.
/// Delegates reads to the primary; deploys to both members.
#[async_trait]
impl<T: PostgreSQL + Send + Sync + Clone> PostgreSQL for FailoverTopology<T> {
    async fn deploy(&self, config: &PostgreSQLConfig) -> Result<String, String> {
        // Bring up the primary before the replica so the replica has a
        // leader to follow (e.g. via pg_basebackup in a real implementation).
        let on_primary = self.primary.deploy(config).await?;
        let on_replica = self.replica.deploy(config).await?;
        Ok(format!("Failover PG deployed: {} | {}", on_primary, on_replica))
    }

    async fn get_replication_certs(&self, cluster_name: &str) -> Result<ReplicationCerts, String> {
        // Certificates are owned by the primary; the replica follows it.
        self.primary.get_replication_certs(cluster_name).await
    }
}
/// Blanket impl: Failover Docker if T provides Docker.
#[async_trait]
impl<T: Docker + Send + Sync + Clone> Docker for FailoverTopology<T> {
    async fn deploy_docker(&self) -> Result<String, String> {
        // Roll out sequentially: primary first, then the replica.
        let on_primary = self.primary.deploy_docker().await?;
        let on_replica = self.replica.deploy_docker().await?;
        Ok(format!("Failover Docker deployed: {} | {}", on_primary, on_replica))
    }
}
#[tokio::main]
async fn main() {
    let config = PostgreSQLConfig;

    // Works because K8sAnywhereTopology implements PostgreSQL, so the
    // blanket impl gives FailoverTopology<K8sAnywhereTopology>: PostgreSQL.
    println!("=== ✅ PostgreSQL Failover (K8sAnywhere supports PG) ===");
    let pg = FailoverTopology {
        primary: K8sAnywhereTopology,
        replica: K8sAnywhereTopology,
    };
    println!("Result: {}", pg.deploy(&config).await.unwrap());

    // Same pattern, different capability: LinuxHostTopology implements Docker.
    println!("\n=== ✅ Docker Failover (LinuxHost supports Docker) ===");
    let docker = FailoverTopology {
        primary: LinuxHostTopology,
        replica: LinuxHostTopology,
    };
    println!("Result: {}", docker.deploy_docker().await.unwrap());

    println!("\n=== ❌ Would fail to compile (K8sAnywhere !: Docker) ===");
    // let invalid = FailoverTopology {
    //     primary: K8sAnywhereTopology,
    //     replica: K8sAnywhereTopology,
    // };
    // invalid.deploy_docker().await.unwrap(); // Error: `K8sAnywhereTopology: Docker` not satisfied!
    // Very clear error message :
    // error[E0599]: the method `deploy_docker` exists for struct `FailoverTopology<K8sAnywhereTopology>`, but its trait bounds were not satisfied
    //   --> src/main.rs:90:9
    //    |
    //  4 | pub struct FailoverTopology<T> {
    //    | ------------------------------ method `deploy_docker` not found for this struct because it doesn't satisfy `FailoverTopology<K8sAnywhereTopology>: Docker`
    // ...
    // 37 | struct K8sAnywhereTopology;
    //    | -------------------------- doesn't satisfy `K8sAnywhereTopology: Docker`
    // ...
    // 90 |     invalid.deploy_docker(); // `T: Docker` bound unsatisfied
    //    |             ^^^^^^^^^^^^^ method cannot be called on `FailoverTopology<K8sAnywhereTopology>` due to unsatisfied trait bounds
    //    |
    // note: trait bound `K8sAnywhereTopology: Docker` was not satisfied
    //   --> src/main.rs:61:9
    //    |
    // 61 | impl<T: Docker + Send + Sync> Docker for FailoverTopology<T> {
    //    |         ^^^^^^                ------ -------------------
    //    |         |
    //    |         unsatisfied trait bound introduced here
    // note: the trait `Docker` must be implemented
}

View File

@@ -1,90 +0,0 @@
# Architecture Decision Record: Global Orchestration Mesh & The Harmony Agent
**Status:** Proposed
**Date:** 2025-12-19
## Context
Harmony is designed to enable a truly decentralized infrastructure where independent clusters—owned by different organizations or running on diverse hardware—can collaborate reliably. This vision combines the decentralization of Web3 with the performance and capabilities of Web2.
Currently, Harmony operates as a stateless CLI tool, invoked manually or via CI runners. While effective for deployment, this model presents a critical limitation: **a CLI cannot react to real-time events.**
To achieve automated failover and dynamic workload management, we need a system that is "always on." Relying on manual intervention or scheduled CI jobs to recover from a cluster failure creates unacceptable latency and prevents us from scaling to thousands of nodes.
Furthermore, we face a challenge in serving diverse workloads:
* **Financial workloads** require absolute consistency (CP - Consistency/Partition Tolerance).
* **AI/Inference workloads** require maximum availability (AP - Availability/Partition Tolerance).
There are many more use cases, but those are the two extremes.
We need a unified architecture that automates cluster coordination and supports both consistency models without requiring a complete re-architecture in the future.
## Decision
We propose a fundamental architectural evolution. It has been clear since the start of Harmony that it would be necessary to transition Harmony from a purely ephemeral CLI tool to a system that includes a persistent **Harmony Agent**. This Agent will connect to a **Global Orchestration Mesh** based on a strongly consistent protocol.
The proposal consists of four key pillars:
### 1. The Harmony Agent (New Component)
We will develop a long-running process (Daemon/Agent) to be deployed alongside workloads.
* **Shift from CLI:** Unlike the CLI, which applies configuration and exits, the Agent maintains a persistent connection to the mesh.
* **Responsibility:** It actively monitors cluster health, participates in consensus, and executes lifecycle commands (start/stop/fence) instantly when the mesh dictates a state change.
### 2. The Technology: NATS JetStream
We will utilize **NATS JetStream** as the underlying transport and consensus layer for the Agent and the Mesh.
* **Why not raw Raft?** Implementing a raw Raft library requires building and maintaining the transport layer, log compaction, snapshotting, and peer discovery manually. NATS JetStream provides a battle-tested, distributed log and Key-Value store (based on Raft) out of the box, along with a high-performance pub/sub system for event propagation.
* **Role:** It will act as the "source of truth" for the cluster state.
### 3. Strong Consistency at the Mesh Layer
The mesh will operate with **Strong Consistency** by default.
* All critical cluster state changes (topology updates, lease acquisitions, leadership elections) will require consensus among the Agents.
* This ensures that in the event of a network partition, we have a mathematical guarantee of which side holds the valid state, preventing data corruption.
### 4. Public UX: The `FailoverStrategy` Abstraction
To keep the user experience stable and simple, we will expose the complexity of the mesh through a high-level configuration API, tentatively called `FailoverStrategy`.
The user defines the *intent* in their config, and the Harmony Agent automates the *execution*:
* **`FailoverStrategy::AbsoluteConsistency`**:
* *Use Case:* Banking, Transactional DBs.
* *Behavior:* If the mesh detects a partition, the Agent on the minority side immediately halts workloads. No split-brain is ever allowed.
* **`FailoverStrategy::SplitBrainAllowed`**:
* *Use Case:* LLM Inference, Stateless Web Servers.
* *Behavior:* If a partition occurs, the Agent keeps workloads running to maximize uptime. State is reconciled when connectivity returns.
## Rationale
**The Necessity of an Agent**
You cannot automate what you do not monitor. Moving to an Agent-based model is the only way to achieve sub-second reaction times to infrastructure failures. It transforms Harmony from a deployment tool into a self-healing platform.
**Scaling & Decentralization**
To allow independent clusters to collaborate, they need a shared language. A strongly consistent mesh allows Cluster A (Organization X) and Cluster B (Organization Y) to agree on workload placement without a central authority.
**Why Strong Consistency First?**
It is technically feasible to relax a strongly consistent system to allow for "Split Brain" behavior (AP) when the user requests it. However, it is nearly impossible to take an eventually consistent system and force it to be strongly consistent (CP) later. By starting with strict constraints, we cover the hardest use cases (Finance) immediately.
**Future Topologies**
While our immediate need is `FailoverTopology` (Multi-site), this architecture supports any future topology logic:
* **`CostTopology`**: Agents negotiate to route workloads to the cluster with the cheapest spot instances.
* **`HorizontalTopology`**: Spreading a single workload across 100 clusters for massive scale.
* **`GeoTopology`**: Ensuring data stays within specific legal jurisdictions.
The mesh provides the *capability* (consensus and messaging); the topology provides the *logic*.
## Consequences
**Positive**
* **Automation:** Eliminates manual failover, enabling massive scale.
* **Reliability:** Guarantees data safety for critical workloads by default.
* **Flexibility:** A single codebase serves both high-frequency trading and AI inference.
* **Stability:** The public API remains abstract, allowing us to optimize the mesh internals without breaking user code.
**Negative**
* **Deployment Complexity:** Users must now deploy and maintain a running service (the Agent) rather than just downloading a binary.
* **Engineering Complexity:** Integrating NATS JetStream and handling distributed state machines is significantly more complex than the current CLI logic.
## Implementation Plan (Short Term)
1. **Agent Bootstrap:** Create the initial scaffold for the Harmony Agent (daemon).
2. **Mesh Integration:** Prototype NATS JetStream embedding within the Agent.
3. **Strategy Implementation:** Add `FailoverStrategy` to the configuration schema and implement the logic in the Agent to read and act on it.
4. **Migration:** Transition the current manual failover scripts into event-driven logic handled by the Agent.

View File

@@ -1,189 +0,0 @@
### 1. ADR 017-1: NATS Cluster Interconnection & Trust Topology
# Architecture Decision Record: NATS Cluster Interconnection & Trust Topology
**Status:** Proposed
**Date:** 2026-01-12
**Precedes:** [017-Staleness-Detection-for-Failover.md]
## Context
In ADR 017, we defined the failover mechanisms for the Harmony mesh. However, for a Primary (Site A) and a Replica (Site B) to communicate securely—or for the Global Mesh to function across disparate locations—we must establish a robust Transport Layer Security (TLS) strategy.
Our primary deployment platform is OKD (Kubernetes). While OKD provides an internal `service-ca`, it is designed primarily for intra-cluster service-to-service communication. It lacks the flexibility required for:
1. **Public/External Gateway Identities:** NATS Gateways need to identify themselves via public DNS names or external IPs, not just internal `.svc` cluster domains.
2. **Cross-Cluster Trust:** We need a mechanism to allow Cluster A to trust Cluster B without sharing a single private root key.
## Decision
We will implement an **"Islands of Trust"** topology using **cert-manager** on OKD.
### 1. Per-Cluster Certificate Authorities (CA)
* We explicitly **reject** the use of a single "Supercluster CA" shared across all sites.
* Instead, every Harmony Cluster (Site A, Site B, etc.) will generate its own unique Self-Signed Root CA managed by `cert-manager` inside that cluster.
* **Lifecycle:** Root CAs will have a long duration (e.g., 10 years) to minimize rotation friction, while Leaf Certificates (NATS servers) will remain short-lived (e.g., 90 days) and rotate automatically.
> Note : The decision to have a single CA for various workloads managed by Harmony on each deployment, or to have multiple CA for each service that requires interconnection is not made yet. This ADR leans towards one CA per service. This allows for maximum flexibility. But the direction might change and no clear decision has been made yet. The alternative of establishing that each cluster/harmony deployment has a single identity could make mTLS very simple between tenants.
### 2. Trust Federation via Bundle Exchange
To enable secure communication (mTLS) between clusters (e.g., for NATS Gateways or Leaf Nodes):
* **No Private Keys are shared.**
* We will aggregate the **Public CA Certificates** of all trusted clusters into a shared `ca-bundle.pem`.
* This bundle is distributed to the NATS configuration of every node.
* **Verification Logic:** When Site A connects to Site B, Site A verifies Site B's certificate against the bundle. Since Site B's CA public key is in the bundle, the connection is accepted.
### 3. Tooling
* We will use **cert-manager** (deployed via Operator on OKD) rather than OKD's built-in `service-ca`. This provides us with standard CRDs (`Issuer`, `Certificate`) to manage the lifecycle, rotation, and complex SANs (Subject Alternative Names) required for external connectivity.
* Harmony will manage installation, configuration, and bundle creation across all sites.
## Rationale
**Security Blast Radius (The "Key Leak" Scenario)**
If we used a single global CA and the private key for Site A was compromised (e.g., physical theft of a server from a basement), the attacker could impersonate *any* site in the global mesh.
By using Per-Cluster CAs:
* If Site A is compromised, only Site A's identity is stolen.
* We can "evict" Site A from the mesh simply by removing Site A's Public CA from the `ca-bundle.pem` on the remaining healthy clusters and reloading. The attacker can no longer authenticate.
**Decentralized Autonomy**
This aligns with the "Humane Computing" vision. A local cluster owns its identity. It does not depend on a central authority to issue its certificates. It can function in isolation (offline) indefinitely without needing to "phone home" to renew credentials.
## Consequences
**Positive**
* **High Security:** Compromise of one node does not compromise the global mesh.
* **Flexibility:** Easier to integrate with third-party clusters or partners by simply adding their public CA to the bundle.
* **Standardization:** `cert-manager` is the industry standard, making the configuration portable to non-OKD K8s clusters if needed.
**Negative**
* **Configuration Complexity:** We must manage a mechanism to distribute the `ca-bundle.pem` containing public keys to all sites. This should be automated (e.g., via a Harmony Agent) to ensure timely updates and revocation.
* **Revocation Latency:** Revoking a compromised cluster requires updating and reloading the bundle on all other clusters. This is slower than OCSP/CRL but acceptable for infrastructure-level trust if automation is in place.
---
# 2. Concrete overview of the process and how it can be implemented manually across multiple OKD clusters
All of this will be automated via Harmony, but to understand correctly the process it is outlined in details here :
## 1. Deploying and Configuring cert-manager on OKD
While OKD has a built-in `service-ca` controller, it is "opinionated" and primarily signs certs for internal services (like `my-svc.my-namespace.svc`). It is **not suitable** for the Harmony Global Mesh because you cannot easily control the Subject Alternative Names (SANs) for external routes (e.g., `nats.site-a.nationtech.io`), nor can you easily export its CA to other clusters.
**The Solution:** Use the **cert-manager Operator for Red Hat OpenShift**.
### Step 1: Install the Operator
1. Log in to the OKD Web Console.
2. Navigate to **Operators** -> **OperatorHub**.
3. Search for **"cert-manager"**.
4. Choose the **"cert-manager Operator for Red Hat OpenShift"** (Red Hat provided) or the community version.
5. Click **Install**. Use the default settings (Namespace: `cert-manager-operator`).
### Step 2: Create the "Island" CA (The Issuer)
Once installed, you define your cluster's unique identity. Apply this YAML to your NATS namespace.
```yaml
# filepath: k8s/01-issuer.yaml
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: harmony-selfsigned-issuer
namespace: harmony-nats
spec:
selfSigned: {}
---
# This generates the unique Root CA for THIS specific cluster
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: harmony-root-ca
namespace: harmony-nats
spec:
isCA: true
commonName: "harmony-site-a-ca" # CHANGE THIS per cluster (e.g., site-b-ca)
duration: 87600h # 10 years
renewBefore: 2160h # 3 months before expiry
secretName: harmony-root-ca-secret
privateKey:
algorithm: ECDSA
size: 256
issuerRef:
name: harmony-selfsigned-issuer
kind: Issuer
group: cert-manager.io
---
# This Issuer uses the Root CA generated above to sign NATS certs
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: harmony-ca-issuer
namespace: harmony-nats
spec:
ca:
secretName: harmony-root-ca-secret
```
### Step 3: Generate the NATS Server Certificate
This certificate will be used by the NATS server. It includes both internal DNS names (for local clients) and external DNS names (for the global mesh).
```yaml
# filepath: k8s/02-nats-cert.yaml
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: nats-server-cert
namespace: harmony-nats
spec:
secretName: nats-server-tls
duration: 2160h # 90 days
renewBefore: 360h # 15 days
issuerRef:
name: harmony-ca-issuer
kind: Issuer
# CRITICAL: Define all names this server can be reached by
dnsNames:
- "nats"
- "nats.harmony-nats.svc"
- "nats.harmony-nats.svc.cluster.local"
- "*.nats.harmony-nats.svc.cluster.local"
- "nats-gateway.site-a.nationtech.io" # External Route for Mesh
```
## 2. Implementing the "Islands of Trust" (Trust Bundle)
To make Site A and Site B talk, you need to exchange **Public Keys**.
1. **Extract Public CA from Site A:**
```bash
oc get secret harmony-root-ca-secret -n harmony-nats -o jsonpath='{.data.ca\.crt}' | base64 -d > site-a.crt
```
2. **Extract Public CA from Site B:**
```bash
oc get secret harmony-root-ca-secret -n harmony-nats -o jsonpath='{.data.ca\.crt}' | base64 -d > site-b.crt
```
3. **Create the Bundle:**
Combine them into one file.
```bash
cat site-a.crt site-b.crt > ca-bundle.crt
```
4. **Upload Bundle to Both Clusters:**
Create a ConfigMap or Secret in *both* clusters containing this combined bundle.
```bash
oc create configmap nats-trust-bundle --from-file=ca.crt=ca-bundle.crt -n harmony-nats
```
5. **Configure NATS:**
Mount this ConfigMap and point NATS to it.
```conf
# nats.conf snippet
tls {
cert_file: "/etc/nats-certs/tls.crt"
key_file: "/etc/nats-certs/tls.key"
# Point to the bundle containing BOTH Site A and Site B public CAs
ca_file: "/etc/nats-trust/ca.crt"
}
```
This setup ensures that Site A can verify Site B's certificate (signed by `harmony-site-b-ca`) because Site B's CA is in Site A's trust store, and vice versa, without ever sharing the private keys that generated them.

View File

@@ -1,141 +0,0 @@
# Architecture Decision Record: Template Hydration for Kubernetes Manifest Generation
Initial Author: Jean-Gabriel Gill-Couture & Sylvain Tremblay
Initial Date: 2025-01-23
Last Updated Date: 2025-01-23
## Status
Implemented
## Context
Harmony's philosophy is built on three guiding principles: Infrastructure as Resilient Code, Prove It Works — Before You Deploy, and One Unified Model. Our goal is to shift validation and verification as left as possible—ideally to compile time—rather than discovering errors at deploy time.
After investigating a few approaches such as compile-checked Askama templates to generate Kubernetes manifests for Helm charts, we found again that this approach suffered from several fundamental limitations:
* **Late Validation:** Typos in template syntax or field names are only discovered at deployment time, not during compilation. A mistyped `metadata.name` won't surface until Helm attempts to render the template.
* **Brittle Maintenance:** Templates are string-based with limited IDE support. Refactoring requires grep-and-replace across YAML-like template files, risking subtle breakage.
* **Hard-to-Test Logic:** Testing template output requires mocking the template engine and comparing serialized strings rather than asserting against typed data structures.
* **No Type Safety:** There is no guarantee that the generated YAML will be valid Kubernetes resources without runtime validation.
We also faced a strategic choice around Helm: use it as both *templating engine* and *packaging mechanism*, or decouple these concerns. While Helm's ecosystem integration (Harbor, ArgoCD, OCI registry support) is valuable, the Jinja-like templating is at odds with Harmony's "code-first" ethos.
## Decision
We will adopt the **Template Hydration Pattern**—constructing Kubernetes manifests programmatically using strongly-typed `kube-rs` objects, then serializing them to YAML files for packaging into Helm charts.
Specifically:
* **Write strongly typed `k8s_openapi` Structs:** All Kubernetes resources (Deployment, Service, ConfigMap, etc.) will be constructed using the typed structs generated by `k8s_openapi`.
* **Direct Serialization to YAML:** Rather than rendering templates, we use `serde_yaml::to_string()` to serialize typed objects directly into YAML manifests. This way, YAML is used only as a data-transfer format rather than as a templating/programming language — a role it was never designed for.
* **Helm as Packaging-Only:** Helm's role is reduced to packaging pre-rendered templates into a tarball and pushing to OCI registries. No template rendering logic resides within Helm.
* **Ecosystem Preservation:** The generated Helm charts remain fully compatible with Harbor, ArgoCD, and any Helm-compatible tool—the only difference is that the `templates/` directory contains static YAML files.
The implementation in `backend_app.rs` demonstrates this pattern:
```rust
let deployment = Deployment {
metadata: ObjectMeta {
name: Some(self.name.clone()),
labels: Some([("app.kubernetes.io/name".to_string(), self.name.clone())].into()),
..Default::default()
},
spec: Some(DeploymentSpec { /* ... */ }),
..Default::default()
};
let deployment_yaml = serde_yaml::to_string(&deployment)?;
fs::write(templates_dir.join("deployment.yaml"), deployment_yaml)?;
```
## Rationale
**Aligns with "Infrastructure as Resilient Code"**
Harmony's first principle states that infrastructure should be treated like application code. By expressing Kubernetes manifests as Rust structs, we gain:
* **Refactorability:** Rename a label and the compiler catches all usages.
* **IDE Support:** Autocomplete for all Kubernetes API fields; documentation inline.
* **Code Navigation:** Jump to definition shows exactly where a value comes from.
**Achieves "Prove It Works — Before You Deploy"**
The compiler now validates that:
* All required fields are populated (Rust's `Option` type prevents missing fields).
* Field types match expectations (ports are integers, not strings).
* Enums contain valid values (e.g., `ServiceType::ClusterIP`).
This moves what was runtime validation into compile-time checks, fulfilling the "shift left" promise.
**Enables True Unit Testing**
Developers can now write unit tests that assert directly against typed objects:
```rust
let deployment = create_deployment(&app);
assert_eq!(deployment.spec.unwrap().replicas.unwrap(), 3);
assert_eq!(deployment.metadata.name.unwrap(), "my-app");
```
No string parsing, no YAML serialization, no fragile assertions against rendered output.
**Preserves Ecosystem Benefits**
By generating standard Helm chart structures, Harmony retains compatibility with:
* **OCI Registries (Harbor, GHCR):** `helm push` works exactly as before.
* **ArgoCD:** Syncs and manages releases using the generated charts.
* **Existing Workflows:** Teams already consuming Helm charts see no change.
The Helm tarball becomes a "dumb pipe" for transport, which is arguably its ideal role.
## Consequences
### Positive
* **Compile-Time Safety:** A broad class of errors (typos, missing fields, type mismatches) is now caught at build time.
* **Better Developer Experience:** IDE autocomplete, inline documentation, and refactor support significantly reduce the learning curve for Kubernetes manifests.
* **Testability:** Unit tests can validate manifest structure without integration or runtime checks.
* **Auditability:** The source-of-truth for manifests is now pure Rust—easier to review in pull requests than template logic scattered across files.
* **Future-Extensibility:** CustomResources (CRDs) can be supported via `kopium`-generated Rust types, maintaining the same strong typing.
### Negative
* **API Schema Drift:** Kubernetes API changes require regenerating `k8s_openapi` types and updating code. A change in a struct field will cause the build to fail—intentionally, but still requiring the pipeline to be updated.
* **Verbosity:** Typed construction is more verbose than the equivalent template. Builder patterns or helper functions will be needed to keep code readable.
* **Learning Curve:** Contributors must understand both the Kubernetes resource spec *and* the Rust type system, rather than just YAML.
* **Debugging Shift:** When debugging generated YAML, you now trace through Rust code rather than template files—more precise but different mental model.
## Alternatives Considered
### 1. Enhance Askama with Compile-Time Validation
*Pros:* Stay within familiar templating paradigm; minimal code changes.
*Cons:* Rust's type system cannot fully express Kubernetes schema validation without significant macro boilerplate. Errors would still surface at template evaluation time, not compilation.
### 2. Use Helm SDK Programmatically (Go)
*Pros:* Direct access to Helm's template engine; no YAML serialization step.
*Cons:* Would introduce a second language (Go) into a Rust codebase, increasing cognitive load and compilation complexity. No improvement in compile-time safety.
### 3. Raw YAML String Templating (Manual)
*Pros:* Maximum control; no external dependencies.
*Cons:* Even more error-prone than Askama; no structure validation; string concatenation errors abound.
### 4. Use Kustomize for All Manifests
*Pros:* Declarative overlays; standard tool.
*Cons:* Kustomize is itself a layer over YAML templates with its own DSL. It does not provide compile-time type safety and would require externalizing manifest management outside Harmony's codebase.
__Note that this template hydration architecture still allows to override templates with tools like kustomize when required__
## Additional Notes
**Scalability to Future Topologies**
The Template Hydration pattern enables future Harmony architectures to generate manifests dynamically based on topology context. For example, a `CostTopology` might adjust resource requests based on cluster pricing, manipulating the typed `Deployment::spec` directly before serialization.
**Implementation Status**
As of this writing, the pattern is implemented for `BackendApp` deployments (`backend_app.rs`). The next phase is to extend this pattern across all application modules (`webapp.rs`, etc.) and to standardize on this approach for any new implementations.

View File

@@ -1,65 +0,0 @@
# Architecture Decision Record: Network Bonding Configuration via External Automation
Initial Author: Jean-Gabriel Gill-Couture & Sylvain Tremblay
Initial Date: 2026-02-13
Last Updated Date: 2026-02-13
## Status
Accepted
## Context
We need to configure LACP bonds on 10GbE interfaces across all worker nodes in the OpenShift cluster. A significant challenge is that interface names (e.g., `enp1s0f0` vs `ens1f0`) vary across different hardware nodes.
The standard OpenShift mechanism (MachineConfig) applies identical configurations to all nodes in a MachineConfigPool. Since the interface names differ, a single static MachineConfig cannot target specific physical devices across the entire cluster without complex workarounds.
## Decision
We will use the existing "Harmony" automation tool to generate and apply host-specific NetworkManager configuration files directly to the nodes.
1. Harmony will generate the specific `.nmconnection` files for the bond and slaves based on its inventory of interface names.
2. Files will be pushed to `/etc/NetworkManager/system-connections/` on each node.
3. Configuration will be applied via `nmcli` reload or a node reboot.
## Rationale
* **Inventory Awareness:** Harmony already possesses the specific interface mapping data for each host.
* **Persistence:** Fedora CoreOS/SCOS allows writing to `/etc`, and these files persist across reboots and OS upgrades (rpm-ostree updates).
* **Avoids Complexity:** This approach avoids the operational overhead of creating unique MachineConfigPools for every single host or hardware variant.
* **Safety:** Unlike wildcard matching, this ensures explicit interface selection, preventing accidental bonding of reserved interfaces (e.g., future separation of Ceph storage traffic).
## Consequences
**Pros:**
* Precise, per-host configuration without polluting the Kubernetes API with hundreds of MachineConfigs.
* Standard Linux networking behavior; easy to debug locally.
* Prevents accidental interface capture (unlike wildcards).
**Cons:**
* **Loss of Declarative K8s State:** The network config is not managed by the Machine Config Operator (MCO).
* **Node Replacement Friction:** Newly provisioned nodes (replacements) will boot with default config. Harmony must be run against new nodes manually or via a hook before they can fully join the cluster workload.
## Alternatives considered
1. **Wildcard Matching in NetworkManager (e.g., `interface-name=enp*`):**
* *Pros:* Single MachineConfig for the whole cluster.
* *Cons:* Rejected because it is too broad. It risks capturing interfaces intended for other purposes (e.g., splitting storage and cluster networks later).
2. **"Kitchen Sink" Configuration:**
* *Pros:* Single file listing every possible interface name as a slave.
* *Cons:* "Dirty" configuration; results in many inactive connections on every host; brittle if new naming schemes appear.
3. **Per-Host MachineConfig:**
* *Pros:* Fully declarative within OpenShift.
* *Cons:* Requires a unique `MachineConfigPool` per host, which is an anti-pattern and unmaintainable at scale.
4. **On-boot Generation Script:**
* *Pros:* Dynamic detection.
* *Cons:* Increases boot complexity; harder to debug if the script fails during startup.
## Additional Notes
While `/etc` is writable and persistent on CoreOS, this configuration falls outside the "Day 1" Ignition process. Operational runbooks must be updated to ensure Harmony runs on any node replacement events.

View File

@@ -1,318 +0,0 @@
# Architecture Decision Record: Monitoring and Alerting Architecture
Initial Author: Willem Rolleman, Jean-Gabriel Carrier
Initial Date: March 9, 2026
Last Updated Date: March 9, 2026
## Status
Accepted
Supersedes: [ADR-010](010-monitoring-and-alerting.md)
## Context
Harmony needs a unified approach to monitoring and alerting across different infrastructure targets:
1. **Cluster-level monitoring**: Administrators managing entire Kubernetes/OKD clusters need to define cluster-wide alerts, receivers, and scrape targets.
2. **Tenant-level monitoring**: Multi-tenant clusters where teams are confined to namespaces need monitoring scoped to their resources.
3. **Application-level monitoring**: Developers deploying applications want zero-config monitoring that "just works" for their services.
The monitoring landscape is fragmented:
- **OKD/OpenShift**: Built-in Prometheus with AlertmanagerConfig CRDs
- **KubePrometheus**: Helm-based stack with PrometheusRule CRDs
- **RHOB (Red Hat Observability)**: Operator-based with MonitoringStack CRDs
- **Standalone Prometheus**: Raw Prometheus deployments
Each system has different CRDs, different installation methods, and different configuration APIs.
## Decision
We implement a **trait-based architecture with compile-time capability verification** that provides:
1. **Type-safe abstractions** via parameterized traits: `AlertReceiver<S>`, `AlertRule<S>`, `ScrapeTarget<S>`
2. **Compile-time topology compatibility** via the `Observability<S>` capability bound
3. **Three levels of abstraction**: Cluster, Tenant, and Application monitoring
4. **Pre-built alert rules** as functions that return typed structs
### Core Traits
```rust
// domain/topology/monitoring.rs
/// Marker trait for systems that send alerts (Prometheus, etc.)
pub trait AlertSender: Send + Sync + std::fmt::Debug {
fn name(&self) -> String;
}
/// Defines how a receiver (Discord, Slack, etc.) builds its configuration
/// for a specific sender type
pub trait AlertReceiver<S: AlertSender>: std::fmt::Debug + Send + Sync {
fn build(&self) -> Result<ReceiverInstallPlan, InterpretError>;
fn name(&self) -> String;
fn clone_box(&self) -> Box<dyn AlertReceiver<S>>;
}
/// Defines how an alert rule builds its PrometheusRule configuration
pub trait AlertRule<S: AlertSender>: std::fmt::Debug + Send + Sync {
fn build_rule(&self) -> Result<serde_json::Value, InterpretError>;
fn name(&self) -> String;
fn clone_box(&self) -> Box<dyn AlertRule<S>>;
}
/// Capability that topologies implement to support monitoring
pub trait Observability<S: AlertSender> {
async fn install_alert_sender(&self, sender: &S, inventory: &Inventory)
-> Result<PreparationOutcome, PreparationError>;
async fn install_receivers(&self, sender: &S, inventory: &Inventory,
receivers: Option<Vec<Box<dyn AlertReceiver<S>>>>) -> Result<...>;
async fn install_rules(&self, sender: &S, inventory: &Inventory,
rules: Option<Vec<Box<dyn AlertRule<S>>>>) -> Result<...>;
async fn add_scrape_targets(&self, sender: &S, inventory: &Inventory,
scrape_targets: Option<Vec<Box<dyn ScrapeTarget<S>>>>) -> Result<...>;
async fn ensure_monitoring_installed(&self, sender: &S, inventory: &Inventory)
-> Result<...>;
}
```
### Alert Sender Types
Each monitoring stack is a distinct `AlertSender`:
| Sender | Module | Use Case |
|--------|--------|----------|
| `OpenshiftClusterAlertSender` | `monitoring/okd/` | OKD/OpenShift built-in monitoring |
| `KubePrometheus` | `monitoring/kube_prometheus/` | Helm-deployed kube-prometheus-stack |
| `Prometheus` | `monitoring/prometheus/` | Standalone Prometheus via Helm |
| `RedHatClusterObservability` | `monitoring/red_hat_cluster_observability/` | RHOB operator |
| `Grafana` | `monitoring/grafana/` | Grafana-managed alerting |
### Three Levels of Monitoring
#### 1. Cluster-Level Monitoring
For cluster administrators. Full control over monitoring infrastructure.
```rust
// examples/okd_cluster_alerts/src/main.rs
OpenshiftClusterAlertScore {
sender: OpenshiftClusterAlertSender,
receivers: vec![Box::new(DiscordReceiver { ... })],
rules: vec![Box::new(alert_rules)],
scrape_targets: Some(vec![Box::new(external_exporters)]),
}
```
**Characteristics:**
- Cluster-scoped CRDs and resources
- Can add external scrape targets (outside cluster)
- Manages Alertmanager configuration
- Requires cluster-admin privileges
#### 2. Tenant-Level Monitoring
For teams confined to namespaces. The topology determines tenant context.
```rust
// The topology's Observability impl handles namespace scoping
impl Observability<KubePrometheus> for K8sAnywhereTopology {
async fn install_rules(&self, sender: &KubePrometheus, ...) {
// Topology knows if it's tenant-scoped
let namespace = self.get_tenant_config().await
.map(|t| t.name)
.unwrap_or("default");
// Install rules in tenant namespace
}
}
```
**Characteristics:**
- Namespace-scoped resources
- Cannot modify cluster-level monitoring config
- May have restricted receiver types
- Runtime validation of permissions (cannot be fully compile-time)
#### 3. Application-Level Monitoring
For developers. Zero-config, opinionated monitoring.
```rust
// modules/application/features/monitoring.rs
pub struct Monitoring {
pub application: Arc<dyn Application>,
pub alert_receiver: Vec<Box<dyn AlertReceiver<Prometheus>>>,
}
impl<T: Topology + Observability<Prometheus> + TenantManager + ...>
ApplicationFeature<T> for Monitoring
{
async fn ensure_installed(&self, topology: &T) -> Result<...> {
// Auto-creates ServiceMonitor
// Auto-installs Ntfy for notifications
// Handles tenant namespace automatically
// Wires up sensible defaults
}
}
```
**Characteristics:**
- Automatic ServiceMonitor creation
- Opinionated notification channel (Ntfy)
- Tenant-aware via topology
- Minimal configuration required
## Rationale
### Why Generic Traits Instead of Unified Types?
Each monitoring stack (OKD, KubePrometheus, RHOB) has fundamentally different CRDs:
```rust
// OKD uses AlertmanagerConfig with different structure
AlertmanagerConfig { spec: { receivers: [...] } }
// RHOB uses secret references for webhook URLs
MonitoringStack { spec: { alertmanagerConfig: { discordConfigs: [{ apiURL: { key: "..." } }] } } }
// KubePrometheus uses Alertmanager CRD with different field names
Alertmanager { spec: { config: { receivers: [...] } } }
```
A unified type would either:
1. Be a lowest-common-denominator (loses stack-specific features)
2. Be a complex union type (hard to use, easy to misconfigure)
Generic traits let each stack express its configuration naturally while providing a consistent interface.
### Why Compile-Time Capability Bounds?
```rust
impl<T: Topology + Observability<OpenshiftClusterAlertSender>> Score<T>
for OpenshiftClusterAlertScore { ... }
```
This fails at compile time if you try to use `OpenshiftClusterAlertScore` with a topology that doesn't support OKD monitoring. This prevents the "config-is-valid-but-platform-is-wrong" errors that Harmony was designed to eliminate.
### Why Not a MonitoringStack Abstraction (V2 Approach)?
The V2 approach proposed a unified `MonitoringStack` that hides sender selection:
```rust
// V2 approach - rejected
MonitoringStack::new(MonitoringApiVersion::V2CRD)
.add_alert_channel(discord)
```
**Problems:**
1. Hides which sender you're using, losing compile-time guarantees
2. "Version selection" actually chooses between fundamentally different systems
3. Would need to handle all stack-specific features through a generic interface
The current approach is explicit: you choose `OpenshiftClusterAlertSender` and the compiler verifies compatibility.
### Why Runtime Validation for Tenants?
Tenant confinement is determined at runtime by the topology and K8s RBAC. We cannot know at compile time whether a user has cluster-admin or namespace-only access.
Options considered:
1. **Compile-time tenant markers** - Would require modeling entire RBAC hierarchy in types. Over-engineering.
2. **Runtime validation** - Current approach. Fails with clear K8s permission errors if insufficient access.
3. **No tenant support** - Would exclude a major use case.
Runtime validation is the pragmatic choice. The failure mode is clear (K8s API error) and occurs early in execution.
> Note: we will eventually have compile-time validation for such things. Rust macros are powerful and we could discover the actual capabilities we're dealing with, similar to sqlx's approach in its `query!` macros.
## Consequences
### Pros
1. **Type Safety**: Invalid configurations are caught at compile time
2. **Extensibility**: Adding a new monitoring stack requires implementing traits, not modifying core code
3. **Clear Separation**: Cluster/Tenant/Application levels have distinct entry points
4. **Reusable Rules**: Pre-built alert rules as functions (`high_pvc_fill_rate_over_two_days()`)
5. **CRD Accuracy**: Type definitions match actual Kubernetes CRDs exactly
### Cons
1. **Implementation Explosion**: `DiscordReceiver` implements `AlertReceiver<S>` for each sender type (3+ implementations)
2. **Learning Curve**: Understanding the trait hierarchy takes time
3. **clone_box Boilerplate**: Required for trait object cloning (3 lines per impl)
### Mitigations
- Implementation explosion is contained: each receiver type has O(senders) implementations, but receivers are rare compared to rules
- Learning curve is documented with examples at each level
- clone_box boilerplate is minimal and copy-paste
## Alternatives Considered
### Unified MonitoringStack Type
See "Why Not a MonitoringStack Abstraction" above. Rejected for losing compile-time safety.
### Helm-Only Approach
Use `HelmScore` directly for each monitoring deployment. Rejected because:
- No type safety for alert rules
- Cannot compose with application features
- No tenant awareness
### Separate Modules Per Use Case
Have `cluster_monitoring/`, `tenant_monitoring/`, `app_monitoring/` as separate modules. Rejected because:
- Massive code duplication
- No shared abstraction for receivers/rules
- Adding a feature requires three implementations
## Implementation Notes
### Module Structure
```
modules/monitoring/
├── mod.rs # Public exports
├── alert_channel/ # Receivers (Discord, Webhook)
├── alert_rule/ # Rules and pre-built alerts
│ ├── prometheus_alert_rule.rs
│ └── alerts/ # Library of pre-built rules
│ ├── k8s/ # K8s-specific (pvc, pod, memory)
│ └── infra/ # Infrastructure (opnsense, dell)
├── okd/ # OpenshiftClusterAlertSender
├── kube_prometheus/ # KubePrometheus
├── prometheus/ # Prometheus
├── red_hat_cluster_observability/ # RHOB
├── grafana/ # Grafana
├── application_monitoring/ # Application-level scores
└── scrape_target/ # External scrape targets
```
### Adding a New Alert Sender
1. Create sender type: `pub struct MySender; impl AlertSender for MySender { ... }`
2. Implement `Observability<MySender>` for topologies that support it
3. Create CRD types in `crd/` subdirectory
4. Implement `AlertReceiver<MySender>` for existing receivers
5. Implement `AlertRule<MySender>` for `AlertManagerRuleGroup`
### Adding a New Alert Rule
```rust
pub fn my_custom_alert() -> PrometheusAlertRule {
PrometheusAlertRule::new("MyAlert", "up == 0")
.for_duration("5m")
.label("severity", "critical")
.annotation("summary", "Service is down")
}
```
No trait implementation needed - `AlertManagerRuleGroup` already handles conversion.
## Related ADRs
- [ADR-013](013-monitoring-notifications.md): Notification channel selection (ntfy)
- [ADR-011](011-multi-tenant-cluster.md): Multi-tenant cluster architecture

View File

@@ -1,21 +0,0 @@
[package]
name = "example-monitoring-v2"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony-k8s = { path = "../../harmony-k8s" }
harmony_types = { path = "../../harmony_types" }
kube = { workspace = true }
schemars = "0.8"
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
serde_yaml = { workspace = true }
url = { workspace = true }
log = { workspace = true }
async-trait = { workspace = true }
k8s-openapi = { workspace = true }

View File

@@ -1,91 +0,0 @@
# Monitoring v2 - Improved Architecture
This example demonstrates the improved monitoring architecture that addresses the "WTF/minute" issues in the original design.
## Key Improvements
### 1. **Single AlertChannel Trait with Generic Sender**
The original design required 9-12 implementations for each alert channel (Discord, Webhook, etc.) - one for each sender type. The new design uses a single trait with generic sender parameterization:
pub trait AlertChannel<Sender: AlertSender> {
async fn install_config(&self, sender: &Sender) -> Result<Outcome, InterpretError>;
fn name(&self) -> String;
fn as_any(&self) -> &dyn std::any::Any;
}
**Benefits:**
- One Discord implementation works with all sender types
- Type safety at compile time
- No runtime dispatch overhead
### 2. **MonitoringStack Abstraction**
Instead of manually selecting CRDPrometheus vs KubePrometheus vs RHOBObservability, you now have a unified MonitoringStack that handles versioning:
let monitoring_stack = MonitoringStack::new(MonitoringApiVersion::V2CRD)
.set_namespace("monitoring")
.add_alert_channel(discord_receiver)
.set_scrape_targets(vec![...]);
**Benefits:**
- Single source of truth for monitoring configuration
- Easy to switch between monitoring versions
- Automatic version-specific configuration
### 3. **TenantMonitoringScore - True Composition**
The original monitoring_with_tenant example just put tenant and monitoring as separate items in a vec. The new design truly composes them:
let tenant_score = TenantMonitoringScore::new("test-tenant", monitoring_stack);
This creates a single score that:
- Has tenant context
- Has monitoring configuration
- Automatically installs monitoring scoped to tenant namespace
**Benefits:**
- No more "two separate things" confusion
- Automatic tenant namespace scoping
- Clear ownership: tenant owns its monitoring
### 4. **Versioned Monitoring APIs**
Clear versioning makes it obvious which monitoring stack you're using:
pub enum MonitoringApiVersion {
V1Helm, // Old Helm charts
V2CRD, // Current CRDs
V3RHOB, // RHOB (future)
}
**Benefits:**
- No guessing which API version you're using
- Easy to migrate between versions
- Backward compatibility path
## Comparison
### Original Design (monitoring_with_tenant)
- Manual selection of each component
- Manual installation of both components
- Need to remember to pass both to harmony_cli::run
- Monitoring not scoped to tenant automatically
### New Design (monitoring_v2)
- Single composed score
- One score does it all
## Usage
cd examples/monitoring_v2
cargo run
## Migration Path
To migrate from the old design to the new:
1. Replace individual alert channel implementations with AlertChannel<Sender>
2. Use MonitoringStack instead of manual *Prometheus selection
3. Use TenantMonitoringScore instead of separate TenantScore + monitoring scores
4. Select monitoring version via MonitoringApiVersion

View File

@@ -1,343 +0,0 @@
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use log::debug;
use serde::{Deserialize, Serialize};
use serde_yaml::{Mapping, Value};
use harmony::data::Version;
use harmony::interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome};
use harmony::inventory::Inventory;
use harmony::score::Score;
use harmony::topology::{Topology, tenant::TenantManager};
use harmony_k8s::K8sClient;
use harmony_types::k8s_name::K8sName;
use harmony_types::net::Url;
/// Marker trait for monitoring back-ends that can emit alerts
/// (CRD-based Prometheus, RHOB, kube-prometheus, ...).
pub trait AlertSender: Send + Sync + std::fmt::Debug {
    /// Human-readable identifier of this sender implementation.
    fn name(&self) -> String;
    /// Kubernetes namespace the sender operates in.
    fn namespace(&self) -> String;
}
/// CRD-driven Prometheus deployment target.
#[derive(Debug)]
pub struct CRDPrometheus {
    /// Namespace the Prometheus CRs live in.
    pub namespace: String,
    /// Kubernetes API client used to apply resources.
    pub client: Arc<K8sClient>,
}

impl AlertSender for CRDPrometheus {
    fn name(&self) -> String {
        String::from("CRDPrometheus")
    }

    fn namespace(&self) -> String {
        self.namespace.to_owned()
    }
}
/// Red Hat Observability (RHOB) operator deployment target.
#[derive(Debug)]
pub struct RHOBObservability {
    /// Namespace the RHOB resources live in.
    pub namespace: String,
    /// Kubernetes API client used to apply resources.
    pub client: Arc<K8sClient>,
}

impl AlertSender for RHOBObservability {
    fn name(&self) -> String {
        String::from("RHOBObservability")
    }

    fn namespace(&self) -> String {
        self.namespace.to_owned()
    }
}
/// Helm-deployed kube-prometheus-stack target. Configuration is shared
/// behind a mutex so alert channels can append to it during installation.
#[derive(Debug)]
pub struct KubePrometheus {
    pub config: Arc<Mutex<KubePrometheusConfig>>,
}

impl KubePrometheus {
    /// Creates a sender with an empty configuration.
    pub fn new() -> Self {
        let config = KubePrometheusConfig::new();
        Self {
            config: Arc::new(Mutex::new(config)),
        }
    }
}

impl Default for KubePrometheus {
    fn default() -> Self {
        Self::new()
    }
}
impl AlertSender for KubePrometheus {
    fn name(&self) -> String {
        String::from("KubePrometheus")
    }

    /// Configured namespace, falling back to "monitoring" when unset.
    fn namespace(&self) -> String {
        let guard = self.config.lock().unwrap();
        match guard.namespace.as_ref() {
            Some(ns) => ns.clone(),
            None => "monitoring".to_string(),
        }
    }
}
/// Mutable configuration shared by a `KubePrometheus` sender.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct KubePrometheusConfig {
    /// Target namespace; `None` means callers fall back to "monitoring".
    pub namespace: Option<String>,
    /// Receiver/route pairs collected from alert channels. Skipped by serde:
    /// the `serde_yaml::Value` payloads are runtime-only state.
    #[serde(skip)]
    pub alert_receiver_configs: Vec<AlertManagerChannelConfig>,
}

impl KubePrometheusConfig {
    /// Creates an empty configuration with no namespace override.
    pub fn new() -> Self {
        Self {
            namespace: None,
            alert_receiver_configs: Vec::new(),
        }
    }
}

// `new()` takes no arguments, so provide `Default` for API consistency
// (clippy::new_without_default) and for use with `#[serde(default)]`.
impl Default for KubePrometheusConfig {
    fn default() -> Self {
        Self::new()
    }
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AlertManagerChannelConfig {
pub channel_receiver: serde_yaml::Value,
pub channel_route: serde_yaml::Value,
}
impl Default for AlertManagerChannelConfig {
fn default() -> Self {
Self {
channel_receiver: serde_yaml::Value::Mapping(Default::default()),
channel_route: serde_yaml::Value::Mapping(Default::default()),
}
}
}
/// Describes one in-cluster HTTP endpoint Prometheus should scrape.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ScrapeTargetConfig {
    // Kubernetes Service name exposing the metrics endpoint.
    pub service_name: String,
    // Service port, kept as a string.
    pub port: String,
    // HTTP path of the metrics endpoint.
    pub path: String,
}
/// Selects which generation of the monitoring stack to install.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum MonitoringApiVersion {
    /// Legacy Helm-chart based deployment.
    V1Helm,
    /// Current CRD-based deployment.
    V2CRD,
    /// RHOB operator based deployment (future).
    V3RHOB,
}
/// Unified description of a monitoring deployment: which API version to use,
/// where to install it, which alert channels to wire up and what to scrape.
#[derive(Debug, Clone)]
pub struct MonitoringStack {
    // Selects the underlying monitoring system (Helm / CRD / RHOB).
    pub version: MonitoringApiVersion,
    // Installation namespace; defaults to "monitoring" in `new`.
    pub namespace: String,
    // NOTE(review): channels are stored as `AlertSender` trait objects rather
    // than `AlertChannel` — verify this is the intended type.
    pub alert_channels: Vec<Arc<dyn AlertSender>>,
    // Endpoints Prometheus should scrape.
    pub scrape_targets: Vec<ScrapeTargetConfig>,
}
impl MonitoringStack {
    /// Starts a stack targeting the default "monitoring" namespace with no
    /// channels and no scrape targets.
    pub fn new(version: MonitoringApiVersion) -> Self {
        Self {
            version,
            namespace: String::from("monitoring"),
            alert_channels: vec![],
            scrape_targets: vec![],
        }
    }

    /// Builder: override the target namespace.
    pub fn set_namespace(mut self, namespace: &str) -> Self {
        self.namespace = namespace.to_owned();
        self
    }

    /// Builder: register one alert channel.
    pub fn add_alert_channel(mut self, channel: impl AlertSender + 'static) -> Self {
        let channel: Arc<dyn AlertSender> = Arc::new(channel);
        self.alert_channels.push(channel);
        self
    }

    /// Builder: replace the scrape-target list from
    /// `(service_name, port, path)` tuples.
    pub fn set_scrape_targets(mut self, targets: Vec<(&str, &str, String)>) -> Self {
        let mut configs = Vec::with_capacity(targets.len());
        for (service_name, port, path) in targets {
            configs.push(ScrapeTargetConfig {
                service_name: service_name.to_string(),
                port: port.to_string(),
                path,
            });
        }
        self.scrape_targets = configs;
        self
    }
}
/// A notification destination (Discord, generic webhook, ...) that knows how
/// to install its configuration into a specific sender type.
pub trait AlertChannel<Sender: AlertSender> {
    // Installs this channel's configuration into the given sender.
    fn install_config(&self, sender: &Sender);
    // Stable identifier for this channel.
    fn name(&self) -> String;
}
/// Discord notification endpoint, addressed by webhook URL.
#[derive(Debug, Clone)]
pub struct DiscordWebhook {
    // Kubernetes-safe name; also used as the Alertmanager receiver name.
    pub name: K8sName,
    // Discord webhook URL alerts are posted to.
    pub url: Url,
    // Label selectors — currently not read by `get_config`; TODO confirm intent.
    pub selectors: Vec<HashMap<String, String>>,
}
impl DiscordWebhook {
    /// Assembles the Alertmanager receiver + route pair for this webhook.
    /// The route matches everything except the built-in Watchdog alert.
    fn get_config(&self) -> AlertManagerChannelConfig {
        let yaml_str = |s: &str| Value::String(s.to_string());

        let mut discord_config = Mapping::new();
        discord_config.insert(yaml_str("webhook_url"), Value::String(self.url.to_string()));

        // Receiver: name first, then the discord_configs list.
        let mut receiver = Mapping::new();
        receiver.insert(yaml_str("name"), Value::String(self.name.to_string()));
        receiver.insert(
            yaml_str("discord_configs"),
            Value::Sequence(vec![Value::Mapping(discord_config)]),
        );

        // Route: points at the receiver by name, excludes the Watchdog alert.
        let mut route = Mapping::new();
        route.insert(yaml_str("receiver"), Value::String(self.name.to_string()));
        route.insert(
            yaml_str("matchers"),
            Value::Sequence(vec![yaml_str("alertname!=Watchdog")]),
        );

        AlertManagerChannelConfig {
            channel_receiver: Value::Mapping(receiver),
            channel_route: Value::Mapping(route),
        }
    }
}
impl AlertChannel<CRDPrometheus> for DiscordWebhook {
    /// Logs the configuration that would be installed for a CRD Prometheus.
    fn install_config(&self, sender: &CRDPrometheus) {
        let ns = sender.namespace();
        debug!("Installing Discord webhook for CRDPrometheus in namespace: {ns}");
        debug!("Config: {:?}", self.get_config());
        debug!("Installed!");
    }

    fn name(&self) -> String {
        String::from("discord-webhook")
    }
}
impl AlertChannel<RHOBObservability> for DiscordWebhook {
    /// Logs the configuration that would be installed for RHOB.
    fn install_config(&self, sender: &RHOBObservability) {
        debug!("Installing Discord webhook for RHOBObservability in namespace: {}", sender.namespace());
        debug!("Config: {:?}", self.get_config());
        debug!("Installed!");
    }

    // Fixed from "webhook-receiver" (apparent copy-paste from a generic
    // webhook channel): the CRDPrometheus and KubePrometheus impls of the
    // same DiscordWebhook both return "discord-webhook", and the channel
    // should keep one name regardless of sender.
    fn name(&self) -> String {
        "discord-webhook".to_string()
    }
}
impl AlertChannel<KubePrometheus> for DiscordWebhook {
    /// Registers this webhook's receiver/route pair in the shared
    /// `KubePrometheusConfig`.
    fn install_config(&self, sender: &KubePrometheus) {
        // `sender.namespace()` takes the config lock internally, so call it
        // before acquiring our own guard below.
        debug!("Installing Discord webhook for KubePrometheus in namespace: {}", sender.namespace());
        // BUG FIX: the previous version acquired `sender.config.lock()` a
        // second time while the first guard was still alive. std::sync::Mutex
        // is not reentrant, so that self-deadlocks. Take one guard and reuse it.
        let mut config = sender.config.lock().unwrap();
        let ns = config.namespace.clone().unwrap_or_else(|| "monitoring".to_string());
        debug!("Namespace: {}", ns);
        config.alert_receiver_configs.push(self.get_config());
        debug!("Installed!");
    }

    fn name(&self) -> String {
        "discord-webhook".to_string()
    }
}
/// Serde default for `TenantMonitoringScore::monitoring_stack`: a fresh
/// V2 (CRD-based) stack.
fn default_monitoring_stack() -> MonitoringStack {
    MonitoringStack::new(MonitoringApiVersion::V2CRD)
}
/// Score composing a tenant with its monitoring stack, so the stack is
/// installed scoped to that tenant.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TenantMonitoringScore {
    pub tenant_id: harmony_types::id::Id,
    pub tenant_name: String,
    #[serde(skip)]
    #[serde(default = "default_monitoring_stack")]
    pub monitoring_stack: MonitoringStack,
}

impl TenantMonitoringScore {
    /// Builds a score for `tenant_name` with a default-generated id.
    pub fn new(tenant_name: &str, monitoring_stack: MonitoringStack) -> Self {
        let tenant_id = harmony_types::id::Id::default();
        let tenant_name = tenant_name.to_owned();
        Self {
            tenant_id,
            tenant_name,
            monitoring_stack,
        }
    }
}
impl<T: Topology + TenantManager> Score<T> for TenantMonitoringScore {
    // Each execution gets its own interpret holding a snapshot of this score.
    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
        Box::new(TenantMonitoringInterpret {
            score: self.clone(),
        })
    }

    // Display name used in CLI/report output.
    fn name(&self) -> String {
        format!("{} monitoring [TenantMonitoringScore]", self.tenant_name)
    }
}
/// Runtime counterpart of `TenantMonitoringScore`: performs the installation.
#[derive(Debug)]
pub struct TenantMonitoringInterpret {
    // Snapshot of the score this interpret was created from.
    pub score: TenantMonitoringScore,
}
#[async_trait::async_trait]
impl<T: Topology + TenantManager> Interpret<T> for TenantMonitoringInterpret {
    /// Installs the tenant's monitoring stack according to the selected API
    /// version. The version-specific arms currently only log what they would
    /// install; no resources are applied yet.
    async fn execute(
        &self,
        _inventory: &Inventory,
        topology: &T,
    ) -> Result<Outcome, InterpretError> {
        // NOTE(review): `unwrap()` panics when the topology has no tenant
        // config; consider surfacing this as an InterpretError instead.
        let tenant_config = topology.get_tenant_config().await.unwrap();
        let tenant_ns = tenant_config.name.clone();
        match self.score.monitoring_stack.version {
            MonitoringApiVersion::V1Helm => {
                debug!("Installing Helm monitoring for tenant {}", tenant_ns);
            }
            MonitoringApiVersion::V2CRD => {
                debug!("Installing CRD monitoring for tenant {}", tenant_ns);
            }
            MonitoringApiVersion::V3RHOB => {
                debug!("Installing RHOB monitoring for tenant {}", tenant_ns);
            }
        }
        Ok(Outcome::success(format!(
            "Installed monitoring stack for tenant {} with version {:?}",
            self.score.tenant_name,
            self.score.monitoring_stack.version
        )))
    }

    fn get_name(&self) -> InterpretName {
        InterpretName::Custom("TenantMonitoringInterpret")
    }

    fn get_version(&self) -> Version {
        Version::from("1.0.0").unwrap()
    }

    // NOTE(review): hard-coded to SUCCESS regardless of execution state.
    fn get_status(&self) -> InterpretStatus {
        InterpretStatus::SUCCESS
    }

    // This interpret spawns no child interprets.
    fn get_children(&self) -> Vec<harmony_types::id::Id> {
        Vec::new()
    }
}

View File

@@ -1,19 +0,0 @@
[package]
name = "brocade"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
[dependencies]
async-trait.workspace = true
harmony_types = { path = "../harmony_types" }
russh.workspace = true
russh-keys.workspace = true
tokio.workspace = true
log.workspace = true
env_logger.workspace = true
regex = "1.11.3"
harmony_secret = { path = "../harmony_secret" }
serde.workspace = true
schemars = "0.8"

View File

@@ -1,75 +0,0 @@
use std::net::{IpAddr, Ipv4Addr};
use brocade::{BrocadeOptions, ssh};
use harmony_secret::{Secret, SecretManager};
use harmony_types::switch::PortLocation;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
/// Switch credentials resolved through the harmony secret store.
#[derive(Secret, Clone, Debug, JsonSchema, Serialize, Deserialize)]
struct BrocadeSwitchAuth {
    username: String,
    password: String,
}
#[tokio::main]
async fn main() {
    env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init();
    // Hard-coded targets for manual testing against different lab switches.
    // let ip = IpAddr::V4(Ipv4Addr::new(10, 0, 0, 250)); // old brocade @ ianlet
    let ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); // brocade @ sto1
    // let ip = IpAddr::V4(Ipv4Addr::new(192, 168, 4, 11)); // brocade @ st
    let switch_addresses = vec![ip];
    // Credentials come from the secret store, prompting on first run.
    let config = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
        .await
        .unwrap();
    // dry_run is enabled, so write operations should not touch real gear.
    let brocade = brocade::init(
        &switch_addresses,
        &config.username,
        &config.password,
        &BrocadeOptions {
            dry_run: true,
            ssh: ssh::SshOptions {
                port: 2222,
                ..Default::default()
            },
            ..Default::default()
        },
    )
    .await
    .expect("Brocade client failed to connect");
    // Read-only inspection: topology, interfaces, firmware version.
    let entries = brocade.get_stack_topology().await.unwrap();
    println!("Stack topology: {entries:#?}");
    let entries = brocade.get_interfaces().await.unwrap();
    println!("Interfaces: {entries:#?}");
    let version = brocade.version().await.unwrap();
    println!("Version: {version:?}");
    println!("--------------");
    let mac_adddresses = brocade.get_mac_address_table().await.unwrap();
    println!("VLAN\tMAC\t\t\tPORT");
    for mac in mac_adddresses {
        println!("{}\t{}\t{}", mac.vlan, mac.mac_address, mac.port);
    }
    println!("--------------");
    // Everything below is intentionally unreachable: the LAG manipulation
    // section is parked behind todo!() until it is meant to run.
    todo!();
    let channel_name = "1";
    brocade.clear_port_channel(channel_name).await.unwrap();
    println!("--------------");
    let channel_id = brocade.find_available_channel_id().await.unwrap();
    println!("--------------");
    let channel_name = "HARMONY_LAG";
    let ports = [PortLocation(2, 0, 35)];
    brocade
        .create_port_channel(channel_id, channel_name, &ports)
        .await
        .unwrap();
}

View File

@@ -1,228 +0,0 @@
use super::BrocadeClient;
use crate::{
BrocadeInfo, Error, ExecutionMode, InterSwitchLink, InterfaceInfo, MacAddressEntry,
PortChannelId, PortOperatingMode, parse_brocade_mac_address, shell::BrocadeShell,
};
use async_trait::async_trait;
use harmony_types::switch::{PortDeclaration, PortLocation};
use log::{debug, info};
use regex::Regex;
use std::{collections::HashSet, str::FromStr};
/// Brocade client speaking the FastIron CLI dialect over an SSH shell.
#[derive(Debug)]
pub struct FastIronClient {
    // Shell session used to run CLI commands on the switch.
    shell: BrocadeShell,
    // Version info captured at connection time; served by `version()`.
    version: BrocadeInfo,
}
impl FastIronClient {
    /// Wraps the shell with FastIron-specific session setup: disable paging
    /// before every command batch and restore it afterwards.
    pub fn init(mut shell: BrocadeShell, version_info: BrocadeInfo) -> Self {
        shell.before_all(vec!["skip-page-display".into()]);
        shell.after_all(vec!["page".into()]);
        Self {
            shell,
            version: version_info,
        }
    }

    /// Parses one line of `show mac-address` output.
    ///
    /// Returns `None` for lines that do not look like MAC entries (headers,
    /// blanks, unparsable columns) and `Some(Err(..))` when the port column
    /// cannot be parsed into a `PortDeclaration`.
    fn parse_mac_entry(&self, line: &str) -> Option<Result<MacAddressEntry, Error>> {
        debug!("[Brocade] Parsing mac address entry: {line}");
        let parts: Vec<&str> = line.split_whitespace().collect();
        if parts.len() < 3 {
            return None;
        }
        // 3 columns: "<vlan> <mac> <port>"; any other width is treated as a
        // MAC-first row with the VLAN defaulted to 1 — assumes FastIron table
        // layouts; TODO confirm against other firmware revisions.
        let (vlan, mac_address, port) = match parts.len() {
            3 => (
                u16::from_str(parts[0]).ok()?,
                parse_brocade_mac_address(parts[1]).ok()?,
                parts[2].to_string(),
            ),
            _ => (
                1,
                parse_brocade_mac_address(parts[0]).ok()?,
                parts[1].to_string(),
            ),
        };
        let port =
            PortDeclaration::parse(&port).map_err(|e| Error::UnexpectedError(format!("{e}")));
        match port {
            Ok(p) => Some(Ok(MacAddressEntry {
                vlan,
                mac_address,
                port: p,
            })),
            Err(e) => Some(Err(e)),
        }
    }

    /// Parses one line of `show interface stack-ports` output into a link
    /// with only the local end filled in (the remote end is not derivable
    /// from this command's output).
    fn parse_stack_port_entry(&self, line: &str) -> Option<Result<InterSwitchLink, Error>> {
        debug!("[Brocade] Parsing stack port entry: {line}");
        let parts: Vec<&str> = line.split_whitespace().collect();
        // Data rows have at least 10 columns; shorter lines are headers/noise.
        if parts.len() < 10 {
            return None;
        }
        let local_port = PortLocation::from_str(parts[0]).ok()?;
        Some(Ok(InterSwitchLink {
            local_port,
            remote_port: None,
        }))
    }

    /// Builds the CLI command sequence that creates and deploys a static LAG.
    ///
    /// NOTE(review): `ports[0]` panics when `ports` is empty — callers must
    /// pass at least one port; consider guarding upstream.
    fn build_port_channel_commands(
        &self,
        channel_id: PortChannelId,
        channel_name: &str,
        ports: &[PortLocation],
    ) -> Vec<String> {
        let mut commands = vec![
            "configure terminal".to_string(),
            format!("lag {channel_name} static id {channel_id}"),
        ];
        for port in ports {
            commands.push(format!("ports ethernet {port}"));
        }
        // The first listed port becomes the LAG's primary port.
        commands.push(format!("primary-port {}", ports[0]));
        commands.push("deploy".into());
        commands.push("exit".into());
        commands.push("write memory".into());
        commands.push("exit".into());
        commands
    }
}
#[async_trait]
impl BrocadeClient for FastIronClient {
    /// Returns the version info captured when the client was initialized.
    async fn version(&self) -> Result<BrocadeInfo, Error> {
        let info = self.version.clone();
        Ok(info)
    }

    /// Reads and parses the switch's MAC address table.
    async fn get_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error> {
        info!("[Brocade] Showing MAC address table...");
        let raw = self
            .shell
            .run_command("show mac-address", ExecutionMode::Regular)
            .await?;
        // The first two lines are table headers; stop at the first parse error.
        let mut entries = Vec::new();
        for line in raw.lines().skip(2) {
            if let Some(entry) = self.parse_mac_entry(line) {
                entries.push(entry?);
            }
        }
        Ok(entries)
    }

    /// Lists the stack-port links joining the switches of the stack.
    async fn get_stack_topology(&self) -> Result<Vec<InterSwitchLink>, Error> {
        let raw = self
            .shell
            .run_command("show interface stack-ports", ExecutionMode::Regular)
            .await?;
        // Skip the single header line; stop at the first parse error.
        let mut links = Vec::new();
        for line in raw.lines().skip(1) {
            if let Some(link) = self.parse_stack_port_entry(line) {
                links.push(link?);
            }
        }
        Ok(links)
    }

    async fn get_interfaces(&self) -> Result<Vec<InterfaceInfo>, Error> {
        todo!()
    }

    async fn configure_interfaces(
        &self,
        _interfaces: &Vec<(String, PortOperatingMode)>,
    ) -> Result<(), Error> {
        todo!()
    }

    /// Scans `show lag` output and returns the smallest LAG id not in use.
    async fn find_available_channel_id(&self) -> Result<PortChannelId, Error> {
        info!("[Brocade] Finding next available channel id...");
        let output = self
            .shell
            .run_command("show lag", ExecutionMode::Regular)
            .await?;
        // Each LAG header looks like "=== LAG <name> ID <n>".
        let re = Regex::new(r"=== LAG .* ID\s+(\d+)").expect("Invalid regex");
        let used_ids: HashSet<u8> = output
            .lines()
            .filter_map(|line| {
                let id = re.captures(line)?.get(1)?;
                id.as_str().parse().ok()
            })
            .collect();
        let mut next_id: u8 = 1;
        while used_ids.contains(&next_id) {
            next_id += 1;
        }
        info!("[Brocade] Found channel id: {next_id}");
        Ok(next_id)
    }

    /// Creates, deploys and persists a static port-channel (LAG).
    async fn create_port_channel(
        &self,
        channel_id: PortChannelId,
        channel_name: &str,
        ports: &[PortLocation],
    ) -> Result<(), Error> {
        info!(
            "[Brocade] Configuring port-channel '{channel_name} {channel_id}' with ports: {ports:?}"
        );
        let commands = self.build_port_channel_commands(channel_id, channel_name, ports);
        self.shell
            .run_commands(commands, ExecutionMode::Privileged)
            .await?;
        info!("[Brocade] Port-channel '{channel_name}' configured.");
        Ok(())
    }

    /// Removes a LAG by name and persists the change.
    async fn clear_port_channel(&self, channel_name: &str) -> Result<(), Error> {
        info!("[Brocade] Clearing port-channel: {channel_name}");
        let commands = vec![
            "configure terminal".to_string(),
            format!("no lag {channel_name}"),
            "write memory".to_string(),
        ];
        self.shell
            .run_commands(commands, ExecutionMode::Privileged)
            .await?;
        info!("[Brocade] Port-channel '{channel_name}' cleared.");
        Ok(())
    }

    /// Configures an SNMPv3 view/group/user on the switch.
    async fn enable_snmp(&self, user_name: &str, auth: &str, des: &str) -> Result<(), Error> {
        let mut commands: Vec<String> = Vec::with_capacity(5);
        commands.push("configure terminal".into());
        commands.push("snmp-server view ALL 1 included".into());
        commands.push("snmp-server group public v3 priv read ALL".into());
        commands.push(format!(
            "snmp-server user {user_name} groupname public auth md5 auth-password {auth} priv des priv-password {des}"
        ));
        commands.push("exit".into());
        self.shell
            .run_commands(commands, ExecutionMode::Regular)
            .await?;
        Ok(())
    }
}

View File

@@ -1,352 +0,0 @@
use std::net::IpAddr;
use std::{
fmt::{self, Display},
time::Duration,
};
use crate::network_operating_system::NetworkOperatingSystemClient;
use crate::{
fast_iron::FastIronClient,
shell::{BrocadeSession, BrocadeShell},
};
use async_trait::async_trait;
use harmony_types::net::MacAddress;
use harmony_types::switch::{PortDeclaration, PortLocation};
use regex::Regex;
use serde::Serialize;
mod fast_iron;
mod network_operating_system;
mod shell;
pub mod ssh;
/// Connection and behavior options shared by all Brocade clients.
#[derive(Default, Clone, Debug)]
pub struct BrocadeOptions {
    /// When true, mutating commands (`write ...`, `deploy ...`) are skipped.
    pub dry_run: bool,
    /// SSH transport settings (port, preferred algorithms).
    pub ssh: ssh::SshOptions,
    /// Timeout budget applied to shell setup, command execution and cleanup.
    pub timeouts: TimeoutConfig,
}
/// Timeout budget for the different phases of a Brocade SSH session.
#[derive(Clone, Debug)]
pub struct TimeoutConfig {
    /// Max wait for the initial shell prompt after opening a session.
    pub shell_ready: Duration,
    /// Max wait for a single command to finish producing output.
    pub command_execution: Duration,
    /// Delay before "waiting for command output" log messages start.
    pub command_output: Duration,
    /// Max wait for a session to close gracefully.
    pub cleanup: Duration,
    /// Polling interval while waiting for individual channel messages.
    pub message_wait: Duration,
}
impl Default for TimeoutConfig {
    /// Conservative defaults suitable for slow switch CLIs.
    fn default() -> Self {
        Self {
            shell_ready: Duration::from_secs(10),
            command_execution: Duration::from_secs(60), // Commands like `deploy` (for a LAG) can take a while
            command_output: Duration::from_secs(5), // Delay to start logging "waiting for command output"
            cleanup: Duration::from_secs(10),
            message_wait: Duration::from_millis(500),
        }
    }
}
/// Privilege level a command or command batch runs under.
enum ExecutionMode {
    /// Standard user shell.
    Regular,
    /// Elevated shell (entered via `enable`).
    Privileged,
}
/// OS family and firmware version reported by `show version`.
#[derive(Clone, Debug)]
pub struct BrocadeInfo {
    /// OS family, used to select the concrete client implementation.
    os: BrocadeOs,
    /// Raw firmware version string (currently informational only).
    _version: String,
}
/// Brocade operating-system family detected from `show version` output.
#[derive(Clone, Debug)]
pub enum BrocadeOs {
    /// Output mentions "Network Operating System".
    NetworkOperatingSystem,
    /// Output mentions "ICX" (FastIron family).
    FastIron,
    /// Output matched no known OS family.
    Unknown,
}
/// One row of a switch MAC address table.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]
pub struct MacAddressEntry {
    /// VLAN the MAC address was learned on.
    pub vlan: u16,
    /// The learned MAC address.
    pub mac_address: MacAddress,
    /// Port (or port group) where the address was seen.
    pub port: PortDeclaration,
}
/// Numeric identifier of a port-channel (LAG), stored as a `u8`.
pub type PortChannelId = u8;
/// Represents a single physical or logical link connecting two switches within a stack or fabric.
///
/// This structure provides a standardized view of the topology regardless of the
/// underlying Brocade OS configuration (stacking vs. fabric).
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct InterSwitchLink {
    /// The local port on the switch where the topology command was run.
    pub local_port: PortLocation,
    /// The port on the directly connected neighboring switch.
    /// `None` when the OS output does not report the remote end
    /// (e.g. FastIron `show interface stack-ports`).
    pub remote_port: Option<PortLocation>,
}
/// Represents the key running configuration status of a single switch interface.
///
/// Produced by parsing `show interface status` output; see the per-OS clients.
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct InterfaceInfo {
    /// The full configuration name (e.g., "TenGigabitEthernet 1/0/1", "FortyGigabitEthernet 2/0/2").
    pub name: String,
    /// The physical location of the interface.
    pub port_location: PortLocation,
    /// The parsed type and name prefix of the interface.
    pub interface_type: InterfaceType,
    /// The primary configuration mode defining the interface's behavior (L2, L3, Fabric).
    /// `None` when the switch reports no mode (`--`).
    pub operating_mode: Option<PortOperatingMode>,
    /// Indicates the current state of the interface.
    pub status: InterfaceStatus,
}
/// Categorizes the functional type of a switch interface.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum InterfaceType {
    /// Physical or virtual Ethernet interface (e.g., TenGigabitEthernet, FortyGigabitEthernet).
    /// The inner `String` is the full name prefix used in configuration commands.
    Ethernet(String),
}
impl fmt::Display for InterfaceType {
    /// Renders the interface type as its configuration-name prefix
    /// (e.g. "TenGigabitEthernet").
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Single-variant enum: an irrefutable binding replaces the match.
        let InterfaceType::Ethernet(name) = self;
        f.write_str(name)
    }
}
/// Defines the primary configuration mode of a switch interface, representing mutually exclusive roles.
///
/// Derives `Serialize` so the mode can appear in serialized output.
#[derive(Debug, PartialEq, Eq, Clone, Serialize)]
pub enum PortOperatingMode {
    /// The interface is explicitly configured for Brocade fabric roles (ISL or Trunk enabled).
    Fabric,
    /// The interface is configured for standard Layer 2 switching as Trunk port (`switchport mode trunk`).
    Trunk,
    /// The interface is configured for standard Layer 2 switching as Access port (`switchport` without trunk mode).
    Access,
}
/// Defines the possible status of an interface.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum InterfaceStatus {
    /// The interface is connected.
    Connected,
    /// The interface is not connected and is not expected to be.
    NotConnected,
    /// The switch reports the `sfpAbsent` status for this interface.
    // NOTE(review): the previous doc ("not connected but is expected to be")
    // does not match the variant name; parsing maps the literal token
    // `sfpAbsent` here — confirm the intended semantics with the switch docs.
    SfpAbsent,
}
pub async fn init(
ip_addresses: &[IpAddr],
username: &str,
password: &str,
options: &BrocadeOptions,
) -> Result<Box<dyn BrocadeClient + Send + Sync>, Error> {
let shell = BrocadeShell::init(ip_addresses, username, password, options).await?;
let version_info = shell
.with_session(ExecutionMode::Regular, |session| {
Box::pin(get_brocade_info(session))
})
.await?;
Ok(match version_info.os {
BrocadeOs::FastIron => Box::new(FastIronClient::init(shell, version_info)),
BrocadeOs::NetworkOperatingSystem => {
Box::new(NetworkOperatingSystemClient::init(shell, version_info))
}
BrocadeOs::Unknown => todo!(),
})
}
/// OS-agnostic operations supported on a Brocade switch (or stack/fabric).
///
/// Concrete implementations exist per OS family (FastIron, Network Operating
/// System) and are selected by [`init`] based on `show version` output.
#[async_trait]
pub trait BrocadeClient: std::fmt::Debug {
    /// Retrieves the operating system and version details from the connected Brocade switch.
    ///
    /// This is typically the first call made after establishing a connection to determine
    /// the switch OS family (e.g., FastIron, NOS) for feature compatibility.
    ///
    /// # Returns
    ///
    /// A `BrocadeInfo` structure containing parsed OS type and version string.
    async fn version(&self) -> Result<BrocadeInfo, Error>;
    /// Retrieves the dynamically learned MAC address table from the switch.
    ///
    /// This is crucial for discovering where specific network endpoints (MAC addresses)
    /// are currently located on the physical ports.
    ///
    /// # Returns
    ///
    /// A vector of `MacAddressEntry`, where each entry typically contains VLAN, MAC address,
    /// and the associated port name/index.
    async fn get_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error>;
    /// Derives the physical connections used to link multiple switches together
    /// to form a single logical entity (stack, fabric, etc.).
    ///
    /// This abstracts the underlying configuration (e.g., stack ports, fabric ports)
    /// to return a standardized view of the topology.
    ///
    /// # Returns
    ///
    /// A vector of `InterSwitchLink` structs detailing which ports are used for stacking/fabric.
    /// If the switch is not stacked, returns an empty vector.
    async fn get_stack_topology(&self) -> Result<Vec<InterSwitchLink>, Error>;
    /// Retrieves the status for all interfaces
    ///
    /// # Returns
    ///
    /// A vector of `InterfaceInfo` structures.
    async fn get_interfaces(&self) -> Result<Vec<InterfaceInfo>, Error>;
    /// Configures a set of interfaces to be operated with a specified mode (access ports, ISL, etc.).
    ///
    /// # Parameters
    ///
    /// * `interfaces`: Pairs of full interface name (e.g. "TenGigabitEthernet 1/0/1")
    ///   and the desired operating mode.
    async fn configure_interfaces(
        &self,
        interfaces: &Vec<(String, PortOperatingMode)>,
    ) -> Result<(), Error>;
    /// Scans the existing configuration to find the next available (unused)
    /// Port-Channel ID (`lag` or `trunk`) for assignment.
    ///
    /// # Returns
    ///
    /// The smallest, unassigned `PortChannelId` within the supported range.
    async fn find_available_channel_id(&self) -> Result<PortChannelId, Error>;
    /// Creates and configures a new Port-Channel (Link Aggregation Group or LAG)
    /// using the specified channel ID and ports.
    ///
    /// The resulting configuration must be persistent (saved to startup-config).
    /// Assumes a static LAG configuration mode unless specified otherwise by the implementation.
    ///
    /// # Parameters
    ///
    /// * `channel_id`: The ID (e.g., 1-128) for the logical port channel.
    /// * `channel_name`: A descriptive name for the LAG (used in configuration context).
    /// * `ports`: A slice of `PortLocation` structs defining the physical member ports.
    async fn create_port_channel(
        &self,
        channel_id: PortChannelId,
        channel_name: &str,
        ports: &[PortLocation],
    ) -> Result<(), Error>;
    /// Enables Simple Network Management Protocol (SNMP) server for switch
    ///
    /// # Parameters
    ///
    /// * `user_name`: The user name for the snmp server
    /// * `auth`: The password for authentication process for verifying the identity of a device
    /// * `des`: The Data Encryption Standard algorithm key
    async fn enable_snmp(&self, user_name: &str, auth: &str, des: &str) -> Result<(), Error>;
    /// Removes all configuration associated with the specified Port-Channel name.
    ///
    /// This operation should be idempotent; attempting to clear a non-existent
    /// channel should succeed (or return a benign error).
    ///
    /// # Parameters
    ///
    /// * `channel_name`: The name of the Port-Channel (LAG) to delete.
    ///
    async fn clear_port_channel(&self, channel_name: &str) -> Result<(), Error>;
}
/// Runs `show version` on the session and classifies the switch OS family,
/// extracting the firmware version string with an OS-specific pattern.
///
/// # Errors
///
/// `UnexpectedError` when the output matches neither "Network Operating
/// System" nor "ICX" (FastIron).
async fn get_brocade_info(session: &mut BrocadeSession) -> Result<BrocadeInfo, Error> {
    let output = session.run_command("show version").await?;
    // Shared extraction: both OS families use a named `version` capture group;
    // an absent match yields an empty version string.
    let capture_version = |pattern: &str| -> String {
        Regex::new(pattern)
            .expect("Invalid regex")
            .captures(&output)
            .and_then(|cap| cap.name("version"))
            .map(|m| m.as_str().to_string())
            .unwrap_or_default()
    };
    if output.contains("Network Operating System") {
        Ok(BrocadeInfo {
            os: BrocadeOs::NetworkOperatingSystem,
            _version: capture_version(
                r"Network Operating System Version:\s*(?P<version>[a-zA-Z0-9.\-]+)",
            ),
        })
    } else if output.contains("ICX") {
        Ok(BrocadeInfo {
            os: BrocadeOs::FastIron,
            _version: capture_version(r"(?m)^\s*SW: Version\s*(?P<version>[a-zA-Z0-9.\-]+)"),
        })
    } else {
        Err(Error::UnexpectedError("Unknown Brocade OS version".into()))
    }
}
/// Converts a Brocade-formatted MAC address (e.g. `0000.1234.abcd`) into a
/// [`MacAddress`].
///
/// Dots are stripped; the remaining text must be exactly 12 hex digits.
/// Returns a descriptive error string for malformed input.
fn parse_brocade_mac_address(value: &str) -> Result<MacAddress, String> {
    let hex: String = value.chars().filter(|&c| c != '.').collect();
    if hex.len() != 12 {
        return Err(format!("Invalid MAC address: {value}"));
    }
    let mut bytes = [0u8; 6];
    // 12 hex digits split into 6 two-character chunks, one per output byte.
    for (dst, chunk) in bytes.iter_mut().zip(hex.as_bytes().chunks(2)) {
        let pair = std::str::from_utf8(chunk).map_err(|_| "Invalid UTF-8")?;
        *dst = u8::from_str_radix(pair, 16)
            .map_err(|_| format!("Invalid hex in MAC: {value}"))?;
    }
    Ok(MacAddress(bytes))
}
/// SNMPv3 security level selector.
#[derive(Debug)]
pub enum SecurityLevel {
    /// Authentication and privacy; the `String` carries the associated secret.
    // NOTE(review): the payload is not consumed in this file — confirm its
    // exact meaning against the callers.
    AuthPriv(String),
}
/// Errors produced by Brocade clients and the underlying SSH shell.
#[derive(Debug)]
pub enum Error {
    /// Transport-level failure (connection, SSH protocol).
    NetworkError(String),
    /// SSH login or `enable` elevation failed.
    AuthenticationError(String),
    /// Invalid or missing client configuration (e.g. no IP addresses given).
    ConfigurationError(String),
    /// A timeout budget from [`TimeoutConfig`] was exceeded.
    TimeoutError(String),
    /// Anything that does not fit the other categories (parse failures, unknown OS).
    UnexpectedError(String),
    /// The switch CLI reported an error, or a mutating command produced no output.
    CommandError(String),
}
impl Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Error::NetworkError(msg) => write!(f, "Network error: {msg}"),
Error::AuthenticationError(msg) => write!(f, "Authentication error: {msg}"),
Error::ConfigurationError(msg) => write!(f, "Configuration error: {msg}"),
Error::TimeoutError(msg) => write!(f, "Timeout error: {msg}"),
Error::UnexpectedError(msg) => write!(f, "Unexpected error: {msg}"),
Error::CommandError(msg) => write!(f, "{msg}"),
}
}
}
impl From<Error> for String {
    /// Converts an [`Error`] into its `Display` rendering.
    fn from(val: Error) -> Self {
        val.to_string()
    }
}
// Marker impl: `Display` and `Debug` (both implemented for `Error`) satisfy
// the `std::error::Error` contract with default methods.
impl std::error::Error for Error {}
impl From<russh::Error> for Error {
    /// Wraps any transport-level russh failure as a network error so `?`
    /// works on russh calls throughout the crate.
    fn from(value: russh::Error) -> Self {
        let detail = format!("Russh client error: {value}");
        Self::NetworkError(detail)
    }
}

View File

@@ -1,352 +0,0 @@
use std::str::FromStr;
use async_trait::async_trait;
use harmony_types::switch::{PortDeclaration, PortLocation};
use log::{debug, info};
use regex::Regex;
use crate::{
BrocadeClient, BrocadeInfo, Error, ExecutionMode, InterSwitchLink, InterfaceInfo,
InterfaceStatus, InterfaceType, MacAddressEntry, PortChannelId, PortOperatingMode,
parse_brocade_mac_address, shell::BrocadeShell,
};
/// [`BrocadeClient`] implementation for switches running Brocade Network
/// Operating System (NOS).
#[derive(Debug)]
pub struct NetworkOperatingSystemClient {
    // SSH shell used to run CLI commands; paging is disabled per session.
    shell: BrocadeShell,
    // OS/version info captured at detection time.
    version: BrocadeInfo,
}
impl NetworkOperatingSystemClient {
    /// Wraps an established shell, registering `terminal length 0` to run at
    /// the start of every session so command output is never paginated.
    pub fn init(mut shell: BrocadeShell, version_info: BrocadeInfo) -> Self {
        shell.before_all(vec!["terminal length 0".into()]);
        Self {
            shell,
            version: version_info,
        }
    }
    /// Parses one `show mac-address-table` row into a [`MacAddressEntry`].
    ///
    /// Returns `None` for non-data lines (fewer than 5 fields) or rows whose
    /// VLAN/MAC fields don't parse; returns `Some(Err(_))` when the port
    /// column is present but malformed.
    fn parse_mac_entry(&self, line: &str) -> Option<Result<MacAddressEntry, Error>> {
        debug!("[Brocade] Parsing mac address entry: {line}");
        let parts: Vec<&str> = line.split_whitespace().collect();
        if parts.len() < 5 {
            return None;
        }
        // The port column shifts one position to the right on rows that carry
        // an extra field (more than 5 columns).
        let (vlan, mac_address, port) = match parts.len() {
            5 => (
                u16::from_str(parts[0]).ok()?,
                parse_brocade_mac_address(parts[1]).ok()?,
                parts[4].to_string(),
            ),
            _ => (
                u16::from_str(parts[0]).ok()?,
                parse_brocade_mac_address(parts[1]).ok()?,
                parts[5].to_string(),
            ),
        };
        let port =
            PortDeclaration::parse(&port).map_err(|e| Error::UnexpectedError(format!("{e}")));
        match port {
            Ok(p) => Some(Ok(MacAddressEntry {
                vlan,
                mac_address,
                port: p,
            })),
            Err(e) => Some(Err(e)),
        }
    }
    /// Parses one `show fabric isl` row: the 3rd column is the local port and
    /// the 6th the remote port. Lines with fewer than 10 fields yield `None`.
    fn parse_inter_switch_link_entry(&self, line: &str) -> Option<Result<InterSwitchLink, Error>> {
        debug!("[Brocade] Parsing inter switch link entry: {line}");
        let parts: Vec<&str> = line.split_whitespace().collect();
        if parts.len() < 10 {
            return None;
        }
        let local_port = PortLocation::from_str(parts[2]).ok()?;
        let remote_port = PortLocation::from_str(parts[5]).ok()?;
        Some(Ok(InterSwitchLink {
            local_port,
            remote_port: Some(remote_port),
        }))
    }
    /// Parses one `show interface status rbridge-id all` row into an
    /// [`InterfaceInfo`]. Rows with unrecognized type/status/mode tokens are
    /// skipped rather than reported as errors.
    fn parse_interface_status_entry(&self, line: &str) -> Option<Result<InterfaceInfo, Error>> {
        debug!("[Brocade] Parsing interface status entry: {line}");
        let parts: Vec<&str> = line.split_whitespace().collect();
        if parts.len() < 6 {
            return None;
        }
        let interface_type = match parts[0] {
            "Fo" => InterfaceType::Ethernet("FortyGigabitEthernet".to_string()),
            "Te" => InterfaceType::Ethernet("TenGigabitEthernet".to_string()),
            _ => return None,
        };
        let port_location = PortLocation::from_str(parts[1]).ok()?;
        let status = match parts[2] {
            "connected" => InterfaceStatus::Connected,
            "notconnected" => InterfaceStatus::NotConnected,
            "sfpAbsent" => InterfaceStatus::SfpAbsent,
            _ => return None,
        };
        let operating_mode = match parts[3] {
            "ISL" => Some(PortOperatingMode::Fabric),
            "Trunk" => Some(PortOperatingMode::Trunk),
            "Access" => Some(PortOperatingMode::Access),
            "--" => None,
            _ => return None,
        };
        Some(Ok(InterfaceInfo {
            name: format!("{interface_type} {port_location}"),
            port_location,
            interface_type,
            operating_mode,
            status,
        }))
    }
    /// Rewrites the cryptic NOS "Cannot configure aggregator member" error
    /// into a message naming the offending interface; all other errors pass
    /// through unchanged.
    fn map_configure_interfaces_error(&self, err: Error) -> Error {
        debug!("[Brocade] {err}");
        if let Error::CommandError(message) = &err {
            if message.contains("switchport")
                && message.contains("Cannot configure aggregator member")
            {
                // The failing interface is only visible in the echoed CLI
                // prompt, e.g. `(conf-if-te-1/0/5)#`.
                let re = Regex::new(r"\(conf-if-([a-zA-Z]+)-([\d/]+)\)#").unwrap();
                if let Some(caps) = re.captures(message) {
                    let interface_type = &caps[1];
                    let port_location = &caps[2];
                    let interface = format!("{interface_type} {port_location}");
                    return Error::CommandError(format!(
                        "Cannot configure interface '{interface}', it is a member of a port-channel (LAG)"
                    ));
                }
            }
        }
        err
    }
}
#[async_trait]
impl BrocadeClient for NetworkOperatingSystemClient {
    // Returns the OS/version info captured at detection time; no round-trip.
    async fn version(&self) -> Result<BrocadeInfo, Error> {
        Ok(self.version.clone())
    }
    // Runs `show mac-address-table`, skipping the single header line.
    async fn get_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error> {
        let output = self
            .shell
            .run_command("show mac-address-table", ExecutionMode::Regular)
            .await?;
        output
            .lines()
            .skip(1)
            .filter_map(|line| self.parse_mac_entry(line))
            .collect()
    }
    // Derives the fabric topology from `show fabric isl`; the first six
    // output lines are banner/header material.
    async fn get_stack_topology(&self) -> Result<Vec<InterSwitchLink>, Error> {
        let output = self
            .shell
            .run_command("show fabric isl", ExecutionMode::Regular)
            .await?;
        output
            .lines()
            .skip(6)
            .filter_map(|line| self.parse_inter_switch_link_entry(line))
            .collect()
    }
    // Lists every interface across all rbridges; two header lines skipped.
    async fn get_interfaces(&self) -> Result<Vec<InterfaceInfo>, Error> {
        let output = self
            .shell
            .run_command(
                "show interface status rbridge-id all",
                ExecutionMode::Regular,
            )
            .await?;
        output
            .lines()
            .skip(2)
            .filter_map(|line| self.parse_interface_status_entry(line))
            .collect()
    }
    // Applies the requested operating mode to each interface in a single
    // configuration session; errors are translated by
    // `map_configure_interfaces_error` before being returned.
    async fn configure_interfaces(
        &self,
        interfaces: &Vec<(String, PortOperatingMode)>,
    ) -> Result<(), Error> {
        info!("[Brocade] Configuring {} interface(s)...", interfaces.len());
        let mut commands = vec!["configure terminal".to_string()];
        for interface in interfaces {
            commands.push(format!("interface {}", interface.0));
            match interface.1 {
                PortOperatingMode::Fabric => {
                    commands.push("fabric isl enable".into());
                    commands.push("fabric trunk enable".into());
                }
                PortOperatingMode::Trunk => {
                    commands.push("switchport".into());
                    commands.push("switchport mode trunk".into());
                    commands.push("switchport trunk allowed vlan all".into());
                    commands.push("no switchport trunk tag native-vlan".into());
                    commands.push("spanning-tree shutdown".into());
                    commands.push("no fabric isl enable".into());
                    commands.push("no fabric trunk enable".into());
                    commands.push("no shutdown".into());
                }
                PortOperatingMode::Access => {
                    commands.push("switchport".into());
                    commands.push("switchport mode access".into());
                    commands.push("switchport access vlan 1".into());
                    commands.push("no spanning-tree shutdown".into());
                    commands.push("no fabric isl enable".into());
                    commands.push("no fabric trunk enable".into());
                }
            }
            // Every interface is brought up and the config context exited,
            // regardless of mode.
            commands.push("no shutdown".into());
            commands.push("exit".into());
        }
        self.shell
            .run_commands(commands, ExecutionMode::Regular)
            .await
            .map_err(|err| self.map_configure_interfaces_error(err))?;
        info!("[Brocade] Interfaces configured.");
        Ok(())
    }
    // Parses `show port-channel summary` (six header lines skipped) and
    // returns the smallest unused channel id, starting at 1.
    async fn find_available_channel_id(&self) -> Result<PortChannelId, Error> {
        info!("[Brocade] Finding next available channel id...");
        let output = self
            .shell
            .run_command("show port-channel summary", ExecutionMode::Regular)
            .await?;
        let used_ids: Vec<u8> = output
            .lines()
            .skip(6)
            .filter_map(|line| {
                let parts: Vec<&str> = line.split_whitespace().collect();
                if parts.len() < 8 {
                    return None;
                }
                u8::from_str(parts[0]).ok()
            })
            .collect();
        let mut next_id: u8 = 1;
        loop {
            if !used_ids.contains(&next_id) {
                break;
            }
            next_id += 1;
        }
        info!("[Brocade] Found channel id: {next_id}");
        Ok(next_id)
    }
    // Creates a `channel-group ... mode active` port-channel and joins each
    // requested port to it. Ports absent from the live interface list are
    // silently skipped.
    async fn create_port_channel(
        &self,
        channel_id: PortChannelId,
        channel_name: &str,
        ports: &[PortLocation],
    ) -> Result<(), Error> {
        info!(
            "[Brocade] Configuring port-channel '{channel_id} {channel_name}' with ports: {}",
            ports
                .iter()
                .map(|p| format!("{p}"))
                .collect::<Vec<String>>()
                .join(", ")
        );
        let interfaces = self.get_interfaces().await?;
        let mut commands = vec![
            "configure terminal".into(),
            format!("interface port-channel {}", channel_id),
            "no shutdown".into(),
            "exit".into(),
        ];
        for port in ports {
            let interface = interfaces.iter().find(|i| i.port_location == *port);
            let Some(interface) = interface else {
                continue;
            };
            commands.push(format!("interface {}", interface.name));
            // Strip conflicting L2/L3/fabric config before joining the group.
            commands.push("no switchport".into());
            commands.push("no ip address".into());
            commands.push("no fabric isl enable".into());
            commands.push("no fabric trunk enable".into());
            commands.push(format!("channel-group {channel_id} mode active"));
            commands.push("no shutdown".into());
            commands.push("exit".into());
        }
        self.shell
            .run_commands(commands, ExecutionMode::Regular)
            .await?;
        info!("[Brocade] Port-channel '{channel_name}' configured.");
        Ok(())
    }
    // Deletes the port-channel interface.
    // NOTE(review): `channel_name` is interpolated where the create path uses
    // the numeric channel id — confirm callers pass the id here.
    async fn clear_port_channel(&self, channel_name: &str) -> Result<(), Error> {
        info!("[Brocade] Clearing port-channel: {channel_name}");
        let commands = vec![
            "configure terminal".into(),
            format!("no interface port-channel {}", channel_name),
            "exit".into(),
        ];
        self.shell
            .run_commands(commands, ExecutionMode::Regular)
            .await?;
        info!("[Brocade] Port-channel '{channel_name}' cleared.");
        Ok(())
    }
    // Enables SNMPv3 (MD5 auth + DES privacy) for `user_name` under the
    // `public` group.
    async fn enable_snmp(&self, user_name: &str, auth: &str, des: &str) -> Result<(), Error> {
        let commands = vec![
            "configure terminal".into(),
            "snmp-server view ALL 1 included".into(),
            "snmp-server group public v3 priv read ALL".into(),
            format!(
                "snmp-server user {user_name} groupname public auth md5 auth-password {auth} priv des priv-password {des}"
            ),
            "exit".into(),
        ];
        self.shell
            .run_commands(commands, ExecutionMode::Regular)
            .await?;
        Ok(())
    }
}

View File

@@ -1,367 +0,0 @@
use std::net::IpAddr;
use std::time::Duration;
use std::time::Instant;
use crate::BrocadeOptions;
use crate::Error;
use crate::ExecutionMode;
use crate::TimeoutConfig;
use crate::ssh;
use log::debug;
use log::info;
use russh::ChannelMsg;
use tokio::time::timeout;
/// Reusable factory for SSH sessions to a Brocade switch.
///
/// Stores credentials plus optional command batches that run at the start
/// (`before_all`) and end (`after_all`) of every managed session.
#[derive(Debug)]
pub struct BrocadeShell {
    // Address of the switch (first entry of the list given to `init`).
    ip: IpAddr,
    username: String,
    password: String,
    // SSH options negotiated by `ssh::try_init_client`.
    options: BrocadeOptions,
    // Commands run right after every session opens (e.g. disable paging).
    before_all_commands: Vec<String>,
    // Commands run right before every session closes.
    after_all_commands: Vec<String>,
}
impl BrocadeShell {
    /// Validates the target list and negotiates a working set of SSH options
    /// against the first address. No persistent connection is kept; sessions
    /// are opened on demand.
    ///
    /// # Errors
    ///
    /// `ConfigurationError` if `ip_addresses` is empty, plus any error from
    /// the SSH negotiation.
    pub async fn init(
        ip_addresses: &[IpAddr],
        username: &str,
        password: &str,
        options: &BrocadeOptions,
    ) -> Result<Self, Error> {
        let ip = ip_addresses
            .first()
            .ok_or_else(|| Error::ConfigurationError("No IP addresses provided".to_string()))?;
        let brocade_ssh_client_options =
            ssh::try_init_client(username, password, ip, options).await?;
        Ok(Self {
            ip: *ip,
            username: username.to_string(),
            password: password.to_string(),
            before_all_commands: vec![],
            after_all_commands: vec![],
            options: brocade_ssh_client_options,
        })
    }
    /// Opens a fresh interactive session at the requested privilege level.
    pub async fn open_session(&self, mode: ExecutionMode) -> Result<BrocadeSession, Error> {
        BrocadeSession::open(
            self.ip,
            self.options.ssh.port,
            &self.username,
            &self.password,
            self.options.clone(),
            mode,
        )
        .await
    }
    /// Runs `callback` inside a managed session: opens it, runs the
    /// `before_all` commands, invokes the callback, runs `after_all`, then
    /// closes the session. Failures of the pre/post batches are deliberately
    /// ignored (best effort); the callback's result is returned.
    pub async fn with_session<F, R>(&self, mode: ExecutionMode, callback: F) -> Result<R, Error>
    where
        F: FnOnce(
            &mut BrocadeSession,
        ) -> std::pin::Pin<
            Box<dyn std::future::Future<Output = Result<R, Error>> + Send + '_>,
        >,
    {
        let mut session = self.open_session(mode).await?;
        let _ = session.run_commands(self.before_all_commands.clone()).await;
        let result = callback(&mut session).await;
        let _ = session.run_commands(self.after_all_commands.clone()).await;
        session.close().await?;
        result
    }
    /// Runs a single command in a dedicated managed session and returns its
    /// raw output.
    pub async fn run_command(&self, command: &str, mode: ExecutionMode) -> Result<String, Error> {
        let mut session = self.open_session(mode).await?;
        let _ = session.run_commands(self.before_all_commands.clone()).await;
        let result = session.run_command(command).await;
        let _ = session.run_commands(self.after_all_commands.clone()).await;
        session.close().await?;
        result
    }
    /// Runs a batch of commands in a dedicated managed session, stopping at
    /// the first failure.
    pub async fn run_commands(
        &self,
        commands: Vec<String>,
        mode: ExecutionMode,
    ) -> Result<(), Error> {
        let mut session = self.open_session(mode).await?;
        let _ = session.run_commands(self.before_all_commands.clone()).await;
        let result = session.run_commands(commands).await;
        let _ = session.run_commands(self.after_all_commands.clone()).await;
        session.close().await?;
        result
    }
    /// Replaces the command batch run at the start of every session.
    pub fn before_all(&mut self, commands: Vec<String>) {
        self.before_all_commands = commands;
    }
    /// Replaces the command batch run at the end of every session.
    pub fn after_all(&mut self, commands: Vec<String>) {
        self.after_all_commands = commands;
    }
}
/// A live interactive SSH shell session on a Brocade switch.
pub struct BrocadeSession {
    /// Underlying SSH channel with a PTY and shell attached.
    pub channel: russh::Channel<russh::client::Msg>,
    /// Privilege level the session was opened with.
    pub mode: ExecutionMode,
    /// Options (timeouts, dry-run) governing this session.
    pub options: BrocadeOptions,
}
impl BrocadeSession {
    /// Establishes an interactive shell session: connects, requests a PTY and
    /// shell, waits for the first prompt, and elevates privileges when `mode`
    /// is [`ExecutionMode::Privileged`].
    pub async fn open(
        ip: IpAddr,
        port: u16,
        username: &str,
        password: &str,
        options: BrocadeOptions,
        mode: ExecutionMode,
    ) -> Result<Self, Error> {
        let client = ssh::create_client(ip, port, username, password, &options).await?;
        let mut channel = client.channel_open_session().await?;
        channel
            .request_pty(false, "vt100", 80, 24, 0, 0, &[])
            .await?;
        channel.request_shell(false).await?;
        wait_for_shell_ready(&mut channel, &options.timeouts).await?;
        if let ExecutionMode::Privileged = mode {
            try_elevate_session(&mut channel, username, password, &options.timeouts).await?;
        }
        Ok(Self {
            channel,
            mode,
            options,
        })
    }
    /// Sends `exit` (twice when privileged, to leave the enable shell first)
    /// and drains channel messages until close or the cleanup timeout.
    pub async fn close(&mut self) -> Result<(), Error> {
        debug!("[Brocade] Closing session...");
        self.channel.data(&b"exit\n"[..]).await?;
        if let ExecutionMode::Privileged = self.mode {
            self.channel.data(&b"exit\n"[..]).await?;
        }
        let start = Instant::now();
        while start.elapsed() < self.options.timeouts.cleanup {
            match timeout(self.options.timeouts.message_wait, self.channel.wait()).await {
                Ok(Some(ChannelMsg::Close)) => break,
                Ok(Some(_)) => continue,
                Ok(None) | Err(_) => break,
            }
        }
        debug!("[Brocade] Session closed.");
        Ok(())
    }
    /// Sends one command line and returns everything the switch printed up to
    /// the next prompt.
    ///
    /// In dry-run mode, `write ...` / `deploy ...` commands are skipped and an
    /// empty string is returned.
    ///
    /// # Errors
    ///
    /// Timeout waiting for completion, invalid UTF-8 in the output, or a
    /// switch-reported command error (see `check_for_command_errors`).
    pub async fn run_command(&mut self, command: &str) -> Result<String, Error> {
        if self.should_skip_command(command) {
            return Ok(String::new());
        }
        debug!("[Brocade] Running command: '{command}'...");
        self.channel
            .data(format!("{}\n", command).as_bytes())
            .await?;
        // Give the switch a moment to start echoing before collecting output.
        tokio::time::sleep(Duration::from_millis(100)).await;
        let output = self.collect_command_output().await?;
        let output = String::from_utf8(output)
            .map_err(|_| Error::UnexpectedError("Invalid UTF-8 in command output".to_string()))?;
        self.check_for_command_errors(&output, command)?;
        Ok(output)
    }
    /// Runs the commands sequentially, stopping at the first failure.
    pub async fn run_commands(&mut self, commands: Vec<String>) -> Result<(), Error> {
        for command in commands {
            self.run_command(&command).await?;
        }
        Ok(())
    }
    /// True when dry-run mode is enabled and the command would mutate switch
    /// state (`write ...` or `deploy ...`).
    fn should_skip_command(&self, command: &str) -> bool {
        if (command.starts_with("write") || command.starts_with("deploy")) && self.options.dry_run {
            info!("[Brocade] Dry-run mode enabled, skipping command: {command}");
            return true;
        }
        false
    }
    /// Reads channel data until a prompt character (`>` or `#`) appears in
    /// the accumulated output, the channel closes, or the command-execution
    /// timeout elapses. Progress is logged periodically while waiting.
    async fn collect_command_output(&mut self) -> Result<Vec<u8>, Error> {
        let mut output = Vec::new();
        let start = Instant::now();
        let read_timeout = Duration::from_millis(500);
        let log_interval = Duration::from_secs(5);
        let mut last_log = Instant::now();
        loop {
            if start.elapsed() > self.options.timeouts.command_execution {
                return Err(Error::TimeoutError(
                    "Timeout waiting for command completion.".into(),
                ));
            }
            if start.elapsed() > self.options.timeouts.command_output
                && last_log.elapsed() > log_interval
            {
                info!("[Brocade] Waiting for command output...");
                last_log = Instant::now();
            }
            match timeout(read_timeout, self.channel.wait()).await {
                Ok(Some(ChannelMsg::Data { data } | ChannelMsg::ExtendedData { data, .. })) => {
                    output.extend_from_slice(&data);
                    let current_output = String::from_utf8_lossy(&output);
                    // A prompt anywhere in the buffer means the command is done.
                    if current_output.contains('>') || current_output.contains('#') {
                        return Ok(output);
                    }
                }
                Ok(Some(ChannelMsg::Eof | ChannelMsg::Close)) => return Ok(output),
                Ok(Some(ChannelMsg::ExitStatus { exit_status })) => {
                    debug!("[Brocade] Command exit status: {exit_status}");
                }
                Ok(Some(_)) => continue,
                Ok(None) | Err(_) => {
                    if output.is_empty() {
                        // Nothing received yet: one more probe, then keep looping.
                        if let Ok(None) = timeout(read_timeout, self.channel.wait()).await {
                            break;
                        }
                        continue;
                    }
                    tokio::time::sleep(Duration::from_millis(100)).await;
                    let current_output = String::from_utf8_lossy(&output);
                    if current_output.contains('>') || current_output.contains('#') {
                        return Ok(output);
                    }
                }
            }
        }
        Ok(output)
    }
    /// Scans the output for known CLI error phrases; also treats empty output
    /// from a non-`show` command as a failure.
    fn check_for_command_errors(&self, output: &str, command: &str) -> Result<(), Error> {
        const ERROR_PATTERNS: &[&str] = &[
            "invalid input",
            "syntax error",
            "command not found",
            "unknown command",
            "permission denied",
            "access denied",
            "authentication failed",
            "configuration error",
            "failed to",
            "error:",
        ];
        let output_lower = output.to_lowercase();
        if ERROR_PATTERNS.iter().any(|&p| output_lower.contains(p)) {
            return Err(Error::CommandError(format!(
                "Command error: {}",
                output.trim()
            )));
        }
        if !command.starts_with("show") && output.trim().is_empty() {
            return Err(Error::CommandError(format!(
                "Command '{command}' produced no output"
            )));
        }
        Ok(())
    }
}
/// Accumulates channel output until a shell prompt (`>` or `#`) appears,
/// giving up after `timeouts.shell_ready`.
// NOTE(review): returns Ok(()) even when the deadline passes without seeing
// a prompt — callers then proceed optimistically; confirm this is intended.
async fn wait_for_shell_ready(
    channel: &mut russh::Channel<russh::client::Msg>,
    timeouts: &TimeoutConfig,
) -> Result<(), Error> {
    let mut buffer = Vec::new();
    let start = Instant::now();
    while start.elapsed() < timeouts.shell_ready {
        match timeout(timeouts.message_wait, channel.wait()).await {
            Ok(Some(ChannelMsg::Data { data })) => {
                buffer.extend_from_slice(&data);
                let output = String::from_utf8_lossy(&buffer);
                let output = output.trim();
                if output.ends_with('>') || output.ends_with('#') {
                    debug!("[Brocade] Shell ready");
                    return Ok(());
                }
            }
            Ok(Some(_)) => continue, // ignore non-data channel messages
            Ok(None) => break,       // channel closed
            Err(_) => continue,      // poll timed out: keep waiting
        }
    }
    Ok(())
}
/// Sends `enable` and drives the interactive elevation dialogue, replying to
/// `User Name:` and `Password:` prompts until a privileged prompt (`#`)
/// appears.
///
/// # Errors
///
/// `AuthenticationError` when the switch falls back to the unprivileged
/// prompt (`>`) or no privileged prompt shows before `timeouts.shell_ready`.
async fn try_elevate_session(
    channel: &mut russh::Channel<russh::client::Msg>,
    username: &str,
    password: &str,
    timeouts: &TimeoutConfig,
) -> Result<(), Error> {
    channel.data(&b"enable\n"[..]).await?;
    let start = Instant::now();
    let mut buffer = Vec::new();
    while start.elapsed() < timeouts.shell_ready {
        match timeout(timeouts.message_wait, channel.wait()).await {
            Ok(Some(ChannelMsg::Data { data })) => {
                buffer.extend_from_slice(&data);
                let output = String::from_utf8_lossy(&buffer);
                if output.ends_with('#') {
                    debug!("[Brocade] Privileged mode established");
                    return Ok(());
                }
                // The buffer is cleared after answering each prompt so the
                // next check only sees the switch's reply.
                if output.contains("User Name:") {
                    channel.data(format!("{}\n", username).as_bytes()).await?;
                    buffer.clear();
                } else if output.contains("Password:") {
                    channel.data(format!("{}\n", password).as_bytes()).await?;
                    buffer.clear();
                } else if output.contains('>') {
                    return Err(Error::AuthenticationError(
                        "Enable authentication failed".into(),
                    ));
                }
            }
            Ok(Some(_)) => continue,
            Ok(None) => break,
            Err(_) => continue,
        }
    }
    // Deadline passed: accept if the buffered output already ends in `#`.
    let output = String::from_utf8_lossy(&buffer);
    if output.ends_with('#') {
        debug!("[Brocade] Privileged mode established");
        Ok(())
    } else {
        Err(Error::AuthenticationError(format!(
            "Enable failed. Output:\n{output}"
        )))
    }
}

View File

@@ -1,131 +0,0 @@
use std::borrow::Cow;
use std::sync::Arc;
use async_trait::async_trait;
use log::debug;
use russh::client::Handler;
use russh::kex::DH_G1_SHA1;
use russh::kex::ECDH_SHA2_NISTP256;
use russh_keys::key::SSH_RSA;
use super::BrocadeOptions;
use super::Error;
/// SSH transport tuning for connecting to Brocade switches.
#[derive(Clone, Debug)]
pub struct SshOptions {
    /// Algorithm suites offered during key-exchange/host-key negotiation.
    pub preferred_algorithms: russh::Preferred,
    /// TCP port of the SSH server (normally 22).
    pub port: u16,
}
impl Default for SshOptions {
fn default() -> Self {
Self {
preferred_algorithms: Default::default(),
port: 22,
}
}
}
impl SshOptions {
    /// Preset for switches that negotiate ECDH (NIST P-256) key exchange
    /// with RSA host keys.
    // The redundant `..Default::default()` after both fields were already
    // specified has been removed (clippy::needless_update).
    fn ecdhsa_sha2_nistp256(port: u16) -> Self {
        Self {
            preferred_algorithms: russh::Preferred {
                kex: Cow::Borrowed(&[ECDH_SHA2_NISTP256]),
                key: Cow::Borrowed(&[SSH_RSA]),
                ..Default::default()
            },
            port,
        }
    }
    /// Preset for very old firmware that only supports the legacy
    /// Diffie-Hellman group-1 SHA-1 key exchange.
    fn legacy(port: u16) -> Self {
        Self {
            preferred_algorithms: russh::Preferred {
                kex: Cow::Borrowed(&[DH_G1_SHA1]),
                key: Cow::Borrowed(&[SSH_RSA]),
                ..Default::default()
            },
            port,
        }
    }
}
/// Minimal russh connection handler used for Brocade SSH sessions.
pub struct Client;
#[async_trait]
impl Handler for Client {
    type Error = Error;
    /// Accepts any server host key without verification.
    ///
    /// NOTE(review): this disables host-key checking entirely, leaving the
    /// connection open to man-in-the-middle attacks; acceptable only on a
    /// trusted management network — confirm this trade-off is intended.
    async fn check_server_key(
        &mut self,
        _server_public_key: &russh_keys::key::PublicKey,
    ) -> Result<bool, Self::Error> {
        Ok(true)
    }
}
pub async fn try_init_client(
username: &str,
password: &str,
ip: &std::net::IpAddr,
base_options: &BrocadeOptions,
) -> Result<BrocadeOptions, Error> {
let mut default = SshOptions::default();
default.port = base_options.ssh.port;
let ssh_options = vec![
default,
SshOptions::ecdhsa_sha2_nistp256(base_options.ssh.port),
SshOptions::legacy(base_options.ssh.port),
];
for ssh in ssh_options {
let opts = BrocadeOptions {
ssh: ssh.clone(),
..base_options.clone()
};
debug!("Creating client {ip}:{} {username}", ssh.port);
let client = create_client(*ip, ssh.port, username, password, &opts).await;
match client {
Ok(_) => {
return Ok(opts);
}
Err(e) => match e {
Error::NetworkError(e) => {
if e.contains("No common key exchange algorithm") {
continue;
} else {
return Err(Error::NetworkError(e));
}
}
_ => return Err(e),
},
}
}
Err(Error::NetworkError(
"Could not establish ssh connection: wrong key exchange algorithm)".to_string(),
))
}
pub async fn create_client(
ip: std::net::IpAddr,
port: u16,
username: &str,
password: &str,
options: &BrocadeOptions,
) -> Result<russh::client::Handle<Client>, Error> {
let config = russh::client::Config {
preferred: options.ssh.preferred_algorithms.clone(),
..Default::default()
};
let mut client = russh::client::connect(Arc::new(config), (ip, port), Client {}).await?;
if !client.authenticate_password(username, password).await? {
return Err(Error::AuthenticationError(
"ssh authentication failed".to_string(),
));
}
Ok(client)
}

Binary file not shown.

View File

@@ -1,46 +1 @@
# Harmony Documentation Hub
Welcome to the Harmony documentation. This is the main entry point for learning everything from core concepts to building your own Score, Topologies, and Capabilities.
## 1. Getting Started
If you're new to Harmony, start here:
- [**Getting Started Guide**](./guides/getting-started.md): A step-by-step tutorial that takes you from an empty project to deploying your first application.
- [**Core Concepts**](./concepts.md): A high-level overview of the key concepts in Harmony: `Score`, `Topology`, `Capability`, `Inventory`, `Interpret`, ...
## 2. Use Cases & Examples
See how to use Harmony to solve real-world problems.
- [**OKD on Bare Metal**](./use-cases/okd-on-bare-metal.md): A detailed walkthrough of bootstrapping a high-availability OKD cluster from physical hardware.
- [**Deploy a Rust Web App**](./use-cases/deploy-rust-webapp.md): A quick guide to deploying a monitored, containerized web application to a Kubernetes cluster.
## 3. Component Catalogs
Discover existing, reusable components you can use in your Harmony projects.
- [**Scores Catalog**](./catalogs/scores.md): A categorized list of all available `Scores` (the "what").
- [**Topologies Catalog**](./catalogs/topologies.md): A list of all available `Topologies` (the "where").
- [**Capabilities Catalog**](./catalogs/capabilities.md): A list of all available `Capabilities` (the "how").
## 4. Developer Guides
Ready to build your own components? These guides show you how.
- [**Writing a Score**](./guides/writing-a-score.md): Learn how to create your own `Score` and `Interpret` logic to define a new desired state.
- [**Writing a Topology**](./guides/writing-a-topology.md): Learn how to model a new environment (like AWS, GCP, or custom hardware) as a `Topology`.
- [**Adding Capabilities**](./guides/adding-capabilities.md): See how to add a `Capability` to your custom `Topology`.
- [**Coding Guide**](./coding-guide.md): Conventions and best practices for writing Harmony code.
## 5. Module Documentation
Deep dives into specific Harmony modules and features.
- [**Monitoring and Alerting**](./monitoring.md): Comprehensive guide to cluster, tenant, and application-level monitoring with support for OKD, KubePrometheus, RHOB, and more.
## 6. Architecture Decision Records
Important architectural decisions are documented in the `adr/` directory:
- [Full ADR Index](../adr/)
Not much here yet, see the `adr` folder for now. More to come in time!

View File

@@ -1,7 +0,0 @@
# Component Catalogs
This section is the "dictionary" for Harmony. It lists all the reusable components available out-of-the-box.
- [**Scores Catalog**](./scores.md): Discover all available `Scores` (the "what").
- [**Topologies Catalog**](./topologies.md): A list of all available `Topologies` (the "where").
- [**Capabilities Catalog**](./capabilities.md): A list of all available `Capabilities` (the "how").

View File

@@ -1,40 +0,0 @@
# Capabilities Catalog
A `Capability` is a specific feature or API that a `Topology` offers. `Interpret` logic uses these capabilities to execute a `Score`.
This list is primarily for developers **writing new Topologies or Scores**. As a user, you just need to know that the `Topology` you pick (like `K8sAnywhereTopology`) provides the capabilities your `Scores` (like `ApplicationScore`) need.
<!--toc:start-->
- [Capabilities Catalog](#capabilities-catalog)
- [Kubernetes & Application](#kubernetes-application)
- [Monitoring & Observability](#monitoring-observability)
- [Networking (Core Services)](#networking-core-services)
- [Networking (Hardware & Host)](#networking-hardware-host)
<!--toc:end-->
## Kubernetes & Application
- **K8sClient**: Provides an authenticated client to interact with a Kubernetes API (create/read/update/delete resources).
- **HelmCommand**: Provides the ability to execute Helm commands (install, upgrade, template).
- **TenantManager**: Provides methods for managing tenants in a multi-tenant cluster.
- **Ingress**: Provides an interface for managing ingress controllers and resources.
## Monitoring & Observability
- **Grafana**: Provides an API for configuring Grafana (datasources, dashboards).
- **Monitoring**: A general capability for configuring monitoring (e.g., creating Prometheus rules).
## Networking (Core Services)
- **DnsServer**: Provides an interface for creating and managing DNS records.
- **LoadBalancer**: Provides an interface for configuring a load balancer (e.g., OPNsense, MetalLB).
- **DhcpServer**: Provides an interface for managing DHCP leases and host bindings.
- **TftpServer**: Provides an interface for managing files on a TFTP server (e.g., iPXE boot files).
## Networking (Hardware & Host)
- **Router**: Provides an interface for configuring routing rules, typically on a firewall like OPNsense.
- **Switch**: Provides an interface for configuring a physical network switch (e.g., managing VLANs and port channels).
- **NetworkManager**: Provides an interface for configuring host-level networking (e.g., creating bonds and bridges on a node).

View File

@@ -1,102 +0,0 @@
# Scores Catalog
A `Score` is a declarative description of a desired state. Find the Score you need and add it to your `harmony!` block's `scores` array.
<!--toc:start-->
- [Scores Catalog](#scores-catalog)
- [Application Deployment](#application-deployment)
- [OKD / Kubernetes Cluster Setup](#okd-kubernetes-cluster-setup)
- [Cluster Services & Management](#cluster-services-management)
- [Monitoring & Alerting](#monitoring-alerting)
- [Infrastructure & Networking (Bare Metal)](#infrastructure-networking-bare-metal)
- [Infrastructure & Networking (Cluster)](#infrastructure-networking-cluster)
- [Tenant Management](#tenant-management)
- [Utility](#utility)
<!--toc:end-->
## Application Deployment
Scores for deploying and managing end-user applications.
- **ApplicationScore**: The primary score for deploying a web application. Describes the application, its framework, and the features it requires (e.g., monitoring, CI/CD).
- **HelmChartScore**: Deploys a generic Helm chart to a Kubernetes cluster.
- **ArgoHelmScore**: Deploys an application using an ArgoCD Helm chart.
- **LAMPScore**: A specialized score for deploying a classic LAMP (Linux, Apache, MySQL, PHP) stack.
## OKD / Kubernetes Cluster Setup
This collection of Scores is used to provision an entire OKD cluster from bare metal. They are typically used in order.
- **OKDSetup01InventoryScore**: Discovers and catalogs the physical hardware.
- **OKDSetup02BootstrapScore**: Configures the bootstrap node, renders iPXE files, and kicks off the SCOS installation.
- **OKDSetup03ControlPlaneScore**: Renders iPXE configurations for the control plane nodes.
- **OKDSetupPersistNetworkBondScore**: Configures network bonds on the nodes and port channels on the switches.
- **OKDSetup04WorkersScore**: Renders iPXE configurations for the worker nodes.
- **OKDSetup06InstallationReportScore**: Runs post-installation checks and generates a report.
- **OKDUpgradeScore**: Manages the upgrade process for an existing OKD cluster.
## Cluster Services & Management
Scores for installing and managing services _inside_ a Kubernetes cluster.
- **K3DInstallationScore**: Installs and configures a local K3D (k3s-in-docker) cluster. Used by `K8sAnywhereTopology`.
- **CertManagerHelmScore**: Deploys the `cert-manager` Helm chart.
- **ClusterIssuerScore**: Configures a `ClusterIssuer` for `cert-manager`, (e.g., for Let's Encrypt).
- **K8sNamespaceScore**: Ensures a Kubernetes namespace exists.
- **K8sDeploymentScore**: Deploys a generic `Deployment` resource to Kubernetes.
- **K8sIngressScore**: Configures an `Ingress` resource for a service.
## Monitoring & Alerting
Scores for configuring observability, dashboards, and alerts.
- **ApplicationMonitoringScore**: A generic score to set up monitoring for an application.
- **ApplicationRHOBMonitoringScore**: A specialized score for setting up monitoring via the Red Hat Observability stack.
- **HelmPrometheusAlertingScore**: Configures Prometheus alerts via a Helm chart.
- **K8sPrometheusCRDAlertingScore**: Configures Prometheus alerts using the `PrometheusRule` CRD.
- **PrometheusAlertScore**: A generic score for creating a Prometheus alert.
- **RHOBAlertingScore**: Configures alerts specifically for the Red Hat Observability stack.
- **NtfyScore**: Configures alerts to be sent to a `ntfy.sh` server.
## Infrastructure & Networking (Bare Metal)
Low-level scores for managing physical hardware and network services.
- **DhcpScore**: Configures a DHCP server.
- **OKDDhcpScore**: A specialized DHCP configuration for the OKD bootstrap process.
- **OKDBootstrapDhcpScore**: Configures DHCP specifically for the bootstrap node.
- **DhcpHostBindingScore**: Creates a specific MAC-to-IP binding in the DHCP server.
- **DnsScore**: Configures a DNS server.
- **OKDDnsScore**: A specialized DNS configuration for the OKD cluster (e.g., `api.*`, `*.apps.*`).
- **StaticFilesHttpScore**: Serves a directory of static files (e.g., a documentation site) over HTTP.
- **TftpScore**: Configures a TFTP server, typically for serving iPXE boot files.
- **IPxeMacBootFileScore**: Assigns a specific iPXE boot file to a MAC address in the TFTP server.
- **OKDIpxeScore**: A specialized score for generating the iPXE boot scripts for OKD.
- **OPNsenseShellCommandScore**: Executes a shell command on an OPNsense firewall.
## Infrastructure & Networking (Cluster)
Network services that run inside the cluster or as part of the topology.
- **LoadBalancerScore**: Configures a general-purpose load balancer.
- **OKDLoadBalancerScore**: Configures the high-availability load balancers for the OKD API and ingress.
- **OKDBootstrapLoadBalancerScore**: Configures the load balancer specifically for the bootstrap-time API endpoint.
- **K8sIngressScore**: Configures an Ingress controller or resource.
- [HighAvailabilityHostNetworkScore](../../harmony/src/modules/okd/host_network.rs): Configures network bonds on a host and the corresponding port-channels on the switch stack for high-availability.
## Tenant Management
Scores for managing multi-tenancy within a cluster.
- **TenantScore**: Creates a new tenant (e.g., a namespace, quotas, network policies).
- **TenantCredentialScore**: Generates and provisions credentials for a new tenant.
## Utility
Helper scores for discovery and inspection.
- **LaunchDiscoverInventoryAgentScore**: Launches the agent responsible for the `OKDSetup01InventoryScore`.
- **DiscoverHostForRoleScore**: A utility score to find a host matching a specific role in the inventory.
- **InspectInventoryScore**: Dumps the discovered inventory for inspection.

View File

@@ -1,59 +0,0 @@
# Topologies Catalog
A `Topology` is the logical representation of your infrastructure and its `Capabilities`. You select a `Topology` in your Harmony project to define _where_ your `Scores` will be applied.
<!--toc:start-->
- [Topologies Catalog](#topologies-catalog)
- [HAClusterTopology](#haclustertopology)
- [K8sAnywhereTopology](#k8sanywheretopology)
<!--toc:end-->
### HAClusterTopology
- **`HAClusterTopology::autoload()`**
This `Topology` represents a high-availability, bare-metal cluster. It is designed for production-grade deployments like OKD.
It models an environment consisting of:
- At least 3 cluster nodes (for control plane/workers)
- 2 redundant firewalls (e.g., OPNsense)
- 2 redundant network switches
**Provided Capabilities:**
This topology provides a rich set of capabilities required for bare-metal provisioning and cluster management, including:
- `K8sClient` (once the cluster is bootstrapped)
- `DnsServer`
- `LoadBalancer`
- `DhcpServer`
- `TftpServer`
- `Router` (via the firewalls)
- `Switch`
- `NetworkManager` (for host-level network config)
---
### K8sAnywhereTopology
- **`K8sAnywhereTopology::from_env()`**
This `Topology` is designed for development and application deployment. It provides a simple, abstract way to deploy to _any_ Kubernetes cluster.
**How it works:**
1. By default (`from_env()` with no env vars), it automatically provisions a **local K3D (k3s-in-docker) cluster** on your machine. This is perfect for local development and testing.
2. If you provide a `KUBECONFIG` environment variable, it will instead connect to that **existing Kubernetes cluster** (e.g., your staging or production OKD cluster).
This allows you to use the _exact same code_ to deploy your application locally as you do to deploy it to production.
**Provided Capabilities:**
- `K8sClient`
- `HelmCommand`
- `TenantManager`
- `Ingress`
- `Monitoring`
- ...and more.

View File

@@ -1,299 +0,0 @@
# Harmony Coding Guide
Harmony is an infrastructure automation framework. It is **code-first and code-only**: operators write Rust programs to declare and drive infrastructure, rather than YAML files or DSL configs. Good code here means a good operator experience.
### Concrete context
We use here the context of the KVM module to explain the coding style. This will make it very easy to understand and should translate quite well to other modules/contexts managed by Harmony like OPNSense and Kubernetes.
## Core Philosophy
### The Careful Craftsman Principle
Harmony is a powerful framework that does a lot. With that power comes responsibility. Every abstraction, every trait, every module must earn its place. Before adding anything, ask:
1. **Does this solve a real problem users have?** Not a theoretical problem, an actual one encountered in production.
2. **Is this the simplest solution that works?** Complexity is a cost that compounds over time.
3. **Will this make the next developer's life easier or harder?** Code is read far more often than written.
When in doubt, don't abstract. Wait for the pattern to emerge from real usage. A little duplication is better than the wrong abstraction.
### High-level functions over raw primitives
Callers should not need to know about underlying protocols, XML schemas, or API quirks. A function that deploys a VM should accept meaningful parameters like CPU count, memory, and network name — not XML strings.
```rust
// Bad: caller constructs XML and passes it to a thin wrapper
let xml = format!(r#"<domain type='kvm'>...</domain>"#, name, memory_kb, ...);
executor.create_vm(&xml).await?;
// Good: caller describes intent, the module handles representation
executor.define_vm(&VmConfig::builder("my-vm")
.cpu(4)
.memory_gb(8)
.disk(DiskConfig::new(50))
.network(NetworkRef::named("mylan"))
.boot_order([BootDevice::Network, BootDevice::Disk])
.build())
.await?;
```
The module owns the XML, the virsh invocations, the API calls — not the caller.
### Use the right abstraction layer
Prefer native library bindings over shelling out to CLI tools. The `virt` crate provides direct libvirt bindings and should be used instead of spawning `virsh` subprocesses.
- CLI subprocess calls are fragile: stdout/stderr parsing, exit codes, quoting, PATH differences
- Native bindings give typed errors, no temp files, no shell escaping
- `virt::connect::Connect` opens a connection; `virt::domain::Domain` manages VMs; `virt::network::Network` manages virtual networks
### Keep functions small and well-named
Each function should do one thing. If a function is doing two conceptually separate things, split it. Function names should read like plain English: `ensure_network_active`, `define_vm`, `vm_is_running`.
### Prefer short modules over large files
Group related types and functions by concept. A module that handles one resource (e.g., network, domain, storage) is better than a single file for everything.
---
## Error Handling
### Use `thiserror` for all error types
Define error types with `thiserror::Error`. This removes the boilerplate of implementing `Display` and `std::error::Error` by hand, keeps error messages close to their variants, and makes types easy to extend.
```rust
// Bad: hand-rolled Display + std::error::Error
#[derive(Debug)]
pub enum KVMError {
ConnectionError(String),
VMNotFound(String),
}
impl std::fmt::Display for KVMError { ... }
impl std::error::Error for KVMError {}
// Good: derive Display via thiserror
#[derive(thiserror::Error, Debug)]
pub enum KVMError {
#[error("connection failed: {0}")]
ConnectionFailed(String),
#[error("VM not found: {name}")]
VmNotFound { name: String },
}
```
### Make bubbling errors easy with `?` and `From`
`?` works on any error type for which there is a `From` impl. Add `From` conversions from lower-level errors into your module's error type so callers can use `?` without boilerplate.
With `thiserror`, wrapping a foreign error is one line:
```rust
#[derive(thiserror::Error, Debug)]
pub enum KVMError {
#[error("libvirt error: {0}")]
Libvirt(#[from] virt::error::Error),
#[error("IO error: {0}")]
Io(#[from] std::io::Error),
}
```
This means a call that returns `virt::error::Error` can be `?`-propagated into a `Result<_, KVMError>` without any `.map_err(...)`.
### Typed errors over stringly-typed errors
Avoid `Box<dyn Error>` or `String` as error return types in library code. Callers need to distinguish errors programmatically — `KVMError::VmAlreadyExists` is actionable, `"VM already exists: foo"` as a `String` is not.
At binary entry points (e.g., `main`) it is acceptable to convert to `String` or `anyhow::Error` for display.
---
## Logging
### Use the `log` crate macros
All log output must go through the `log` crate. Never use `println!`, `eprintln!`, or `dbg!` in library code. This makes output compatible with any logging backend (env_logger, tracing, structured logging, etc.).
```rust
// Bad
println!("Creating VM: {}", name);
// Good
use log::{info, debug, warn};
info!("Creating VM: {name}");
debug!("VM XML:\n{xml}");
warn!("Network already active, skipping creation");
```
Use the right level:
| Level | When to use |
|---------|-------------|
| `error` | Unrecoverable failures (before returning Err) |
| `warn` | Recoverable issues, skipped steps |
| `info` | High-level progress events visible in normal operation |
| `debug` | Detailed operational info useful for debugging |
| `trace` | Very granular, per-iteration or per-call data |
Log before significant operations and after unexpected conditions. Do not log inside tight loops at `info` level.
---
## Types and Builders
### Derive `Serialize` on all public domain types
All public structs and enums that represent configuration or state should derive `serde::Serialize`. Add `Deserialize` when round-trip serialization is needed.
### Builder pattern for complex configs
When a type has more than three fields or optional fields, provide a builder. The builder pattern allows named, incremental construction without positional arguments.
```rust
let config = VmConfig::builder("bootstrap")
.cpu(4)
.memory_gb(8)
.disk(DiskConfig::new(50).labeled("os"))
.disk(DiskConfig::new(100).labeled("data"))
.network(NetworkRef::named("harmonylan"))
.boot_order([BootDevice::Network, BootDevice::Disk])
.build();
```
### Avoid `pub` fields on config structs
Expose data through methods or the builder, not raw field access. This preserves the ability to validate, rename, or change representation without breaking callers.
---
## Async
### Use `tokio` for all async runtime needs
All async code runs on tokio. Use `tokio::spawn`, `tokio::time`, etc. Use `#[async_trait]` for traits with async methods.
### No blocking in async context
Never call blocking I/O (file I/O, network, process spawn) directly in an async function. Use `tokio::fs`, `tokio::process`, or `tokio::task::spawn_blocking` as appropriate.
---
## Module Structure
### Follow the `Score` / `Interpret` pattern
Modules that represent deployable infrastructure should implement `Score<T: Topology>` and `Interpret<T>`:
- `Score` is the serializable, clonable configuration declaring *what* to deploy
- `Interpret` does the actual work when `execute()` is called
```rust
pub struct KvmScore {
network: NetworkConfig,
vms: Vec<VmConfig>,
}
impl<T: Topology + KvmHost> Score<T> for KvmScore {
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(KvmInterpret::new(self.clone()))
}
fn name(&self) -> String { "KvmScore".to_string() }
}
```
### Flatten the public API in `mod.rs`
Internal submodules are implementation detail. Re-export what callers need at the module root:
```rust
// modules/kvm/mod.rs
mod connection;
mod domain;
mod network;
mod error;
mod xml;
pub use connection::KvmConnection;
pub use domain::{VmConfig, VmConfigBuilder, VmStatus, DiskConfig, BootDevice};
pub use error::KvmError;
pub use network::NetworkConfig;
```
---
## Commit Style
Follow [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/):
```
feat(kvm): add network isolation support
fix(kvm): correct memory unit conversion for libvirt
refactor(kvm): replace virsh subprocess calls with virt crate bindings
docs: add coding guide
```
Keep pull requests small and single-purpose (under ~200 lines excluding generated code). Do not mix refactoring, bug fixes, and new features in one PR.
---
## When to Add Abstractions
Harmony provides powerful abstraction mechanisms: traits, generics, the Score/Interpret pattern, and capabilities. Use them judiciously.
### Add an abstraction when:
- **You have three or more concrete implementations** doing the same thing. Two is often coincidence; three is a pattern.
- **The abstraction provides compile-time safety** that prevents real bugs (e.g., capability bounds on topologies).
- **The abstraction hides genuine complexity** that callers shouldn't need to understand (e.g., XML schema generation for libvirt).
### Don't add an abstraction when:
- **It's just to avoid a few lines of boilerplate**. Copy-paste is sometimes better than a trait hierarchy.
- **You're anticipating future flexibility** that isn't needed today. YAGNI (You Aren't Gonna Need It).
- **The abstraction makes the code harder to understand** for someone unfamiliar with the codebase.
- **You're wrapping a single implementation**. A trait with one implementation is usually over-engineering.
### Signs you've over-abstracted:
- You need to explain the type system to a competent Rust developer for them to understand how to add a simple feature.
- Adding a new concrete type requires changes in multiple trait definitions.
- The word "factory" or "manager" appears in your type names.
- You have more trait definitions than concrete implementations.
### The Rule of Three for Traits
Before creating a new trait, ensure you have:
1. A clear, real use case (not hypothetical)
2. At least one concrete implementation
3. A plan for how callers will use it
Only generalize when the pattern is proven. The monitoring module is a good example: we had multiple alert senders (OKD, KubePrometheus, RHOB) before we introduced the `AlertSender` and `AlertReceiver<S>` traits. The traits emerged from real needs, not design sessions.
---
## Documentation
### Document the "why", not the "what"
Code should be self-explanatory for the "what". Comments and documentation should explain intent, rationale, and gotchas.
```rust
// Bad: restates the code
// Returns the number of VMs
fn vm_count(&self) -> usize { self.vms.len() }
// Good: explains the why
// Returns 0 if connection is lost, rather than erroring,
// because monitoring code uses this for health checks
fn vm_count(&self) -> usize { self.vms.len() }
```
### Keep examples in the `examples/` directory
Working code beats documentation. Every major feature should have a runnable example that demonstrates real usage.

View File

@@ -1,40 +0,0 @@
# Core Concepts
Harmony's design is based on a few key concepts. Understanding them is the key to unlocking the framework's power.
### 1. Score
- **What it is:** A **Score** is a declarative description of a desired state. It's a "resource" that defines _what_ you want to achieve, not _how_ to do it.
- **Example:** `ApplicationScore` declares "I want this web application to be running and monitored."
### 2. Topology
- **What it is:** A **Topology** is the logical representation of your infrastructure and its abilities. It's the "where" your Scores will be applied.
- **Key Job:** A Topology's most important job is to expose which `Capabilities` it supports.
- **Example:** `HAClusterTopology` represents a bare-metal cluster and exposes `Capabilities` like `NetworkManager` and `Switch`. `K8sAnywhereTopology` represents a Kubernetes cluster and exposes the `K8sClient` `Capability`.
### 3. Capability
- **What it is:** A **Capability** is a specific feature or API that a `Topology` offers. It's the "how" a `Topology` can fulfill a `Score`'s request.
- **Example:** The `K8sClient` capability offers a way to interact with a Kubernetes API. The `Switch` capability offers a way to configure a physical network switch.
### 4. Interpret
- **What it is:** An **Interpret** is the execution logic that makes a `Score` a reality. It's the "glue" that connects the _desired state_ (`Score`) to the _environment's abilities_ (`Topology`'s `Capabilities`).
- **How it works:** When you apply a `Score`, Harmony finds the matching `Interpret` for your `Topology`. This `Interpret` then uses the `Capabilities` provided by the `Topology` to execute the necessary steps.
### 5. Inventory
- **What it is:** An **Inventory** is the physical material (the "what") used in a cluster. This is most relevant for bare-metal or on-premise topologies.
- **Example:** A list of nodes with their roles (control plane, worker), CPU, RAM, and network interfaces. For the `K8sAnywhereTopology`, the inventory might be empty or autoloaded, as the infrastructure is more abstract.
---
### How They Work Together (The Compile-Time Check)
1. You **write a `Score`** (e.g., `ApplicationScore`).
2. Your `Score`'s `Interpret` logic requires certain **`Capabilities`** (e.g., `K8sClient` and `Ingress`).
3. You choose a **`Topology`** to run it on (e.g., `HAClusterTopology`).
4. **At compile-time**, Harmony checks: "Does `HAClusterTopology` provide the `K8sClient` and `Ingress` capabilities that `ApplicationScore` needs?"
- **If Yes:** Your code compiles. You can be confident it will run.
- **If No:** The compiler gives you an error. You've just prevented a "config-is-valid-but-platform-is-wrong" runtime error before you even deployed.

View File

@@ -1,133 +0,0 @@
## Working procedure to clone and restore CoreOS disk from OKD Cluster
### **Step 1 - take a backup**
```
sudo dd if=/dev/old of=/dev/backup status=progress
```
### **Step 2 - clone beginning of old disk to new**
```
sudo dd if=/dev/old of=/dev/new status=progress count=1000 bs=1M
```
### **Step 3 - verify and modify disk partitions**
list disk partitions
```
sgdisk -p /dev/new
```
if new disk is smaller than old disk and there is space on the xfs partition of the old disk, modify partitions of new disk
```
gdisk /dev/new
```
inside of gdisk commands
```
-v -> verify table
-p -> print table
-d -> select partition to delete partition
-n -> recreate partition with same partition number as deleted partition
```
For end sector, either specify the new end or just press Enter for maximum available
When asked about partition type, enter the same type code (it will show the old one)
```
p - >to verify
w -> to write
```
make xfs file system for new partition <new4>
```
sudo mkfs.xfs -f /dev/new4
```
### **Step 4 - copy old PARTUUID **
**careful here**
get old partuuid:
```
sgdisk -i <partition_number> /dev/old_disk # Note the "Partition unique GUID"
```
get labels
```
sgdisk -p /dev/old_disk # Shows partition names in the table
blkid /dev/old_disk* # Shows PARTUUIDs and labels for all partitions
```
set it on new disk
```
sgdisk -u <partition_number>:<old_partuuid> /dev/sdc
```
partition name:
```
sgdisk -c <partition_number>:"<old_name>" /dev/sdc
```
verify all:
```
lsblk -o NAME,SIZE,PARTUUID,PARTLABEL /dev/old_disk
```
### **Step 5 - Mount disks and copy files from old to new disk**
mount files before copy:
```
mkdir -p /mnt/new
mkdir -p /mnt/old
mount /dev/old4 /mnt/old
mount /dev/new4 /mnt/new
```
copy:
with -n flag can run as dry-run
```
rsync -aAXHvn --numeric-ids /source/ /destination/
```
```
rsync -aAXHv --numeric-ids /source/ /destination/
```
### **Step 6 - Set correct UUID for new partition 4**
to set uuid with xfs_admin you must unmount first
unmount old devices
```
umount /mnt/new
umount /mnt/old
```
to set correct uuid for partition 4
```
blkid /dev/old4
```
```
xfs_admin -U <old_uuid> /dev/new_partition
```
to set labels
get it
```
sgdisk -i 4 /dev/sda | grep "Partition name"
```
set it
```
sgdisk -c 4:"<label_name>" /dev/sdc
or
(check existing with xfs_admin -l /dev/old_partition)
Use xfs_admin -L <label> /dev/new_partition
```
### **Step 7 - Verify**
verify everything:
```
sgdisk -p /dev/sda # Old disk
sgdisk -p /dev/sdc # New disk
```
```
lsblk -o NAME,SIZE,PARTUUID,PARTLABEL /dev/sda
lsblk -o NAME,SIZE,PARTUUID,PARTLABEL /dev/sdc
```
```
blkid /dev/sda* | grep UUID=
blkid /dev/sdc* | grep UUID=
```

View File

@@ -1,56 +0,0 @@
## **Remove Worker flag from OKD Control Planes**
### **Context**
On OKD user provisioned infrastructure the control plane nodes can have the flag node-role.kubernetes.io/worker which allows non critical workloads to be scheduled on the control-planes
### **Observed Symptoms**
- After adding HAProxy servers to the backend each back end appears down
- Traffic is redirected to the control planes instead of workers
- The pods router-default are incorrectly applied on the control planes rather than on the workers
- Pods are being scheduled on the control planes causing cluster instability
```
ss -tlnp | grep 80
```
- shows process haproxy is listening at 0.0.0.0:80 on cps
- same problem for port 443
- In namespace rook-ceph certain pods are deployed on cps rather than on worker nodes
### **Cause**
- when installing UPI, the roles (master, worker) are not managed by the Machine Config Operator and the cps are made schedulable by default.
### **Diagnostic**
check node labels:
```
oc get nodes --show-labels | grep control-plane
```
Inspect the kubelet configuration:
```
cat /etc/systemd/system/kubelet.service
```
find the line:
```
--node-labels=node-role.kubernetes.io/control-plane,node-role.kubernetes.io/master,node-role.kubernetes.io/worker
```
→ presence of label worker confirms the problem.
Verify the flag doesn't come from the MCO
```
oc get machineconfig | grep rendered-master
```
**Solution:**
To make the control planes non schedulable you must patch the cluster scheduler resource
```
oc patch scheduler cluster --type merge -p '{"spec":{"mastersSchedulable":false}}'
```
after the patch is applied the workloads can be moved off the control planes by draining the nodes
```
oc adm cordon <cp-node>
oc adm drain <cp-node> --ignore-daemonsets --delete-emptydir-data
```

View File

@@ -1,42 +0,0 @@
# Getting Started Guide
Welcome to Harmony! This guide will walk you through installing the Harmony framework, setting up a new project, and deploying your first application.
We will build and deploy the "Rust Web App" example, which automatically:
1. Provisions a local K3D (Kubernetes in Docker) cluster.
2. Deploys a sample Rust web application.
3. Sets up monitoring for the application.
## Prerequisites
Before you begin, you'll need a few tools installed on your system:
- **Rust & Cargo:** [Install Rust](https://www.rust-lang.org/tools/install)
- **Docker:** [Install Docker](https://docs.docker.com/get-docker/) (Required for the K3D local cluster)
- **kubectl:** [Install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) (For inspecting the cluster)
## 1. Install Harmony
First, clone the Harmony repository and build the project. This gives you the `harmony` CLI and all the core libraries.
```bash
# Clone the main repository
git clone https://git.nationtech.io/nationtech/harmony
cd harmony
# Build the project (this may take a few minutes)
cargo build --release
```
...
## Next Steps
Congratulations, you've just deployed an application using true infrastructure-as-code!
From here, you can:
- [Explore the Catalogs](../catalogs/README.md): See what other [Scores](../catalogs/scores.md) and [Topologies](../catalogs/topologies.md) are available.
- [Read the Use Cases](../use-cases/README.md): Check out the [OKD on Bare Metal](../use-cases/okd-on-bare-metal.md) guide for a more advanced scenario.
- [Write your own Score](../guides/writing-a-score.md): Dive into the [Developer Guide](../guides/developer-guide.md) to start building your own components.

View File

@@ -1,105 +0,0 @@
# Design Document: Harmony PostgreSQL Module
**Status:** Draft
**Last Updated:** 2025-12-01
**Context:** Multi-site Data Replication & Orchestration
## 1. Overview
The Harmony PostgreSQL Module provides a high-level abstraction for deploying and managing high-availability PostgreSQL clusters across geographically distributed Kubernetes/OKD sites.
Instead of manually configuring complex replication slots, firewalls, and operator settings on each cluster, users define a single intent (a **Score**), and Harmony orchestrates the underlying infrastructure (the **Arrangement**) to establish a Primary-Replica architecture.
Currently, the implementation relies on the **CloudNativePG (CNPG)** operator as the backing engine.
## 2. Architecture
### 2.1 The Abstraction Model
Following **ADR 003 (Infrastructure Abstraction)**, Harmony separates the *intent* from the *implementation*.
1. **The Score (Intent):** The user defines a `MultisitePostgreSQL` resource. This describes *what* is needed (e.g., "A Postgres 15 cluster with 10GB storage, Primary on Site A, Replica on Site B").
2. **The Interpret (Action):** Harmony MultisitePostgreSQLInterpret processes this Score and orchestrates the deployment on both sites to reach the state defined in the Score.
3. **The Capability (Implementation):** The PostgreSQL Capability is implemented by the K8sTopology and the interpret can deploy it, configure it and fetch information about it. The concrete implementation will rely on the mature CloudnativePG operator to manage all the Kubernetes resources required.
### 2.2 Network Connectivity (TLS Passthrough)
One of the critical challenges in multi-site orchestration is secure connectivity between clusters that may have dynamic IPs or strict firewalls.
To solve this, we utilize **OKD/OpenShift Routes with TLS Passthrough**.
* **Mechanism:** The Primary site exposes a `Route` configured for `termination: passthrough`.
* **Routing:** The OpenShift HAProxy router inspects the **SNI (Server Name Indication)** header of the incoming TCP connection to route traffic to the correct PostgreSQL Pod.
* **Security:** SSL is **not** terminated at the ingress router. The encrypted stream is passed directly to the PostgreSQL instance. Mutual TLS (mTLS) authentication is handled natively by CNPG between the Primary and Replica instances.
* **Dynamic IPs:** Because connections are established via DNS hostnames (the Route URL), this architecture is resilient to dynamic IP changes at the Primary site.
#### Traffic Flow Diagram
```text
[ Site B: Replica ] [ Site A: Primary ]
| |
(CNPG Instance) --[Encrypted TCP]--> (OKD HAProxy Router)
| (Port 443) |
| |
| [SNI Inspection]
| |
| v
| (PostgreSQL Primary Pod)
| (Port 5432)
```
## 3. Design Decisions
### Why CloudNativePG?
We selected CloudNativePG because it relies exclusively on standard Kubernetes primitives and uses the native PostgreSQL replication protocol (WAL shipping/Streaming). This aligns with Harmony's goal of being "K8s Native."
### Why TLS Passthrough instead of VPN/NodePort?
* **NodePort:** Requires static IPs and opening non-standard ports on the firewall, which violates our security constraints.
* **VPN (e.g., Wireguard/Tailscale):** While secure, it introduces significant complexity (sidecars, key management) and external dependencies.
* **TLS Passthrough:** Leverages the existing Ingress/Router infrastructure already present in OKD. It requires zero additional software and respects multi-tenancy (Routes are namespaced).
### Configuration Philosophy (YAGNI)
The current design exposes a **generic configuration surface**. Users can configure standard parameters (Storage size, CPU/Memory requests, Postgres version).
**We explicitly do not expose advanced CNPG or PostgreSQL configurations at this stage.**
* **Reasoning:** We aim to keep the API surface small and manageable.
* **Future Path:** We plan to implement a "pass-through" mechanism to allow sending raw config maps or custom parameters to the underlying engine (CNPG) *only when a concrete use case arises*. Until then, we adhere to the **YAGNI (You Ain't Gonna Need It)** principle to avoid premature optimization and API bloat.
## 4. Usage Guide
To deploy a multi-site cluster, apply the `MultisitePostgreSQL` resource to the Harmony Control Plane.
### Example Manifest
```yaml
apiVersion: harmony.io/v1alpha1
kind: MultisitePostgreSQL
metadata:
name: finance-db
namespace: tenant-a
spec:
version: "15"
storage: "10Gi"
resources:
requests:
cpu: "500m"
memory: "1Gi"
# Topology Definition
topology:
primary:
site: "site-paris" # The name of the cluster in Harmony
replicas:
- site: "site-newyork"
```
### What happens next?
1. Harmony detects the CR.
2. **On Site Paris:** It deploys a CNPG Cluster (Primary) and creates a Passthrough Route `postgres-finance-db.apps.site-paris.example.com`.
3. **On Site New York:** It deploys a CNPG Cluster (Replica) configured with `externalClusters` pointing to the Paris Route.
4. Data begins replicating immediately over the encrypted channel.
## 5. Troubleshooting
* **Connection Refused:** Ensure the Primary site's Route is successfully admitted by the Ingress Controller.
* **Certificate Errors:** CNPG manages mTLS automatically. If errors persist, ensure the CA secrets were correctly propagated by Harmony from Primary to Replica namespaces.

View File

@@ -1,443 +0,0 @@
# Monitoring and Alerting in Harmony
Harmony provides a unified, type-safe approach to monitoring and alerting across Kubernetes, OpenShift, and bare-metal infrastructure. This guide explains the architecture and how to use it at different levels of abstraction.
## Overview
Harmony's monitoring module supports three distinct use cases:
| Level | Who Uses It | What It Provides |
|-------|-------------|------------------|
| **Cluster** | Cluster administrators | Full control over monitoring stack, cluster-wide alerts, external scrape targets |
| **Tenant** | Platform teams | Namespace-scoped monitoring in multi-tenant environments |
| **Application** | Application developers | Zero-config monitoring that "just works" |
Each level builds on the same underlying abstractions, ensuring consistency while providing appropriate complexity for each audience.
## Core Concepts
### AlertSender
An `AlertSender` represents the system that evaluates alert rules and sends notifications. Harmony supports multiple monitoring stacks:
| Sender | Description | Use When |
|--------|-------------|----------|
| `OpenshiftClusterAlertSender` | OKD/OpenShift built-in monitoring | Running on OKD/OpenShift |
| `KubePrometheus` | kube-prometheus-stack via Helm | Standard Kubernetes, need full stack |
| `Prometheus` | Standalone Prometheus | Custom Prometheus deployment |
| `RedHatClusterObservability` | RHOB operator | Red Hat managed clusters |
| `Grafana` | Grafana-managed alerting | Grafana as primary alerting layer |
### AlertReceiver
An `AlertReceiver` defines where alerts are sent (Discord, Slack, email, webhook, etc.). Receivers are parameterized by sender type because each monitoring stack has different configuration formats.
```rust
pub trait AlertReceiver<S: AlertSender> {
fn build(&self) -> Result<ReceiverInstallPlan, InterpretError>;
fn name(&self) -> String;
}
```
Built-in receivers:
- `DiscordReceiver` - Discord webhooks
- `WebhookReceiver` - Generic HTTP webhooks
### AlertRule
An `AlertRule` defines a Prometheus alert expression. Rules are also parameterized by sender to handle different CRD formats.
```rust
pub trait AlertRule<S: AlertSender> {
fn build_rule(&self) -> Result<serde_json::Value, InterpretError>;
fn name(&self) -> String;
}
```
### Observability Capability
Topologies implement `Observability<S>` to indicate they support a specific alert sender:
```rust
impl Observability<OpenshiftClusterAlertSender> for K8sAnywhereTopology {
async fn install_receivers(&self, sender, inventory, receivers) { ... }
async fn install_rules(&self, sender, inventory, rules) { ... }
// ...
}
```
This provides **compile-time verification**: if you try to use `OpenshiftClusterAlertScore` with a topology that doesn't implement `Observability<OpenshiftClusterAlertSender>`, the code won't compile.
---
## Level 1: Cluster Monitoring
Cluster monitoring is for administrators who need full control over the monitoring infrastructure. This includes:
- Installing/managing the monitoring stack
- Configuring cluster-wide alert receivers
- Defining cluster-level alert rules
- Adding external scrape targets (e.g., bare-metal servers, firewalls)
### Example: OKD Cluster Alerts
```rust
use harmony::{
modules::monitoring::{
alert_channel::discord_alert_channel::DiscordReceiver,
alert_rule::{alerts::k8s::pvc::high_pvc_fill_rate_over_two_days, prometheus_alert_rule::AlertManagerRuleGroup},
okd::openshift_cluster_alerting_score::OpenshiftClusterAlertScore,
scrape_target::prometheus_node_exporter::PrometheusNodeExporter,
},
topology::{K8sAnywhereTopology, monitoring::{AlertMatcher, AlertRoute, MatchOp}},
};
let severity_matcher = AlertMatcher {
label: "severity".to_string(),
operator: MatchOp::Eq,
value: "critical".to_string(),
};
let rule_group = AlertManagerRuleGroup::new(
"cluster-rules",
vec![high_pvc_fill_rate_over_two_days()],
);
let external_exporter = PrometheusNodeExporter {
job_name: "firewall".to_string(),
metrics_path: "/metrics".to_string(),
listen_address: ip!("192.168.1.1"),
port: 9100,
..Default::default()
};
harmony_cli::run(
Inventory::autoload(),
K8sAnywhereTopology::from_env(),
vec![Box::new(OpenshiftClusterAlertScore {
sender: OpenshiftClusterAlertSender,
receivers: vec![Box::new(DiscordReceiver {
name: "critical-alerts".to_string(),
url: hurl!("https://discord.com/api/webhooks/..."),
route: AlertRoute {
matchers: vec![severity_matcher],
..AlertRoute::default("critical-alerts".to_string())
},
})],
rules: vec![Box::new(rule_group)],
scrape_targets: Some(vec![Box::new(external_exporter)]),
})],
None,
).await?;
```
### What This Does
1. **Enables cluster monitoring** - Activates OKD's built-in Prometheus
2. **Enables user workload monitoring** - Allows namespace-scoped rules
3. **Configures Alertmanager** - Adds Discord receiver with route matching
4. **Deploys alert rules** - Creates `AlertingRule` CRD with PVC fill rate alert
5. **Adds external scrape target** - Configures Prometheus to scrape the firewall
### Compile-Time Safety
The `OpenshiftClusterAlertScore` requires:
```rust
impl<T: Topology + Observability<OpenshiftClusterAlertSender>> Score<T>
for OpenshiftClusterAlertScore
```
If `K8sAnywhereTopology` didn't implement `Observability<OpenshiftClusterAlertSender>`, this code would fail to compile. You cannot accidentally deploy OKD alerts to a cluster that doesn't support them.
---
## Level 2: Tenant Monitoring
In multi-tenant clusters, teams are often confined to specific namespaces. Tenant monitoring adapts to this constraint:
- Resources are deployed in the tenant's namespace
- Cannot modify cluster-level monitoring configuration
- The topology determines namespace context at runtime
### How It Works
The topology's `Observability` implementation handles tenant scoping:
```rust
impl Observability<KubePrometheus> for K8sAnywhereTopology {
async fn install_rules(&self, sender, inventory, rules) {
// Topology knows if it's tenant-scoped
let namespace = self.get_tenant_config().await
.map(|t| t.name)
.unwrap_or_else(|| "monitoring".to_string());
// Rules are installed in the appropriate namespace
for rule in rules.unwrap_or_default() {
let score = KubePrometheusRuleScore {
sender: sender.clone(),
rule,
namespace: namespace.clone(), // Tenant namespace
};
score.create_interpret().execute(inventory, self).await?;
}
}
}
```
### Tenant vs Cluster Resources
| Resource | Cluster-Level | Tenant-Level |
|----------|---------------|--------------|
| Alertmanager config | Global receivers | Namespaced receivers (where supported) |
| PrometheusRules | Cluster-wide alerts | Namespace alerts only |
| ServiceMonitors | Any namespace | Own namespace only |
| External scrape targets | Can add | Cannot add (cluster config) |
### Runtime Validation
Tenant constraints are validated at runtime via Kubernetes RBAC. If a tenant-scoped deployment attempts cluster-level operations, it fails with a clear permission error from the Kubernetes API.
This cannot be fully compile-time because tenant context is determined by who's running the code and what permissions they have—information only available at runtime.
---
## Level 3: Application Monitoring
Application monitoring provides zero-config, opinionated monitoring for developers. Just add the `Monitoring` feature to your application and it works.
### Example
```rust
use harmony::modules::{
application::{Application, ApplicationFeature},
monitoring::alert_channel::webhook_receiver::WebhookReceiver,
};
// Define your application
let my_app = MyApplication::new();
// Add monitoring as a feature
let monitoring = Monitoring {
application: Arc::new(my_app),
alert_receiver: vec![], // Uses defaults
};
// Install with the application
my_app.add_feature(monitoring);
```
### What Application Monitoring Provides
1. **Automatic ServiceMonitor** - Creates a ServiceMonitor for your application's pods
2. **Ntfy Notification Channel** - Auto-installs and configures Ntfy for push notifications
3. **Tenant Awareness** - Automatically scopes to the correct namespace
4. **Sensible Defaults** - Pre-configured alert routes and receivers
### Under the Hood
```rust
impl<T: Topology + Observability<Prometheus> + TenantManager>
ApplicationFeature<T> for Monitoring
{
async fn ensure_installed(&self, topology: &T) -> Result<...> {
// 1. Get tenant namespace (or use app name)
let namespace = topology.get_tenant_config().await
.map(|ns| ns.name.clone())
.unwrap_or_else(|| self.application.name());
// 2. Create ServiceMonitor for the app
let app_service_monitor = ServiceMonitor {
metadata: ObjectMeta {
name: Some(self.application.name()),
namespace: Some(namespace.clone()),
..Default::default()
},
spec: ServiceMonitorSpec::default(),
};
// 3. Install Ntfy for notifications
let ntfy = NtfyScore { namespace, host };
ntfy.interpret(&Inventory::empty(), topology).await?;
// 4. Wire up webhook receiver to Ntfy
let ntfy_receiver = WebhookReceiver { ... };
// 5. Execute monitoring score
alerting_score.interpret(&Inventory::empty(), topology).await?;
}
}
```
---
## Pre-Built Alert Rules
Harmony provides a library of common alert rules in `modules/monitoring/alert_rule/alerts/`:
### Kubernetes Alerts (`alerts/k8s/`)
```rust
use harmony::modules::monitoring::alert_rule::alerts::k8s::{
pod::pod_failed,
pvc::high_pvc_fill_rate_over_two_days,
memory_usage::alert_high_memory_usage,
};
let rules = AlertManagerRuleGroup::new("k8s-rules", vec![
pod_failed(),
high_pvc_fill_rate_over_two_days(),
alert_high_memory_usage(),
]);
```
Available rules:
- `pod_failed()` - Pod in failed state
- `alert_container_restarting()` - Container restart loop
- `alert_pod_not_ready()` - Pod not ready for extended period
- `high_pvc_fill_rate_over_two_days()` - PVC will fill within 2 days
- `alert_high_memory_usage()` - Memory usage above threshold
- `alert_high_cpu_usage()` - CPU usage above threshold
### Infrastructure Alerts (`alerts/infra/`)
```rust
use harmony::modules::monitoring::alert_rule::alerts::infra::opnsense::high_http_error_rate;
let rules = AlertManagerRuleGroup::new("infra-rules", vec![
high_http_error_rate(),
]);
```
### Creating Custom Rules
```rust
use harmony::modules::monitoring::alert_rule::prometheus_alert_rule::PrometheusAlertRule;
pub fn my_custom_alert() -> PrometheusAlertRule {
PrometheusAlertRule::new("MyServiceDown", "up{job=\"my-service\"} == 0")
.for_duration("5m")
.label("severity", "critical")
.annotation("summary", "My service is down")
.annotation("description", "The my-service job has been down for more than 5 minutes")
}
```
---
## Alert Receivers
### Discord Webhook
```rust
use harmony::modules::monitoring::alert_channel::discord_alert_channel::DiscordReceiver;
use harmony::topology::monitoring::{AlertRoute, AlertMatcher, MatchOp};
let discord = DiscordReceiver {
name: "ops-alerts".to_string(),
url: hurl!("https://discord.com/api/webhooks/123456/abcdef"),
route: AlertRoute {
receiver: "ops-alerts".to_string(),
matchers: vec![AlertMatcher {
label: "severity".to_string(),
operator: MatchOp::Eq,
value: "critical".to_string(),
}],
group_by: vec!["alertname".to_string()],
repeat_interval: Some("30m".to_string()),
continue_matching: false,
children: vec![],
},
};
```
### Generic Webhook
```rust
use harmony::modules::monitoring::alert_channel::webhook_receiver::WebhookReceiver;
let webhook = WebhookReceiver {
name: "custom-webhook".to_string(),
url: hurl!("https://api.example.com/alerts"),
route: AlertRoute::default("custom-webhook".to_string()),
};
```
---
## Adding a New Monitoring Stack
To add support for a new monitoring stack:
1. **Create the sender type** in `modules/monitoring/my_sender/mod.rs`:
```rust
#[derive(Debug, Clone)]
pub struct MySender;
impl AlertSender for MySender {
fn name(&self) -> String { "MySender".to_string() }
}
```
2. **Define CRD types** in `modules/monitoring/my_sender/crd/`:
```rust
#[derive(CustomResource, Debug, Serialize, Deserialize, Clone)]
#[kube(group = "monitoring.example.com", version = "v1", kind = "MyAlertRule")]
pub struct MyAlertRuleSpec { ... }
```
3. **Implement Observability** in `domain/topology/k8s_anywhere/observability/my_sender.rs`:
```rust
impl Observability<MySender> for K8sAnywhereTopology {
async fn install_receivers(&self, sender, inventory, receivers) { ... }
async fn install_rules(&self, sender, inventory, rules) { ... }
// ...
}
```
4. **Implement receiver conversions** for existing receivers:
```rust
impl AlertReceiver<MySender> for DiscordReceiver {
fn build(&self) -> Result<ReceiverInstallPlan, InterpretError> {
// Convert DiscordReceiver to MySender's format
}
}
```
5. **Create score types**:
```rust
pub struct MySenderAlertScore {
pub sender: MySender,
pub receivers: Vec<Box<dyn AlertReceiver<MySender>>>,
pub rules: Vec<Box<dyn AlertRule<MySender>>>,
}
```
---
## Architecture Principles
### Type Safety Over Flexibility
Each monitoring stack has distinct CRDs and configuration formats. Rather than a unified "MonitoringStack" type that loses stack-specific features, we use generic traits that provide type safety while allowing each stack to express its unique configuration.
### Compile-Time Capability Verification
The `Observability<S>` bound ensures you can't deploy OKD alerts to a KubePrometheus cluster. The compiler catches platform mismatches before deployment.
### Explicit Over Implicit
Monitoring stacks are chosen explicitly (`OpenshiftClusterAlertSender` vs `KubePrometheus`). There's no "auto-detection" that could lead to surprising behavior.
### Three Levels, One Foundation
Cluster, tenant, and application monitoring all use the same traits (`AlertSender`, `AlertReceiver`, `AlertRule`). The difference is in how scores are constructed and how topologies interpret them.
---
## Related Documentation
- [ADR-020: Monitoring and Alerting Architecture](../adr/020-monitoring-alerting-architecture.md)
- [ADR-013: Monitoring Notifications (ntfy)](../adr/013-monitoring-notifications.md)
- [ADR-011: Multi-Tenant Cluster Architecture](../adr/011-multi-tenant-cluster.md)
- [Coding Guide](coding-guide.md)
- [Core Concepts](concepts.md)

Binary file not shown.

View File

@@ -7,7 +7,7 @@ use harmony::{
monitoring::alert_channel::webhook_receiver::WebhookReceiver,
tenant::TenantScore,
},
topology::{K8sAnywhereTopology, monitoring::AlertRoute, tenant::TenantConfig},
topology::{K8sAnywhereTopology, tenant::TenantConfig},
};
use harmony_types::id::Id;
use harmony_types::net::Url;
@@ -27,20 +27,14 @@ async fn main() {
};
let application = Arc::new(RustWebapp {
name: "example-monitoring".to_string(),
dns: "example-monitoring.harmony.mcd".to_string(),
project_root: PathBuf::from("./examples/rust/webapp"),
framework: Some(RustWebFramework::Leptos),
service_port: 3000,
});
let receiver_name = "sample-webhook-receiver".to_string();
let webhook_receiver = WebhookReceiver {
name: receiver_name.clone(),
name: "sample-webhook-receiver".to_string(),
url: Url::Url(url::Url::parse("https://webhook-doesnt-exist.com").unwrap()),
route: AlertRoute {
..AlertRoute::default(receiver_name)
},
};
let app = ApplicationScore {

View File

@@ -1,20 +0,0 @@
[package]
name = "brocade-snmp-server"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
[dependencies]
harmony = { path = "../../harmony" }
brocade = { path = "../../brocade" }
harmony_secret = { path = "../../harmony_secret" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
harmony_macros = { path = "../../harmony_macros" }
tokio = { workspace = true }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
base64.workspace = true
serde.workspace = true

View File

@@ -1,22 +0,0 @@
use std::net::{IpAddr, Ipv4Addr};
use harmony::{
inventory::Inventory, modules::brocade::BrocadeEnableSnmpScore, topology::K8sAnywhereTopology,
};
#[tokio::main]
async fn main() {
let brocade_snmp_server = BrocadeEnableSnmpScore {
switch_ips: vec![IpAddr::V4(Ipv4Addr::new(192, 168, 1, 111))],
dry_run: true,
};
harmony_cli::run(
Inventory::autoload(),
K8sAnywhereTopology::from_env(),
vec![Box::new(brocade_snmp_server)],
None,
)
.await
.unwrap();
}

View File

@@ -1,19 +0,0 @@
[package]
name = "brocade-switch"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_macros = { path = "../../harmony_macros" }
harmony_types = { path = "../../harmony_types" }
tokio.workspace = true
url.workspace = true
async-trait.workspace = true
serde.workspace = true
log.workspace = true
env_logger.workspace = true
brocade = { path = "../../brocade" }

View File

@@ -1,50 +0,0 @@
use std::str::FromStr;
use brocade::{BrocadeOptions, PortOperatingMode};
use harmony::{
infra::brocade::BrocadeSwitchConfig,
inventory::Inventory,
modules::brocade::{BrocadeSwitchAuth, BrocadeSwitchScore, SwitchTopology},
};
use harmony_macros::ip;
use harmony_types::{id::Id, switch::PortLocation};
fn get_switch_config() -> BrocadeSwitchConfig {
let mut options = BrocadeOptions::default();
options.ssh.port = 2222;
let auth = BrocadeSwitchAuth {
username: "admin".to_string(),
password: "password".to_string(),
};
BrocadeSwitchConfig {
ips: vec![ip!("127.0.0.1")],
auth,
options,
}
}
#[tokio::main]
async fn main() {
let switch_score = BrocadeSwitchScore {
port_channels_to_clear: vec![
Id::from_str("17").unwrap(),
Id::from_str("19").unwrap(),
Id::from_str("18").unwrap(),
],
ports_to_configure: vec![
(PortLocation(2, 0, 17), PortOperatingMode::Trunk),
(PortLocation(2, 0, 19), PortOperatingMode::Trunk),
(PortLocation(1, 0, 18), PortOperatingMode::Trunk),
],
};
harmony_cli::run(
Inventory::autoload(),
SwitchTopology::new(get_switch_config()).await,
vec![Box::new(switch_score)],
None,
)
.await
.unwrap();
}

View File

@@ -1,19 +0,0 @@
[package]
name = "cert_manager"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
publish = false
[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
cidr = { workspace = true }
tokio = { workspace = true }
harmony_macros = { path = "../../harmony_macros" }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
assert_cmd = "2.0.16"

View File

@@ -1,42 +0,0 @@
use harmony::{
inventory::Inventory,
modules::cert_manager::{
capability::CertificateManagementConfig, score_certificate::CertificateScore,
score_issuer::CertificateIssuerScore,
},
topology::K8sAnywhereTopology,
};
#[tokio::main]
async fn main() {
let config = CertificateManagementConfig {
namespace: Some("test".to_string()),
acme_issuer: None,
ca_issuer: None,
self_signed: true,
};
let issuer_name = "test-self-signed-issuer".to_string();
let issuer = CertificateIssuerScore {
issuer_name: issuer_name.clone(),
config: config.clone(),
};
let cert = CertificateScore {
config: config.clone(),
issuer_name,
cert_name: "test-self-signed-cert".to_string(),
common_name: None,
dns_names: Some(vec!["test.dns.name".to_string()]),
is_ca: Some(false),
};
harmony_cli::run(
Inventory::autoload(),
K8sAnywhereTopology::from_env(),
vec![Box::new(issuer), Box::new(cert)],
None,
)
.await
.unwrap();
}

View File

@@ -2,7 +2,7 @@ use harmony::{
inventory::Inventory,
modules::{
dummy::{ErrorScore, PanicScore, SuccessScore},
inventory::{HarmonyDiscoveryStrategy, LaunchDiscoverInventoryAgentScore},
inventory::LaunchDiscoverInventoryAgentScore,
},
topology::LocalhostTopology,
};
@@ -18,7 +18,6 @@ async fn main() {
Box::new(PanicScore {}),
Box::new(LaunchDiscoverInventoryAgentScore {
discovery_timeout: Some(10),
discovery_strategy: HarmonyDiscoveryStrategy::MDNS,
}),
],
None,

View File

@@ -1,15 +0,0 @@
[package]
name = "harmony_inventory_builder"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_macros = { path = "../../harmony_macros" }
harmony_types = { path = "../../harmony_types" }
tokio.workspace = true
url.workspace = true
cidr.workspace = true

View File

@@ -1,11 +0,0 @@
cargo build -p harmony_inventory_builder --release --target x86_64-unknown-linux-musl
SCRIPT_DIR="$(dirname ${0})"
cd "${SCRIPT_DIR}/docker/"
cp ../../../target/x86_64-unknown-linux-musl/release/harmony_inventory_builder .
docker build . -t hub.nationtech.io/harmony/harmony_inventory_builder
docker push hub.nationtech.io/harmony/harmony_inventory_builder

View File

@@ -1,10 +0,0 @@
FROM debian:12-slim
RUN mkdir /app
WORKDIR /app/
COPY harmony_inventory_builder /app/
ENV RUST_LOG=info
CMD ["sleep", "infinity"]

View File

@@ -1,36 +0,0 @@
use harmony::{
inventory::{HostRole, Inventory},
modules::inventory::{DiscoverHostForRoleScore, HarmonyDiscoveryStrategy},
topology::LocalhostTopology,
};
use harmony_macros::cidrv4;
#[tokio::main]
async fn main() {
let discover_worker = DiscoverHostForRoleScore {
role: HostRole::Worker,
number_desired_hosts: 3,
discovery_strategy: HarmonyDiscoveryStrategy::SUBNET {
cidr: cidrv4!("192.168.0.1/25"),
port: 25000,
},
};
let discover_control_plane = DiscoverHostForRoleScore {
role: HostRole::ControlPlane,
number_desired_hosts: 3,
discovery_strategy: HarmonyDiscoveryStrategy::SUBNET {
cidr: cidrv4!("192.168.0.1/25"),
port: 25000,
},
};
harmony_cli::run(
Inventory::autoload(),
LocalhostTopology::new(),
vec![Box::new(discover_worker), Box::new(discover_control_plane)],
None,
)
.await
.unwrap();
}

View File

@@ -1,21 +0,0 @@
[package]
name = "example-k8s-drain-node"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
publish = false
[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
harmony_macros = { path = "../../harmony_macros" }
harmony-k8s = { path = "../../harmony-k8s" }
cidr.workspace = true
tokio.workspace = true
log.workspace = true
env_logger.workspace = true
url.workspace = true
assert_cmd = "2.0.16"
inquire.workspace = true

View File

@@ -1,61 +0,0 @@
use std::time::Duration;
use harmony_k8s::{DrainOptions, K8sClient};
use log::{info, trace};
#[tokio::main]
async fn main() {
env_logger::init();
let k8s = K8sClient::try_default().await.unwrap();
let nodes = k8s.get_nodes(None).await.unwrap();
trace!("Got nodes : {nodes:#?}");
let node_names = nodes
.iter()
.map(|n| n.metadata.name.as_ref().unwrap())
.collect::<Vec<&String>>();
info!("Got nodes : {:?}", node_names);
let node_name = inquire::Select::new("What node do you want to operate on?", node_names)
.prompt()
.unwrap();
let drain = inquire::Confirm::new("Do you wish to drain the node now ?")
.prompt()
.unwrap();
if drain {
let mut options = DrainOptions::default_ignore_daemonset_delete_emptydir_data();
options.timeout = Duration::from_secs(1);
k8s.drain_node(&node_name, &options).await.unwrap();
info!("Node {node_name} successfully drained");
}
let uncordon =
inquire::Confirm::new("Do you wish to uncordon node to resume scheduling workloads now?")
.prompt()
.unwrap();
if uncordon {
info!("Uncordoning node {node_name}");
k8s.uncordon_node(node_name).await.unwrap();
info!("Node {node_name} uncordoned");
}
let reboot = inquire::Confirm::new("Do you wish to reboot node now?")
.prompt()
.unwrap();
if reboot {
k8s.reboot_node(
&node_name,
&DrainOptions::default_ignore_daemonset_delete_emptydir_data(),
Duration::from_secs(3600),
)
.await
.unwrap();
}
info!("All done playing with nodes, happy harmonizing!");
}

View File

@@ -1,21 +0,0 @@
[package]
name = "example-k8s-write-file-on-node"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
publish = false
[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
harmony_macros = { path = "../../harmony_macros" }
harmony-k8s = { path = "../../harmony-k8s" }
cidr.workspace = true
tokio.workspace = true
log.workspace = true
env_logger.workspace = true
url.workspace = true
assert_cmd = "2.0.16"
inquire.workspace = true

View File

@@ -1,45 +0,0 @@
use harmony_k8s::{K8sClient, NodeFile};
use log::{info, trace};
#[tokio::main]
async fn main() {
    env_logger::init();

    // Connect using the ambient kubeconfig (same resolution rules as `kubectl`).
    let k8s = K8sClient::try_default().await.unwrap();

    // List every node and offer the names for interactive selection.
    let nodes = k8s.get_nodes(None).await.unwrap();
    trace!("Got nodes : {nodes:#?}");
    let node_names = nodes
        .iter()
        .map(|n| n.metadata.name.as_ref().unwrap())
        .collect::<Vec<&String>>();
    info!("Got nodes : {:?}", node_names);
    let node = inquire::Select::new("What node do you want to write file to?", node_names)
        .prompt()
        .unwrap();

    // Ask for the file to create on the node; 0o600 keeps it owner-only.
    let path = inquire::Text::new("File path on node").prompt().unwrap();
    let content = inquire::Text::new("File content").prompt().unwrap();
    let node_file = NodeFile {
        // Field-init shorthand instead of the redundant `path: path` form.
        path,
        content,
        mode: 0o600,
    };

    // Keep the vec in a binding so the file can be reported on afterwards
    // without cloning it just for this call.
    let files = vec![node_file];
    k8s.write_files_to_node(&node, &files).await.unwrap();

    // Optionally run a follow-up command (e.g. to inspect the written file).
    let cmd = inquire::Text::new("Command to run on node")
        .prompt()
        .unwrap();
    k8s.run_privileged_command_on_node(&node, &cmd)
        .await
        .unwrap();

    // Print the mode in octal (`{:o}`): 0o600 reads as `600`, not `384`.
    info!(
        "File {} mode {:o} written in node {node}",
        files[0].path, files[0].mode
    );
}

View File

@@ -1,45 +1,36 @@
use std::{
collections::HashMap,
sync::{Arc, Mutex},
};
use std::collections::HashMap;
use harmony::{
inventory::Inventory,
modules::monitoring::{
alert_channel::discord_alert_channel::DiscordReceiver,
alert_rule::{
alerts::{
infra::dell_server::{
alert_global_storage_status_critical,
alert_global_storage_status_non_recoverable,
global_storage_status_degraded_non_critical,
modules::{
monitoring::{
alert_channel::discord_alert_channel::DiscordWebhook,
alert_rule::prometheus_alert_rule::AlertManagerRuleGroup,
kube_prometheus::{
helm_prometheus_alert_score::HelmPrometheusAlertingScore,
types::{
HTTPScheme, MatchExpression, Operator, Selector, ServiceMonitor,
ServiceMonitorEndpoint,
},
k8s::pvc::high_pvc_fill_rate_over_two_days,
},
prometheus_alert_rule::AlertManagerRuleGroup,
},
kube_prometheus::{
helm::config::KubePrometheusConfig,
kube_prometheus_alerting_score::KubePrometheusAlertingScore,
types::{
HTTPScheme, MatchExpression, Operator, Selector, ServiceMonitor,
ServiceMonitorEndpoint,
prometheus::alerts::{
infra::dell_server::{
alert_global_storage_status_critical, alert_global_storage_status_non_recoverable,
global_storage_status_degraded_non_critical,
},
k8s::pvc::high_pvc_fill_rate_over_two_days,
},
},
topology::{K8sAnywhereTopology, monitoring::AlertRoute},
topology::K8sAnywhereTopology,
};
use harmony_types::{k8s_name::K8sName, net::Url};
use harmony_types::net::Url;
#[tokio::main]
async fn main() {
let receiver_name = "test-discord".to_string();
let discord_receiver = DiscordReceiver {
name: receiver_name.clone(),
let discord_receiver = DiscordWebhook {
name: "test-discord".to_string(),
url: Url::Url(url::Url::parse("https://discord.doesnt.exist.com").unwrap()),
route: AlertRoute {
..AlertRoute::default(receiver_name)
},
};
let high_pvc_fill_rate_over_two_days_alert = high_pvc_fill_rate_over_two_days();
@@ -78,15 +69,10 @@ async fn main() {
endpoints: vec![service_monitor_endpoint],
..Default::default()
};
let config = Arc::new(Mutex::new(KubePrometheusConfig::new()));
let alerting_score = KubePrometheusAlertingScore {
let alerting_score = HelmPrometheusAlertingScore {
receivers: vec![Box::new(discord_receiver)],
rules: vec![Box::new(additional_rules), Box::new(additional_rules2)],
service_monitors: vec![service_monitor],
scrape_targets: None,
config,
};
harmony_cli::run(

View File

@@ -1,37 +1,29 @@
use std::{
collections::HashMap,
str::FromStr,
sync::{Arc, Mutex},
};
use std::{collections::HashMap, str::FromStr};
use harmony::{
inventory::Inventory,
modules::{
monitoring::{
alert_channel::discord_alert_channel::DiscordReceiver,
alert_rule::{
alerts::k8s::pvc::high_pvc_fill_rate_over_two_days,
prometheus_alert_rule::AlertManagerRuleGroup,
},
alert_channel::discord_alert_channel::DiscordWebhook,
alert_rule::prometheus_alert_rule::AlertManagerRuleGroup,
kube_prometheus::{
helm::config::KubePrometheusConfig,
kube_prometheus_alerting_score::KubePrometheusAlertingScore,
helm_prometheus_alert_score::HelmPrometheusAlertingScore,
types::{
HTTPScheme, MatchExpression, Operator, Selector, ServiceMonitor,
ServiceMonitorEndpoint,
},
},
},
prometheus::alerts::k8s::pvc::high_pvc_fill_rate_over_two_days,
tenant::TenantScore,
},
topology::{
K8sAnywhereTopology,
monitoring::AlertRoute,
tenant::{ResourceLimits, TenantConfig, TenantNetworkPolicy},
},
};
use harmony_types::id::Id;
use harmony_types::net::Url;
use harmony_types::{id::Id, k8s_name::K8sName};
#[tokio::main]
async fn main() {
@@ -50,13 +42,9 @@ async fn main() {
},
};
let receiver_name = "test-discord".to_string();
let discord_receiver = DiscordReceiver {
name: receiver_name.clone(),
let discord_receiver = DiscordWebhook {
name: "test-discord".to_string(),
url: Url::Url(url::Url::parse("https://discord.doesnt.exist.com").unwrap()),
route: AlertRoute {
..AlertRoute::default(receiver_name)
},
};
let high_pvc_fill_rate_over_two_days_alert = high_pvc_fill_rate_over_two_days();
@@ -85,14 +73,10 @@ async fn main() {
..Default::default()
};
let config = Arc::new(Mutex::new(KubePrometheusConfig::new()));
let alerting_score = KubePrometheusAlertingScore {
let alerting_score = HelmPrometheusAlertingScore {
receivers: vec![Box::new(discord_receiver)],
rules: vec![Box::new(additional_rules)],
service_monitors: vec![service_monitor],
scrape_targets: None,
config,
};
harmony_cli::run(

View File

@@ -1,18 +0,0 @@
[package]
name = "example-multisite-postgres"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
publish = false
[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
cidr = { workspace = true }
tokio = { workspace = true }
harmony_macros = { path = "../../harmony_macros" }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }

View File

@@ -1,3 +0,0 @@
export HARMONY_FAILOVER_TOPOLOGY_K8S_PRIMARY="context=default/api-your-openshift-cluster:6443/kube:admin"
export HARMONY_FAILOVER_TOPOLOGY_K8S_REPLICA="context=someuser/somecluster"
export RUST_LOG="harmony=debug"

View File

@@ -1,28 +0,0 @@
use harmony::{
inventory::Inventory,
modules::postgresql::{PublicPostgreSQLScore, capability::PostgreSQLConfig},
topology::{FailoverTopology, K8sAnywhereTopology},
};
#[tokio::main]
async fn main() {
    // env_logger::init();

    // Describe the desired PostgreSQL deployment. Everything not set here
    // falls back to Harmony's defaults, which mirror CNPG's default values
    // ("default" namespace, 1 instance, 1Gi storage); we only override the
    // cluster name and target namespace.
    let config = PostgreSQLConfig {
        cluster_name: "harmony-postgres-example".to_string(),
        namespace: "harmony-public-postgres".to_string(),
        ..Default::default()
    };
    let score = PublicPostgreSQLScore {
        config,
        hostname: "postgrestest.sto1.nationtech.io".to_string(),
    };

    // Build the failover topology from environment variables, then hand the
    // score to the CLI runner.
    let topology = FailoverTopology::<K8sAnywhereTopology>::from_env();
    harmony_cli::run(Inventory::autoload(), topology, vec![Box::new(score)], None)
        .await
        .unwrap();
}

View File

@@ -1,5 +1,5 @@
[package]
name = "example-nats-module-supercluster"
name = "example-nanodc"
edition = "2024"
version.workspace = true
readme.workspace = true
@@ -8,12 +8,12 @@ publish = false
[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_tui = { path = "../../harmony_tui" }
harmony_types = { path = "../../harmony_types" }
cidr = { workspace = true }
tokio = { workspace = true }
harmony_macros = { path = "../../harmony_macros" }
harmony_secret = { path = "../../harmony_secret" }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
k8s-openapi.workspace = true

View File

@@ -0,0 +1,4 @@
#!/bin/bash
# Install the rook-ceph-cluster chart (the CephCluster CR plus the pools,
# filesystems and object stores declared in the local values.yaml) into the
# `rook-ceph` namespace, pointing it at the operator deployed in that same
# namespace. Assumes the `rook-release` helm repo has already been added.
helm install --create-namespace --namespace rook-ceph rook-ceph-cluster \
--set operatorNamespace=rook-ceph rook-release/rook-ceph-cluster -f values.yaml

View File

@@ -0,0 +1,721 @@
# Default values for a single rook-ceph cluster
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# -- Namespace of the main rook operator
operatorNamespace: rook-ceph
# -- The metadata.name of the CephCluster CR
# @default -- The same as the namespace
clusterName:
# -- Optional override of the target kubernetes version
kubeVersion:
# -- Cluster ceph.conf override
configOverride:
# configOverride: |
# [global]
# mon_allow_pool_delete = true
# osd_pool_default_size = 3
# osd_pool_default_min_size = 2
# Installs a debugging toolbox deployment
toolbox:
# -- Enable Ceph debugging pod deployment. See [toolbox](../Troubleshooting/ceph-toolbox.md)
enabled: true
# -- Toolbox image, defaults to the image used by the Ceph cluster
image: #quay.io/ceph/ceph:v19.2.2
# -- Toolbox tolerations
tolerations: []
# -- Toolbox affinity
affinity: {}
# -- Toolbox container security context
containerSecurityContext:
runAsNonRoot: true
runAsUser: 2016
runAsGroup: 2016
capabilities:
drop: ["ALL"]
# -- Toolbox resources
resources:
limits:
memory: "1Gi"
requests:
cpu: "100m"
memory: "128Mi"
# -- Set the priority class for the toolbox if desired
priorityClassName:
monitoring:
# -- Enable Prometheus integration, will also create necessary RBAC rules to allow Operator to create ServiceMonitors.
# Monitoring requires Prometheus to be pre-installed
enabled: false
# -- Whether to disable the metrics reported by Ceph. If false, the prometheus mgr module and Ceph exporter are enabled
metricsDisabled: false
# -- Whether to create the Prometheus rules for Ceph alerts
createPrometheusRules: false
# -- The namespace in which to create the prometheus rules, if different from the rook cluster namespace.
# If you have multiple rook-ceph clusters in the same k8s cluster, choose the same namespace (ideally, namespace with prometheus
# deployed) to set rulesNamespaceOverride for all the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions.
rulesNamespaceOverride:
# Monitoring settings for external clusters:
# externalMgrEndpoints: <list of endpoints>
# externalMgrPrometheusPort: <port>
# Scrape interval for prometheus
# interval: 10s
# allow adding custom labels and annotations to the prometheus rule
prometheusRule:
# -- Labels applied to PrometheusRule
labels: {}
# -- Annotations applied to PrometheusRule
annotations: {}
# -- Create & use PSP resources. Set this to the same value as the rook-ceph chart.
pspEnable: false
# imagePullSecrets option allow to pull docker images from private docker registry. Option will be passed to all service accounts.
# imagePullSecrets:
# - name: my-registry-secret
# All values below are taken from the CephCluster CRD
# -- Cluster configuration.
# @default -- See [below](#ceph-cluster-spec)
cephClusterSpec:
# This cluster spec example is for a converged cluster where all the Ceph daemons are running locally,
# as in the host-based example (cluster.yaml). For a different configuration such as a
# PVC-based cluster (cluster-on-pvc.yaml), external cluster (cluster-external.yaml),
# or stretch cluster (cluster-stretched.yaml), replace this entire `cephClusterSpec`
# with the specs from those examples.
# For more details, check https://rook.io/docs/rook/v1.10/CRDs/Cluster/ceph-cluster-crd/
cephVersion:
# The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
# v18 is Reef, v19 is Squid
# RECOMMENDATION: In production, use a specific version tag instead of the general v18 flag, which pulls the latest release and could result in different
# versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
# If you want to be more precise, you can always use a timestamp tag such as quay.io/ceph/ceph:v19.2.2-20250409
# This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities
image: quay.io/ceph/ceph:v19.2.2
# Whether to allow unsupported versions of Ceph. Currently Reef and Squid are supported.
# Future versions such as Tentacle (v20) would require this to be set to `true`.
# Do not set to true in production.
allowUnsupported: false
# The path on the host where configuration files will be persisted. Must be specified. If there are multiple clusters, the directory must be unique for each cluster.
# Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster.
# In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment.
dataDirHostPath: /var/lib/rook
# Whether or not upgrade should continue even if a check fails
# This means Ceph's status could be degraded and we don't recommend upgrading but you might decide otherwise
# Use at your OWN risk
# To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/v1.10/Upgrade/ceph-upgrade/
skipUpgradeChecks: false
# Whether or not continue if PGs are not clean during an upgrade
continueUpgradeAfterChecksEvenIfNotHealthy: false
# WaitTimeoutForHealthyOSDInMinutes defines the time (in minutes) the operator would wait before an OSD can be stopped for upgrade or restart.
# If the timeout exceeds and OSD is not ok to stop, then the operator would skip upgrade for the current OSD and proceed with the next one
# if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, then operator would
# continue with the upgrade of an OSD even if its not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`.
# The default wait timeout is 10 minutes.
waitTimeoutForHealthyOSDInMinutes: 10
# Whether or not requires PGs are clean before an OSD upgrade. If set to `true` OSD upgrade process won't start until PGs are healthy.
# This configuration will be ignored if `skipUpgradeChecks` is `true`.
# Default is false.
upgradeOSDRequiresHealthyPGs: false
mon:
# Set the number of mons to be started. Generally recommended to be 3.
# For highest availability, an odd number of mons should be specified.
count: 3
# The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason.
# Mons should only be allowed on the same node for test environments where data loss is acceptable.
allowMultiplePerNode: false
mgr:
# When higher availability of the mgr is needed, increase the count to 2.
# In that case, one mgr will be active and one in standby. When Ceph updates which
# mgr is active, Rook will update the mgr services to match the active mgr.
count: 2
allowMultiplePerNode: false
modules:
# List of modules to optionally enable or disable.
# Note the "dashboard" and "monitoring" modules are already configured by other settings in the cluster CR.
# - name: rook
# enabled: true
# enable the ceph dashboard for viewing cluster status
dashboard:
enabled: true
# serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
# urlPrefix: /ceph-dashboard
# serve the dashboard at the given port.
# port: 8443
# Serve the dashboard using SSL (if using ingress to expose the dashboard and `ssl: true` you need to set
# the corresponding "backend protocol" annotation(s) for your ingress controller of choice)
ssl: true
# Network configuration, see: https://github.com/rook/rook/blob/master/Documentation/CRDs/Cluster/ceph-cluster-crd.md#network-configuration-settings
network:
connections:
# Whether to encrypt the data in transit across the wire to prevent eavesdropping the data on the network.
# The default is false. When encryption is enabled, all communication between clients and Ceph daemons, or between Ceph daemons will be encrypted.
# When encryption is not enabled, clients still establish a strong initial authentication and data integrity is still validated with a crc check.
# IMPORTANT: Encryption requires the 5.11 kernel for the latest nbd and cephfs drivers. Alternatively for testing only,
# you can set the "mounter: rbd-nbd" in the rbd storage class, or "mounter: fuse" in the cephfs storage class.
# The nbd and fuse drivers are *not* recommended in production since restarting the csi driver pod will disconnect the volumes.
encryption:
enabled: false
# Whether to compress the data in transit across the wire. The default is false.
# The kernel requirements above for encryption also apply to compression.
compression:
enabled: false
# Whether to require communication over msgr2. If true, the msgr v1 port (6789) will be disabled
# and clients will be required to connect to the Ceph cluster with the v2 port (3300).
# Requires a kernel that supports msgr v2 (kernel 5.11 or CentOS 8.4 or newer).
requireMsgr2: false
# # enable host networking
# provider: host
# # EXPERIMENTAL: enable the Multus network provider
# provider: multus
# selectors:
# # The selector keys are required to be `public` and `cluster`.
# # Based on the configuration, the operator will do the following:
# # 1. if only the `public` selector key is specified both public_network and cluster_network Ceph settings will listen on that interface
# # 2. if both `public` and `cluster` selector keys are specified the first one will point to 'public_network' flag and the second one to 'cluster_network'
# #
# # In order to work, each selector value must match a NetworkAttachmentDefinition object in Multus
# #
# # public: public-conf --> NetworkAttachmentDefinition object name in Multus
# # cluster: cluster-conf --> NetworkAttachmentDefinition object name in Multus
# # Provide internet protocol version. IPv6, IPv4 or empty string are valid options. Empty string would mean IPv4
# ipFamily: "IPv6"
# # Ceph daemons to listen on both IPv4 and Ipv6 networks
# dualStack: false
# enable the crash collector for ceph daemon crash collection
crashCollector:
disable: false
# Uncomment daysToRetain to prune ceph crash entries older than the
# specified number of days.
# daysToRetain: 30
# enable log collector, daemons will log on files and rotate
logCollector:
enabled: true
periodicity: daily # one of: hourly, daily, weekly, monthly
maxLogSize: 500M # SUFFIX may be 'M' or 'G'. Must be at least 1M.
# automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction.
cleanupPolicy:
# Since cluster cleanup is destructive to data, confirmation is required.
# To destroy all Rook data on hosts during uninstall, confirmation must be set to "yes-really-destroy-data".
# This value should only be set when the cluster is about to be deleted. After the confirmation is set,
# Rook will immediately stop configuring the cluster and only wait for the delete command.
# If the empty string is set, Rook will not destroy any data on hosts during uninstall.
confirmation: ""
# sanitizeDisks represents settings for sanitizing OSD disks on cluster deletion
sanitizeDisks:
# method indicates if the entire disk should be sanitized or simply ceph's metadata
# in both case, re-install is possible
# possible choices are 'complete' or 'quick' (default)
method: quick
# dataSource indicate where to get random bytes from to write on the disk
# possible choices are 'zero' (default) or 'random'
# using random sources will consume entropy from the system and will take much more time than the zero source
dataSource: zero
# iteration overwrite N times instead of the default (1)
# takes an integer value
iteration: 1
# allowUninstallWithVolumes defines how the uninstall should be performed
# If set to true, cephCluster deletion does not wait for the PVs to be deleted.
allowUninstallWithVolumes: false
# To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
# The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
# tolerate taints with a key of 'storage-node'.
# placement:
# all:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: role
# operator: In
# values:
# - storage-node
# podAffinity:
# podAntiAffinity:
# topologySpreadConstraints:
# tolerations:
# - key: storage-node
# operator: Exists
# # The above placement information can also be specified for mon, osd, and mgr components
# mon:
# # Monitor deployments may contain an anti-affinity rule for avoiding monitor
# # collocation on the same node. This is a required rule when host network is used
# # or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a
# # preferred rule with weight: 50.
# osd:
# mgr:
# cleanup:
# annotations:
# all:
# mon:
# osd:
# cleanup:
# prepareosd:
# # If no mgr annotations are set, prometheus scrape annotations will be set by default.
# mgr:
# dashboard:
# labels:
# all:
# mon:
# osd:
# cleanup:
# mgr:
# prepareosd:
# # monitoring is a list of key-value pairs. It is injected into all the monitoring resources created by operator.
# # These labels can be passed as LabelSelector to Prometheus
# monitoring:
# dashboard:
resources:
mgr:
limits:
memory: "1Gi"
requests:
cpu: "500m"
memory: "512Mi"
mon:
limits:
memory: "2Gi"
requests:
cpu: "1000m"
memory: "1Gi"
osd:
limits:
memory: "4Gi"
requests:
cpu: "1000m"
memory: "4Gi"
prepareosd:
# limits: It is not recommended to set limits on the OSD prepare job
# since it's a one-time burst for memory that must be allowed to
# complete without an OOM kill. Note however that if a k8s
# limitRange guardrail is defined external to Rook, the lack of
# a limit here may result in a sync failure, in which case a
# limit should be added. 1200Mi may suffice for up to 15Ti
# OSDs ; for larger devices 2Gi may be required.
# cf. https://github.com/rook/rook/pull/11103
requests:
cpu: "500m"
memory: "50Mi"
mgr-sidecar:
limits:
memory: "100Mi"
requests:
cpu: "100m"
memory: "40Mi"
crashcollector:
limits:
memory: "60Mi"
requests:
cpu: "100m"
memory: "60Mi"
logcollector:
limits:
memory: "1Gi"
requests:
cpu: "100m"
memory: "100Mi"
cleanup:
limits:
memory: "1Gi"
requests:
cpu: "500m"
memory: "100Mi"
exporter:
limits:
memory: "128Mi"
requests:
cpu: "50m"
memory: "50Mi"
# The option to automatically remove OSDs that are out and are safe to destroy.
removeOSDsIfOutAndSafeToRemove: false
# priority classes to apply to ceph resources
priorityClassNames:
mon: system-node-critical
osd: system-node-critical
mgr: system-cluster-critical
storage: # cluster level storage configuration and selection
useAllNodes: true
useAllDevices: true
# deviceFilter:
# config:
# crushRoot: "custom-root" # specify a non-default root label for the CRUSH map
# metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as block db device of bluestore.
# databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB
# osdsPerDevice: "1" # this value can be overridden at the node or device level
# encryptedDevice: "true" # the default value for this option is "false"
# # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
# # nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label.
# nodes:
# - name: "172.17.4.201"
# devices: # specific devices to use for storage can be specified for each node
# - name: "sdb"
# - name: "nvme01" # multiple osds can be created on high performance devices
# config:
# osdsPerDevice: "5"
# - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths
# config: # configuration can be specified at the node level which overrides the cluster level config
# - name: "172.17.4.301"
# deviceFilter: "^sd."
# The section for configuring management of daemon disruptions during upgrade or fencing.
disruptionManagement:
# If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically
# via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph/ceph-managed-disruptionbudgets.md). The operator will
# block eviction of OSDs by default and unblock them safely when drains are detected.
managePodBudgets: true
# A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the
# default DOWN/OUT interval) when it is draining. This is only relevant when `managePodBudgets` is `true`. The default value is `30` minutes.
osdMaintenanceTimeout: 30
# Configure the healthcheck and liveness probes for ceph pods.
# Valid values for daemons are 'mon', 'osd', 'status'
healthCheck:
daemonHealth:
mon:
disabled: false
interval: 45s
osd:
disabled: false
interval: 60s
status:
disabled: false
interval: 60s
# Change pod liveness probe, it works for all mon, mgr, and osd pods.
livenessProbe:
mon:
disabled: false
mgr:
disabled: false
osd:
disabled: false
ingress:
# -- Enable an ingress for the ceph-dashboard
dashboard:
# {}
# labels:
# external-dns/private: "true"
annotations:
"route.openshift.io/termination": "passthrough"
# external-dns.alpha.kubernetes.io/hostname: dashboard.example.com
# nginx.ingress.kubernetes.io/rewrite-target: /ceph-dashboard/$2
# If the dashboard has ssl: true the following will make sure the NGINX Ingress controller can expose the dashboard correctly
# nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
# nginx.ingress.kubernetes.io/server-snippet: |
# proxy_ssl_verify off;
host:
name: ceph.apps.ncd0.harmony.mcd
path: null # TODO: the chart does not allow removing the path, which makes OpenShift fail to create the Route, because a path is not supported with passthrough termination
pathType: ImplementationSpecific
tls:
- {}
# secretName: testsecret-tls
# Note: Only one of ingress class annotation or the `ingressClassName:` can be used at a time
# to set the ingress class
# ingressClassName: openshift-default
# labels:
# external-dns/private: "true"
# annotations:
# external-dns.alpha.kubernetes.io/hostname: dashboard.example.com
# nginx.ingress.kubernetes.io/rewrite-target: /ceph-dashboard/$2
# If the dashboard has ssl: true the following will make sure the NGINX Ingress controller can expose the dashboard correctly
# nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
# nginx.ingress.kubernetes.io/server-snippet: |
# proxy_ssl_verify off;
# host:
# name: dashboard.example.com
# path: "/ceph-dashboard(/|$)(.*)"
# pathType: Prefix
# tls:
# - hosts:
# - dashboard.example.com
# secretName: testsecret-tls
## Note: Only one of ingress class annotation or the `ingressClassName:` can be used at a time
## to set the ingress class
# ingressClassName: nginx
# -- A list of CephBlockPool configurations to deploy
# @default -- See [below](#ceph-block-pools)
cephBlockPools:
- name: ceph-blockpool
# see https://github.com/rook/rook/blob/master/Documentation/CRDs/Block-Storage/ceph-block-pool-crd.md#spec for available configuration
spec:
failureDomain: host
replicated:
size: 3
# Enables collecting RBD per-image IO statistics by enabling dynamic OSD performance counters. Defaults to false.
# For reference: https://docs.ceph.com/docs/latest/mgr/prometheus/#rbd-io-statistics
# enableRBDStats: true
storageClass:
enabled: true
name: ceph-block
annotations: {}
labels: {}
isDefault: true
reclaimPolicy: Delete
allowVolumeExpansion: true
volumeBindingMode: "Immediate"
mountOptions: []
# see https://kubernetes.io/docs/concepts/storage/storage-classes/#allowed-topologies
allowedTopologies: []
# - matchLabelExpressions:
# - key: rook-ceph-role
# values:
# - storage-node
# see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Block-Storage-RBD/block-storage.md#provision-storage for available configuration
parameters:
# (optional) mapOptions is a comma-separated list of map options.
# For krbd options refer
# https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
# For nbd options refer
# https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
# mapOptions: lock_on_read,queue_depth=1024
# (optional) unmapOptions is a comma-separated list of unmap options.
# For krbd options refer
# https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
# For nbd options refer
# https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
# unmapOptions: force
# RBD image format. Defaults to "2".
imageFormat: "2"
# RBD image features, equivalent to OR'd bitfield value: 63
# Available for imageFormat: "2". Older releases of CSI RBD
# support only the `layering` feature. The Linux kernel (KRBD) supports the
# full feature complement as of 5.4
imageFeatures: layering
# These secrets contain Ceph admin credentials.
csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
# Specify the filesystem type of the volume. If not specified, csi-provisioner
# will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
# in hyperconverged settings where the volume is mounted on the same node as the osds.
csi.storage.k8s.io/fstype: ext4
# -- A list of CephFileSystem configurations to deploy
# @default -- See [below](#ceph-file-systems)
cephFileSystems:
- name: ceph-filesystem
# see https://github.com/rook/rook/blob/master/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#filesystem-settings for available configuration
spec:
metadataPool:
replicated:
size: 3
dataPools:
- failureDomain: host
replicated:
size: 3
# Optional and highly recommended, 'data0' by default, see https://github.com/rook/rook/blob/master/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#pools
name: data0
metadataServer:
activeCount: 1
activeStandby: true
resources:
limits:
memory: "4Gi"
requests:
cpu: "1000m"
memory: "4Gi"
priorityClassName: system-cluster-critical
storageClass:
enabled: true
isDefault: false
name: ceph-filesystem
# (Optional) specify a data pool to use, must be the name of one of the data pools above, 'data0' by default
pool: data0
reclaimPolicy: Delete
allowVolumeExpansion: true
volumeBindingMode: "Immediate"
annotations: {}
labels: {}
mountOptions: []
# see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Shared-Filesystem-CephFS/filesystem-storage.md#provision-storage for available configuration
parameters:
# The secrets contain Ceph admin credentials.
csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
# Specify the filesystem type of the volume. If not specified, csi-provisioner
# will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
# in hyperconverged settings where the volume is mounted on the same node as the osds.
csi.storage.k8s.io/fstype: ext4
# -- Settings for the filesystem snapshot class
# @default -- See [CephFS Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#cephfs-snapshots)
cephFileSystemVolumeSnapshotClass:
enabled: false
name: ceph-filesystem
isDefault: true
deletionPolicy: Delete
annotations: {}
labels: {}
# see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#cephfs-snapshots for available configuration
parameters: {}
# -- Settings for the block pool snapshot class
# @default -- See [RBD Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#rbd-snapshots)
cephBlockPoolsVolumeSnapshotClass:
enabled: false
name: ceph-block
isDefault: false
deletionPolicy: Delete
annotations: {}
labels: {}
# see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#rbd-snapshots for available configuration
parameters: {}
# -- A list of CephObjectStore configurations to deploy
# @default -- See [below](#ceph-object-stores)
cephObjectStores:
- name: ceph-objectstore
# see https://github.com/rook/rook/blob/master/Documentation/CRDs/Object-Storage/ceph-object-store-crd.md#object-store-settings for available configuration
spec:
metadataPool:
failureDomain: host
replicated:
size: 3
dataPool:
failureDomain: host
erasureCoded:
dataChunks: 2
codingChunks: 1
parameters:
bulk: "true"
preservePoolsOnDelete: true
gateway:
port: 80
resources:
limits:
memory: "2Gi"
requests:
cpu: "1000m"
memory: "1Gi"
# securePort: 443
# sslCertificateRef:
instances: 1
priorityClassName: system-cluster-critical
# opsLogSidecar:
# resources:
# limits:
# memory: "100Mi"
# requests:
# cpu: "100m"
# memory: "40Mi"
storageClass:
enabled: true
name: ceph-bucket
reclaimPolicy: Delete
volumeBindingMode: "Immediate"
annotations: {}
labels: {}
# see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Object-Storage-RGW/ceph-object-bucket-claim.md#storageclass for available configuration
parameters:
# note: objectStoreNamespace and objectStoreName are configured by the chart
region: us-east-1
ingress:
# Enable an ingress for the ceph-objectstore
enabled: true
# The ingress port by default will be the object store's "securePort" (if set), or the gateway "port".
# To override those defaults, set this ingress port to the desired port.
# port: 80
# annotations: {}
host:
name: objectstore.apps.ncd0.harmony.mcd
path: /
pathType: Prefix
# tls:
# - hosts:
# - objectstore.example.com
# secretName: ceph-objectstore-tls
# ingressClassName: nginx
## cephECBlockPools are disabled by default, please remove the comments and set desired values to enable it
## For erasure coded a replicated metadata pool is required.
## https://rook.io/docs/rook/latest/CRDs/Shared-Filesystem/ceph-filesystem-crd/#erasure-coded
#cephECBlockPools:
# - name: ec-pool
# spec:
# metadataPool:
# replicated:
# size: 2
# dataPool:
# failureDomain: osd
# erasureCoded:
# dataChunks: 2
# codingChunks: 1
# deviceClass: hdd
#
# parameters:
# # clusterID is the namespace where the rook cluster is running
# # If you change this namespace, also change the namespace below where the secret namespaces are defined
# clusterID: rook-ceph # namespace:cluster
# # (optional) mapOptions is a comma-separated list of map options.
# # For krbd options refer
# # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
# # For nbd options refer
# # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
# # mapOptions: lock_on_read,queue_depth=1024
#
# # (optional) unmapOptions is a comma-separated list of unmap options.
# # For krbd options refer
# # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
# # For nbd options refer
# # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
# # unmapOptions: force
#
# # RBD image format. Defaults to "2".
# imageFormat: "2"
#
# # RBD image features, equivalent to OR'd bitfield value: 63
# # Available for imageFormat: "2". Older releases of CSI RBD
# # support only the `layering` feature. The Linux kernel (KRBD) supports the
# # full feature complement as of 5.4
# # imageFeatures: layering,fast-diff,object-map,deep-flatten,exclusive-lock
# imageFeatures: layering
#
# storageClass:
# provisioner: rook-ceph.rbd.csi.ceph.com # csi-provisioner-name
# enabled: true
# name: rook-ceph-block
# isDefault: false
# annotations: { }
# labels: { }
# allowVolumeExpansion: true
# reclaimPolicy: Delete
# -- CSI driver name prefix for cephfs, rbd and nfs.
# @default -- `namespace name where rook-ceph operator is deployed`
csiDriverNamePrefix:

View File

@@ -0,0 +1,3 @@
#!/bin/bash
# Install (or upgrade) the Rook Ceph operator into the rook-ceph namespace,
# applying the local values.yaml overrides.
set -euo pipefail  # abort on any error, unset variable, or pipeline failure

# Register the upstream Rook chart repository and refresh its index so we
# always resolve the latest published chart version.
helm repo add rook-release https://charts.rook.io/release
helm repo update

# `upgrade --install` is idempotent: it succeeds whether or not the release
# already exists, unlike plain `helm install` which fails on re-run.
helm upgrade --install --create-namespace --namespace rook-ceph rook-ceph rook-release/rook-ceph -f values.yaml

View File

@@ -0,0 +1,674 @@
# Default values for rook-ceph-operator
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
image:
# -- Image
repository: docker.io/rook/ceph
# -- Image tag
# @default -- `master`
tag: v1.17.1
# -- Image pull policy
pullPolicy: IfNotPresent
crds:
# -- Whether the helm chart should create and update the CRDs. If false, the CRDs must be
# managed independently with deploy/examples/crds.yaml.
# **WARNING** Only set during first deployment. If later disabled the cluster may be DESTROYED.
# If the CRDs are deleted in this case, see
# [the disaster recovery guide](https://rook.io/docs/rook/latest/Troubleshooting/disaster-recovery/#restoring-crds-after-deletion)
# to restore them.
enabled: true
# -- Pod resource requests & limits
resources:
limits:
memory: 512Mi
requests:
cpu: 200m
memory: 128Mi
# -- Kubernetes [`nodeSelector`](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) to add to the Deployment.
nodeSelector: {}
# Constraint rook-ceph-operator Deployment to nodes with label `disktype: ssd`.
# For more info, see https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
# disktype: ssd
# -- List of Kubernetes [`tolerations`](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) to add to the Deployment.
tolerations: []
# -- Delay to use for the `node.kubernetes.io/unreachable` pod failure toleration to override
# the Kubernetes default of 5 minutes
unreachableNodeTolerationSeconds: 5
# -- Whether the operator should watch cluster CRD in its own namespace or not
currentNamespaceOnly: false
# -- Custom pod labels for the operator
operatorPodLabels: {}
# -- Pod annotations
annotations: {}
# -- Global log level for the operator.
# Options: `ERROR`, `WARNING`, `INFO`, `DEBUG`
logLevel: INFO
# -- If true, create & use RBAC resources
rbacEnable: true
rbacAggregate:
# -- If true, create a ClusterRole aggregated to [user facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) for objectbucketclaims
enableOBCs: false
# -- If true, create & use PSP resources
pspEnable: false
# -- Set the priority class for the rook operator deployment if desired
priorityClassName:
# -- Set the container security context for the operator
containerSecurityContext:
runAsNonRoot: true
runAsUser: 2016
runAsGroup: 2016
capabilities:
drop: ["ALL"]
# -- If true, loop devices are allowed to be used for osds in test clusters
allowLoopDevices: false
# Settings for whether to disable the drivers or other daemons if they are not
# needed
csi:
# -- Enable Ceph CSI RBD driver
enableRbdDriver: true
# -- Enable Ceph CSI CephFS driver
enableCephfsDriver: true
# -- Disable the CSI driver.
disableCsiDriver: "false"
# -- Enable host networking for CSI CephFS and RBD nodeplugins. This may be necessary
# in some network configurations where the SDN does not provide access to an external cluster or
# there is significant drop in read/write performance
enableCSIHostNetwork: true
# -- Enable Snapshotter in CephFS provisioner pod
enableCephfsSnapshotter: true
# -- Enable Snapshotter in NFS provisioner pod
enableNFSSnapshotter: true
# -- Enable Snapshotter in RBD provisioner pod
enableRBDSnapshotter: true
# -- Enable Host mount for `/etc/selinux` directory for Ceph CSI nodeplugins
enablePluginSelinuxHostMount: false
# -- Enable Ceph CSI PVC encryption support
enableCSIEncryption: false
# -- Enable volume group snapshot feature. This feature is
# enabled by default as long as the necessary CRDs are available in the cluster.
enableVolumeGroupSnapshot: true
# -- PriorityClassName to be set on csi driver plugin pods
pluginPriorityClassName: system-node-critical
# -- PriorityClassName to be set on csi driver provisioner pods
provisionerPriorityClassName: system-cluster-critical
# -- Policy for modifying a volume's ownership or permissions when the RBD PVC is being mounted.
# supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
rbdFSGroupPolicy: "File"
# -- Policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted.
# supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
cephFSFSGroupPolicy: "File"
# -- Policy for modifying a volume's ownership or permissions when the NFS PVC is being mounted.
# supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
nfsFSGroupPolicy: "File"
# -- OMAP generator generates the omap mapping between the PV name and the RBD image
# which helps CSI to identify the rbd images for CSI operations.
# `CSI_ENABLE_OMAP_GENERATOR` needs to be enabled when we are using rbd mirroring feature.
# By default OMAP generator is disabled and when enabled, it will be deployed as a
# sidecar with CSI provisioner pod, to enable set it to true.
enableOMAPGenerator: false
# -- Set CephFS Kernel mount options to use https://docs.ceph.com/en/latest/man/8/mount.ceph/#options.
# Set to "ms_mode=secure" when connections.encrypted is enabled in CephCluster CR
cephFSKernelMountOptions:
# -- Enable adding volume metadata on the CephFS subvolumes and RBD images.
# Not all users might be interested in getting volume/snapshot details as metadata on CephFS subvolume and RBD images.
# Hence enable metadata is false by default
enableMetadata: false
# -- Set replicas for csi provisioner deployment
provisionerReplicas: 2
# -- Cluster name identifier to set as metadata on the CephFS subvolume and RBD images. This will be useful
# in cases like for example, when two container orchestrator clusters (Kubernetes/OCP) are using a single ceph cluster
clusterName:
# -- Set logging level for cephCSI containers maintained by the cephCSI.
# Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity.
logLevel: 0
# -- Set logging level for Kubernetes-csi sidecar containers.
# Supported values from 0 to 5. 0 for general useful logs (the default), 5 for trace level verbosity.
# @default -- `0`
sidecarLogLevel:
# -- CSI driver name prefix for cephfs, rbd and nfs.
# @default -- `namespace name where rook-ceph operator is deployed`
csiDriverNamePrefix:
# -- CSI RBD plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
# @default -- `RollingUpdate`
rbdPluginUpdateStrategy:
# -- A maxUnavailable parameter of CSI RBD plugin daemonset update strategy.
# @default -- `1`
rbdPluginUpdateStrategyMaxUnavailable:
# -- CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
# @default -- `RollingUpdate`
cephFSPluginUpdateStrategy:
# -- A maxUnavailable parameter of CSI cephFS plugin daemonset update strategy.
# @default -- `1`
cephFSPluginUpdateStrategyMaxUnavailable:
# -- CSI NFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
# @default -- `RollingUpdate`
nfsPluginUpdateStrategy:
# -- Set GRPC timeout for csi containers (in seconds). It should be >= 120. If this value is not set or is invalid, it defaults to 150
grpcTimeoutInSeconds: 150
# -- Burst to use while communicating with the kubernetes apiserver.
kubeApiBurst:
# -- QPS to use while communicating with the kubernetes apiserver.
kubeApiQPS:
# -- The volume of the CephCSI RBD plugin DaemonSet
csiRBDPluginVolume:
# - name: lib-modules
# hostPath:
# path: /run/booted-system/kernel-modules/lib/modules/
# - name: host-nix
# hostPath:
# path: /nix
# -- The volume mounts of the CephCSI RBD plugin DaemonSet
csiRBDPluginVolumeMount:
# - name: host-nix
# mountPath: /nix
# readOnly: true
# -- The volume of the CephCSI CephFS plugin DaemonSet
csiCephFSPluginVolume:
# - name: lib-modules
# hostPath:
# path: /run/booted-system/kernel-modules/lib/modules/
# - name: host-nix
# hostPath:
# path: /nix
# -- The volume mounts of the CephCSI CephFS plugin DaemonSet
csiCephFSPluginVolumeMount:
# - name: host-nix
# mountPath: /nix
# readOnly: true
# -- CEPH CSI RBD provisioner resource requirement list
# csi-omap-generator resources will be applied only if `enableOMAPGenerator` is set to `true`
# @default -- see values.yaml
csiRBDProvisionerResource: |
- name : csi-provisioner
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
- name : csi-resizer
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
- name : csi-attacher
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
- name : csi-snapshotter
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
- name : csi-rbdplugin
resource:
requests:
memory: 512Mi
limits:
memory: 1Gi
- name : csi-omap-generator
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
- name : liveness-prometheus
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
# -- CEPH CSI RBD plugin resource requirement list
# @default -- see values.yaml
csiRBDPluginResource: |
- name : driver-registrar
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
- name : csi-rbdplugin
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
- name : liveness-prometheus
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
# -- CEPH CSI CephFS provisioner resource requirement list
# @default -- see values.yaml
csiCephFSProvisionerResource: |
- name : csi-provisioner
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
- name : csi-resizer
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
- name : csi-attacher
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
- name : csi-snapshotter
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
- name : csi-cephfsplugin
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
- name : liveness-prometheus
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
# -- CEPH CSI CephFS plugin resource requirement list
# @default -- see values.yaml
csiCephFSPluginResource: |
- name : driver-registrar
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
- name : csi-cephfsplugin
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
- name : liveness-prometheus
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
# -- CEPH CSI NFS provisioner resource requirement list
# @default -- see values.yaml
csiNFSProvisionerResource: |
- name : csi-provisioner
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
- name : csi-nfsplugin
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
- name : csi-attacher
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
# -- CEPH CSI NFS plugin resource requirement list
# @default -- see values.yaml
csiNFSPluginResource: |
- name : driver-registrar
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
- name : csi-nfsplugin
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
# Set provisionerTolerations and provisionerNodeAffinity for provisioner pod.
# The CSI provisioner would be best to start on the same nodes as other ceph daemons.
# -- Array of tolerations in YAML format which will be added to CSI provisioner deployment
provisionerTolerations:
# - key: key
# operator: Exists
# effect: NoSchedule
# -- The node labels for affinity of the CSI provisioner deployment [^1]
provisionerNodeAffinity: #key1=value1,value2; key2=value3
# Set pluginTolerations and pluginNodeAffinity for plugin daemonset pods.
# The CSI plugins need to be started on all the nodes where the clients need to mount the storage.
# -- Array of tolerations in YAML format which will be added to CephCSI plugin DaemonSet
pluginTolerations:
# - key: key
# operator: Exists
# effect: NoSchedule
# -- The node labels for affinity of the CephCSI RBD plugin DaemonSet [^1]
pluginNodeAffinity: # key1=value1,value2; key2=value3
# -- Enable Ceph CSI Liveness sidecar deployment
enableLiveness: false
# -- CSI CephFS driver metrics port
# @default -- `9081`
cephfsLivenessMetricsPort:
# -- CSI Addons server port
# @default -- `9070`
csiAddonsPort:
# -- CSI Addons server port for the RBD provisioner
# @default -- `9070`
csiAddonsRBDProvisionerPort:
# -- CSI Addons server port for the Ceph FS provisioner
# @default -- `9070`
csiAddonsCephFSProvisionerPort:
# -- Enable Ceph Kernel clients on kernel < 4.17. If your kernel does not support quotas for CephFS
# you may want to disable this setting. However, this will cause an issue during upgrades
# with the FUSE client. See the [upgrade guide](https://rook.io/docs/rook/v1.2/ceph-upgrade.html)
forceCephFSKernelClient: true
# -- Ceph CSI RBD driver metrics port
# @default -- `8080`
rbdLivenessMetricsPort:
serviceMonitor:
# -- Enable ServiceMonitor for Ceph CSI drivers
enabled: false
# -- Service monitor scrape interval
interval: 10s
# -- ServiceMonitor additional labels
labels: {}
# -- Use a different namespace for the ServiceMonitor
namespace:
# -- Kubelet root directory path (if the Kubelet uses a different path for the `--root-dir` flag)
# @default -- `/var/lib/kubelet`
kubeletDirPath:
# -- Duration in seconds that non-leader candidates will wait to force acquire leadership.
# @default -- `137s`
csiLeaderElectionLeaseDuration:
# -- Deadline in seconds that the acting leader will retry refreshing leadership before giving up.
# @default -- `107s`
csiLeaderElectionRenewDeadline:
# -- Retry period in seconds the LeaderElector clients should wait between tries of actions.
# @default -- `26s`
csiLeaderElectionRetryPeriod:
cephcsi:
# -- Ceph CSI image repository
repository: quay.io/cephcsi/cephcsi
# -- Ceph CSI image tag
tag: v3.14.0
registrar:
# -- Kubernetes CSI registrar image repository
repository: registry.k8s.io/sig-storage/csi-node-driver-registrar
# -- Registrar image tag
tag: v2.13.0
provisioner:
# -- Kubernetes CSI provisioner image repository
repository: registry.k8s.io/sig-storage/csi-provisioner
# -- Provisioner image tag
tag: v5.1.0
snapshotter:
# -- Kubernetes CSI snapshotter image repository
repository: registry.k8s.io/sig-storage/csi-snapshotter
# -- Snapshotter image tag
tag: v8.2.0
attacher:
# -- Kubernetes CSI Attacher image repository
repository: registry.k8s.io/sig-storage/csi-attacher
# -- Attacher image tag
tag: v4.8.0
resizer:
# -- Kubernetes CSI resizer image repository
repository: registry.k8s.io/sig-storage/csi-resizer
# -- Resizer image tag
tag: v1.13.1
# -- Image pull policy
imagePullPolicy: IfNotPresent
# -- Labels to add to the CSI CephFS Deployments and DaemonSets Pods
cephfsPodLabels: #"key1=value1,key2=value2"
# -- Labels to add to the CSI NFS Deployments and DaemonSets Pods
nfsPodLabels: #"key1=value1,key2=value2"
# -- Labels to add to the CSI RBD Deployments and DaemonSets Pods
rbdPodLabels: #"key1=value1,key2=value2"
csiAddons:
# -- Enable CSIAddons
enabled: false
# -- CSIAddons sidecar image repository
repository: quay.io/csiaddons/k8s-sidecar
# -- CSIAddons sidecar image tag
tag: v0.12.0
nfs:
# -- Enable the nfs csi driver
enabled: false
topology:
# -- Enable topology based provisioning
enabled: false
# NOTE: the value here serves as an example and needs to be
# updated with node labels that define domains of interest
# -- domainLabels define which node labels to use as domains
# for CSI nodeplugins to advertise their domains
domainLabels:
# - kubernetes.io/hostname
# - topology.kubernetes.io/zone
# - topology.rook.io/rack
# -- Whether to skip any attach operation altogether for CephFS PVCs. See more details
# [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
# If cephFSAttachRequired is set to false it skips the volume attachments and makes the creation
# of pods using the CephFS PVC fast. **WARNING** It's highly discouraged to use this for
# CephFS RWO volumes. Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
cephFSAttachRequired: true
# -- Whether to skip any attach operation altogether for RBD PVCs. See more details
# [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
# If set to false it skips the volume attachments and makes the creation of pods using the RBD PVC fast.
# **WARNING** It's highly discouraged to use this for RWO volumes as it can cause data corruption.
# csi-addons operations like Reclaimspace and PVC Keyrotation will also not be supported if set
# to false since we'll have no VolumeAttachments to determine which node the PVC is mounted on.
# Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
rbdAttachRequired: true
# -- Whether to skip any attach operation altogether for NFS PVCs. See more details
# [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
# If cephFSAttachRequired is set to false it skips the volume attachments and makes the creation
# of pods using the NFS PVC fast. **WARNING** It's highly discouraged to use this for
# NFS RWO volumes. Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
nfsAttachRequired: true
# -- Enable discovery daemon
enableDiscoveryDaemon: false
# -- Set the discovery daemon device discovery interval (default to 60m)
discoveryDaemonInterval: 60m
# -- The timeout for ceph commands in seconds
cephCommandsTimeoutSeconds: "15"
# -- If true, run rook operator on the host network
useOperatorHostNetwork:
# -- If true, scale down the rook operator.
# This is useful for administrative actions where the rook operator must be scaled down, while using gitops style tooling
# to deploy your helm charts.
scaleDownOperator: false
## Rook Discover configuration
## toleration: NoSchedule, PreferNoSchedule or NoExecute
## tolerationKey: Set this to the specific key of the taint to tolerate
## tolerations: Array of tolerations in YAML format which will be added to agent deployment
## nodeAffinity: Set to labels of the node to match
discover:
# -- Toleration for the discover pods.
# Options: `NoSchedule`, `PreferNoSchedule` or `NoExecute`
toleration:
# -- The specific key of the taint to tolerate
tolerationKey:
# -- Array of tolerations in YAML format which will be added to discover deployment
tolerations:
# - key: key
# operator: Exists
# effect: NoSchedule
# -- The node labels for affinity of `discover-agent` [^1]
nodeAffinity:
# key1=value1,value2; key2=value3
#
# or
#
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: storage-node
# operator: Exists
# -- Labels to add to the discover pods
podLabels: # "key1=value1,key2=value2"
# -- Add resources to discover daemon pods
resources:
# - limits:
# memory: 512Mi
# - requests:
# cpu: 100m
# memory: 128Mi
# -- Custom label to identify node hostname. If not set `kubernetes.io/hostname` will be used
customHostnameLabel:
# -- Runs Ceph Pods as privileged to be able to write to `hostPaths` in OpenShift with SELinux restrictions.
hostpathRequiresPrivileged: false
# -- Whether to create all Rook pods to run on the host network, for example in environments where a CNI is not enabled
enforceHostNetwork: false
# -- Disable automatic orchestration when new devices are discovered.
disableDeviceHotplug: false
# -- The revision history limit for all pods created by Rook. If blank, the K8s default is 10.
revisionHistoryLimit:
# -- Blacklist certain disks according to the regex provided.
discoverDaemonUdev:
# -- imagePullSecrets option allow to pull docker images from private docker registry. Option will be passed to all service accounts.
imagePullSecrets:
# - name: my-registry-secret
# -- Whether the OBC provisioner should watch on the operator namespace or not, if not the namespace of the cluster will be used
enableOBCWatchOperatorNamespace: true
# -- Specify the prefix for the OBC provisioner in place of the cluster namespace
# @default -- `ceph cluster namespace`
obcProvisionerNamePrefix:
# -- Many OBC additional config fields may be risky for administrators to allow users control over.
# The safe and default-allowed fields are 'maxObjects' and 'maxSize'.
# Other fields should be considered risky. To allow all additional configs, use this value:
# "maxObjects,maxSize,bucketMaxObjects,bucketMaxSize,bucketPolicy,bucketLifecycle,bucketOwner"
# @default -- "maxObjects,maxSize"
obcAllowAdditionalConfigFields: "maxObjects,maxSize"
monitoring:
# -- Enable monitoring. Requires Prometheus to be pre-installed.
# Enabling will also create RBAC rules to allow Operator to create ServiceMonitors
enabled: false

168
examples/nanodc/src/main.rs Normal file
View File

@@ -0,0 +1,168 @@
use std::{
net::{IpAddr, Ipv4Addr},
sync::Arc,
};
use cidr::Ipv4Cidr;
use harmony::{
config::secret::SshKeyPair,
data::{FileContent, FilePath},
hardware::{HostCategory, Location, PhysicalHost, SwitchGroup},
infra::opnsense::OPNSenseManagementInterface,
inventory::Inventory,
modules::{
http::StaticFilesHttpScore,
okd::{
bootstrap_dhcp::OKDBootstrapDhcpScore,
bootstrap_load_balancer::OKDBootstrapLoadBalancerScore, dhcp::OKDDhcpScore,
dns::OKDDnsScore, ipxe::OKDIpxeScore,
},
tftp::TftpScore,
},
topology::{LogicalHost, UnmanagedRouter},
};
use harmony_macros::{ip, mac_address};
use harmony_secret::SecretManager;
use harmony_types::net::Url;
/// Entry point for the "nanodc" example: drives Harmony's TUI to provision a
/// small OKD data center (3 control-plane + 3 worker hosts) sitting behind a
/// single OPNsense firewall on the 192.168.33.0/24 network.
#[tokio::main]
async fn main() {
    // The OPNsense box at .1 is the site gateway and also fulfills every
    // network-service role of the topology (LB, DHCP, DNS, TFTP, HTTP) below.
    let firewall = harmony::topology::LogicalHost {
        ip: ip!("192.168.33.1"),
        name: String::from("fw0"),
    };
    // NOTE(review): credentials are hard-coded ("root"/"opnsense") —
    // presumably fine for this lab example; confirm before reusing elsewhere.
    let opnsense = Arc::new(
        harmony::infra::opnsense::OPNSenseFirewall::new(firewall, None, "root", "opnsense").await,
    );
    let lan_subnet = Ipv4Addr::new(192, 168, 33, 0);
    let gateway_ipv4 = Ipv4Addr::new(192, 168, 33, 1);
    let gateway_ip = IpAddr::V4(gateway_ipv4);
    let topology = harmony::topology::HAClusterTopology {
        // TODO: this domain must match the one configured manually on the
        // OPNsense firewall during its initial setup.
        domain_name: "ncd0.harmony.mcd".to_string(),
        router: Arc::new(UnmanagedRouter::new(
            gateway_ip,
            Ipv4Cidr::new(lan_subnet, 24).unwrap(),
        )),
        // One OPNsense instance backs every service role of the cluster.
        load_balancer: opnsense.clone(),
        firewall: opnsense.clone(),
        tftp_server: opnsense.clone(),
        http_server: opnsense.clone(),
        dhcp_server: opnsense.clone(),
        dns_server: opnsense.clone(),
        // Three-node control plane at .20–.22.
        control_plane: vec![
            LogicalHost {
                ip: ip!("192.168.33.20"),
                name: "cp0".to_string(),
            },
            LogicalHost {
                ip: ip!("192.168.33.21"),
                name: "cp1".to_string(),
            },
            LogicalHost {
                ip: ip!("192.168.33.22"),
                name: "cp2".to_string(),
            },
        ],
        // Temporary OKD bootstrap node, parked at .66.
        bootstrap_host: LogicalHost {
            ip: ip!("192.168.33.66"),
            name: "bootstrap".to_string(),
        },
        // Three workers at .30–.32.
        workers: vec![
            LogicalHost {
                ip: ip!("192.168.33.30"),
                name: "wk0".to_string(),
            },
            LogicalHost {
                ip: ip!("192.168.33.31"),
                name: "wk1".to_string(),
            },
            LogicalHost {
                ip: ip!("192.168.33.32"),
                name: "wk2".to_string(),
            },
        ],
        switch: vec![],
    };
    // Physical inventory: hosts are identified purely by MAC address so the
    // DHCP/iPXE scores below can match them at network-boot time.
    let inventory = Inventory {
        location: Location::new("I am mobile".to_string(), "earth".to_string()),
        switch: SwitchGroup::from([]),
        firewall_mgmt: Box::new(OPNSenseManagementInterface::new()),
        storage_host: vec![],
        worker_host: vec![
            PhysicalHost::empty(HostCategory::Server)
                .mac_address(mac_address!("C4:62:37:02:61:0F")),
            PhysicalHost::empty(HostCategory::Server)
                .mac_address(mac_address!("C4:62:37:02:61:26")),
            // Remaining provisioning steps (iPXE file, DNS static leases,
            // node bootstrap, Ceph cluster, app deployment) are all driven
            // by the scores assembled at the bottom of this function.
            PhysicalHost::empty(HostCategory::Server)
                .mac_address(mac_address!("C4:62:37:02:61:70")),
        ],
        control_plane_host: vec![
            PhysicalHost::empty(HostCategory::Server)
                .mac_address(mac_address!("C4:62:37:02:60:FA")),
            PhysicalHost::empty(HostCategory::Server)
                .mac_address(mac_address!("C4:62:37:02:61:1A")),
            PhysicalHost::empty(HostCategory::Server)
                .mac_address(mac_address!("C4:62:37:01:BC:68")),
        ],
    };
    // TODO: regroup these fine-grained scores into a single composite OKD
    // bootstrap-preparation score.
    let bootstrap_dhcp_score = OKDBootstrapDhcpScore::new(&topology, &inventory);
    let bootstrap_load_balancer_score = OKDBootstrapLoadBalancerScore::new(&topology);
    let dhcp_score = OKDDhcpScore::new(&topology, &inventory);
    let dns_score = OKDDnsScore::new(&topology);
    let load_balancer_score =
        harmony::modules::okd::load_balancer::OKDLoadBalancerScore::new(&topology);
    // Prompts interactively for the cluster SSH key pair if it is not
    // already stored by the secret manager.
    let ssh_key = SecretManager::get_or_prompt::<SshKeyPair>().await.unwrap();
    // Local folders served over TFTP/HTTP for PXE network boot.
    let tftp_score = TftpScore::new(Url::LocalFolder("./data/watchguard/tftpboot".to_string()));
    let http_score = StaticFilesHttpScore {
        folder_to_serve: Some(Url::LocalFolder(
            "./data/watchguard/pxe-http-files".to_string(),
        )),
        files: vec![],
        remote_path: None,
    };
    let kickstart_filename = "inventory.kickstart".to_string();
    let harmony_inventory_agent = "harmony_inventory_agent".to_string();
    let ipxe_score = OKDIpxeScore {
        kickstart_filename,
        harmony_inventory_agent,
        cluster_pubkey: FileContent {
            path: FilePath::Relative("cluster_ssh_key.pub".to_string()),
            content: ssh_key.public,
        },
    };
    // Hand everything to the TUI runner; score order here is the order they
    // are offered for execution.
    harmony_tui::run(
        inventory,
        topology,
        vec![
            Box::new(dns_score),
            Box::new(bootstrap_dhcp_score),
            Box::new(bootstrap_load_balancer_score),
            Box::new(load_balancer_score),
            Box::new(tftp_score),
            Box::new(http_score),
            Box::new(ipxe_score),
            Box::new(dhcp_score),
        ],
    )
    .await
    .unwrap();
}

View File

@@ -1,9 +0,0 @@
# Environment template for a three-site decentralized topology example.
# Replace the kubeconfig paths, contexts and domains with values for your
# clusters, then source this file before running the example.
# HARMONY_DECENTRALIZED_TOPOLOGY_K8S_SITE_n selects the kubeconfig file and
# context used to reach site n; HARMONY_NATS_SITE_n_DOMAIN is that site's
# public domain, read via std::env::var by the example binary.

# Cluster 1
export HARMONY_DECENTRALIZED_TOPOLOGY_K8S_SITE_1="kubeconfig=$HOME/.kube/config,context=cluster-context"
export HARMONY_NATS_SITE_1_DOMAIN="your.domain.1"
# Cluster 2
export HARMONY_DECENTRALIZED_TOPOLOGY_K8S_SITE_2="kubeconfig=$HOME/.kube/config,context=cluster-context"
export HARMONY_NATS_SITE_2_DOMAIN="your.domain.2"
# Cluster 3
export HARMONY_DECENTRALIZED_TOPOLOGY_K8S_SITE_3="kubeconfig=$HOME/.kube/config,context=cluster-context"
export HARMONY_NATS_SITE_3_DOMAIN="your.domain.3"

View File

@@ -1,77 +0,0 @@
use harmony::{
inventory::Inventory,
modules::nats::{capability::NatsCluster, score_nats_supercluster::NatsSuperclusterScore},
topology::{K8sAnywhereTopology, decentralized::DecentralizedTopology},
};
/// Entry point for the NATS supercluster example: builds three identically
/// configured single-replica NATS clusters (one per site, domains supplied
/// via `HARMONY_NATS_SITE_{1,2,3}_DOMAIN`) and deploys them as one
/// supercluster through Harmony's CLI runner.
#[tokio::main]
async fn main() {
    // Shared TLS/JetStream settings applied identically to every site.
    let supercluster_ca_secret_name = "nats-supercluster-ca-bundle";
    let tls_cert_name = "nats-gateway";
    let jetstream_enabled = "false";
    let nats_namespace = "nats-example".to_string();

    // Builds one site's cluster config. The only per-site inputs are the
    // site name and the env var holding its public domain; everything else
    // (gateway advertise address, DNS name) is derived from those two.
    let make_site = |name: &str, domain_env: &str| {
        let name = name.to_string();
        let domain = std::env::var(domain_env)
            .unwrap_or_else(|_| panic!("missing domain in env ({domain_env}) for {name}"));
        NatsCluster {
            namespace: nats_namespace.clone(),
            domain: domain.clone(),
            replicas: 1,
            name: name.clone(),
            gateway_advertise: format!("{name}-gw.{domain}:443"),
            dns_name: format!("{name}-gw.{domain}"),
            supercluster_ca_secret_name,
            tls_cert_name,
            jetstream_enabled,
        }
    };

    let clusters = vec![
        make_site("site-1", "HARMONY_NATS_SITE_1_DOMAIN"),
        make_site("site-2", "HARMONY_NATS_SITE_2_DOMAIN"),
        make_site("site-3", "HARMONY_NATS_SITE_3_DOMAIN"),
    ];

    // `ca_certs: None` lets the score generate/collect the CA bundle itself.
    let nats_supercluster = NatsSuperclusterScore {
        nats_cluster: clusters,
        ca_certs: None,
    };

    harmony_cli::run(
        Inventory::autoload(),
        DecentralizedTopology::<K8sAnywhereTopology>::from_env(),
        vec![Box::new(nats_supercluster)],
        None,
    )
    .await
    .unwrap();
}

View File

@@ -1,19 +0,0 @@
# Manifest for the NATS supercluster example binary.
[package]
name = "example-nats-supercluster"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
# Workspace-internal example; never published to crates.io.
publish = false
[dependencies]
# Path dependencies on the sibling Harmony crates in this workspace.
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
cidr = { workspace = true }
tokio = { workspace = true }
harmony_macros = { path = "../../harmony_macros" }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
k8s-openapi.workspace = true

View File

@@ -1,6 +0,0 @@
# Environment template for the two-site NATS example.
# Replace the kubeconfig paths, contexts and domains with values for your
# clusters, then source this file before running the example.
# HARMONY_NATS_SITE_n selects the kubeconfig file and context used to reach
# site n; HARMONY_NATS_SITE_n_DOMAIN is that site's public domain.

# Cluster 1
export HARMONY_NATS_SITE_1="kubeconfig=$HOME/.config/nt/kube/config,context=your_cluster_1_kube_context_name"
export HARMONY_NATS_SITE_1_DOMAIN="your_cluster_1_public_domain"
# Cluster 2
export HARMONY_NATS_SITE_2="kubeconfig=$HOME/.config/nt/kube/config,context=your_cluster_2_kube_context_name"
export HARMONY_NATS_SITE_2_DOMAIN="your_cluster_2_public_domain"

View File

@@ -1,481 +0,0 @@
use std::{collections::BTreeMap, str::FromStr};
use harmony::{
interpret::{InterpretError, Outcome},
inventory::Inventory,
modules::{
cert_manager::{
capability::{CertificateManagement, CertificateManagementConfig},
crd::CaIssuer,
},
helm::chart::{HelmChartScore, HelmRepository, NonBlankString},
k8s::resource::K8sResourceScore,
okd::{
crd::route::{RoutePort, RouteSpec, RouteTargetReference, TLSConfig},
route::OKDRouteScore,
},
},
score::Score,
topology::{
HelmCommand, K8sAnywhereConfig, K8sAnywhereTopology, K8sclient, TlsRouter, Topology,
},
};
use harmony_macros::hurl;
use k8s_openapi::{
ByteString, api::core::v1::Secret, apimachinery::pkg::apis::meta::v1::ObjectMeta,
};
use log::{debug, info};
#[tokio::main]
async fn main() -> Result<(), InterpretError> {
let namespace = "nats-supercluster-test";
let self_signed_issuer_name = "harmony-self-signed-issuer";
let ca_issuer_name = "harmony-ca-issuer";
let root_ca_cert_name = "harmony-root-ca";
log::info!("starting nats supercluster bootstrap");
// --------------------------------------------------
// 1. Build site contexts
// --------------------------------------------------
let site1 = site(
"HARMONY_NATS_SITE_1",
"HARMONY_NATS_SITE_1_DOMAIN",
"nats-sto1-cert-test1",
);
let site2 = site(
"HARMONY_NATS_SITE_2",
"HARMONY_NATS_SITE_2_DOMAIN",
"nats-cb1-cert-test2",
);
// --------------------------------------------------
// 2. Ensure clusters are reachable
// --------------------------------------------------
log::info!("ensuring both topologies are ready");
tokio::try_join!(site1.topology.ensure_ready(), site2.topology.ensure_ready(),)?;
// --------------------------------------------------
// 3. Create certificates
// --------------------------------------------------
log::info!("creating certificates");
let root_ca_config = CertificateManagementConfig {
namespace: Some(namespace.into()),
acme_issuer: None,
ca_issuer: Some(CaIssuer {
secret_name: format!("{}-tls", root_ca_cert_name),
}),
self_signed: false,
};
let self_signed_config = CertificateManagementConfig {
namespace: Some(namespace.to_string().clone()),
acme_issuer: None,
ca_issuer: None,
self_signed: true,
};
tokio::try_join!(
create_nats_certs(
site1.topology.clone(),
&site1.cluster,
ca_issuer_name,
&root_ca_config,
self_signed_issuer_name,
&self_signed_config,
root_ca_cert_name
),
create_nats_certs(
site2.topology.clone(),
&site2.cluster,
ca_issuer_name,
&root_ca_config,
self_signed_issuer_name,
&self_signed_config,
root_ca_cert_name
),
)?;
// --------------------------------------------------
// 4. Build CA bundle
// --------------------------------------------------
log::info!("building supercluster CA bundle");
let mut ca_bundle = Vec::new();
ca_bundle.push(
site1
.topology
.get_ca_certificate(root_ca_cert_name.to_string(), &root_ca_config)
.await?,
);
ca_bundle.push(
site2
.topology
.get_ca_certificate(root_ca_cert_name.to_string(), &root_ca_config)
.await?,
);
// --------------------------------------------------
// 5. Build Scores
// --------------------------------------------------
log::info!("building scores");
let site1_scores = vec![
build_ca_bundle_secret_score(
site1.topology.clone(),
&site1.cluster,
&ca_bundle,
namespace.into(),
)
.await,
build_route_score(site1.topology.clone(), &site1.cluster, namespace.into()).await,
build_deploy_nats_score(
site1.topology.clone(),
&site1.cluster,
vec![&site2.cluster],
namespace.into(),
)
.await,
];
let site2_scores = vec![
build_ca_bundle_secret_score(
site2.topology.clone(),
&site2.cluster,
&ca_bundle,
namespace.into(),
)
.await,
build_route_score(site2.topology.clone(), &site2.cluster, namespace.into()).await,
build_deploy_nats_score(
site2.topology.clone(),
&site2.cluster,
vec![&site1.cluster],
namespace.into(),
)
.await,
];
// --------------------------------------------------
// 6. Apply Scores
// --------------------------------------------------
log::info!("applying scores");
tokio::try_join!(
apply_scores(site1.topology.clone(), site1_scores),
apply_scores(site2.topology.clone(), site2_scores),
)?;
log::info!("supercluster bootstrap complete");
log::info!(
"Enjoy! You can test your nats cluster by running : `kubectl exec -n {namespace} -it deployment/nats-box -- nats pub test hi`"
);
Ok(())
}
async fn apply_scores<T: Topology + 'static>(
topology: T,
scores: Vec<Box<dyn Score<T>>>,
) -> Result<(), InterpretError> {
info!("applying {} scores", scores.len());
harmony_cli::run(Inventory::autoload(), topology, scores, None)
.await
.map_err(|e| InterpretError::new(e.to_string()))?;
Ok(())
}
fn site(
topo_env: &str,
domain_env: &str,
cluster_name: &'static str,
) -> SiteContext<K8sAnywhereTopology> {
let domain = std::env::var(domain_env).expect("missing domain env");
let topology =
K8sAnywhereTopology::with_config(K8sAnywhereConfig::remote_k8s_from_env_var(topo_env));
SiteContext {
topology,
cluster: NatsCluster {
replicas: 1,
name: cluster_name,
gateway_advertise: format!("{cluster_name}-gw.{domain}:443"),
dns_name: format!("{cluster_name}-gw.{domain}"),
supercluster_ca_secret_name: "nats-supercluster-ca-bundle",
tls_cert_name: "nats-gateway",
jetstream_enabled: "true",
},
}
}
struct SiteContext<T> {
topology: T,
cluster: NatsCluster,
}
struct NatsCluster {
replicas: usize,
name: &'static str,
gateway_advertise: String,
dns_name: String,
supercluster_ca_secret_name: &'static str,
tls_cert_name: &'static str,
jetstream_enabled: &'static str,
}
async fn create_nats_certs<T: Topology + CertificateManagement>(
topology: T,
cluster: &NatsCluster,
ca_issuer_name: &str,
ca_cert_mgmt_config: &CertificateManagementConfig,
self_signed_issuer_name: &str,
self_signed_cert_config: &CertificateManagementConfig,
root_ca_cert_name: &str,
) -> Result<Outcome, InterpretError> {
//the order is pretty important
debug!(
"Applying certs to ns {:#?}",
ca_cert_mgmt_config.namespace.clone()
);
debug!("creating issuer '{}'", self_signed_issuer_name);
topology
.create_issuer(
self_signed_issuer_name.to_string(),
&self_signed_cert_config,
)
.await?;
debug!("creating certificate {root_ca_cert_name}");
topology
.create_certificate(
root_ca_cert_name.to_string(),
self_signed_issuer_name.to_string(),
Some(format!("harmony-{}-ca", cluster.name)),
None,
Some(true),
ca_cert_mgmt_config,
)
.await?;
debug!("creating issuer '{}'", ca_issuer_name);
topology
.create_issuer(ca_issuer_name.to_string(), ca_cert_mgmt_config)
.await?;
debug!("creating certificate {}", cluster.tls_cert_name);
topology
.create_certificate(
cluster.tls_cert_name.to_string(),
ca_issuer_name.to_string(),
None,
Some(vec![cluster.dns_name.clone()]),
Some(true),
ca_cert_mgmt_config,
)
.await?;
Ok(Outcome::success("success".to_string()))
}
async fn build_ca_bundle_secret(
namespace: &str,
nats_cluster: &NatsCluster,
bundle: &Vec<String>,
) -> Secret {
Secret {
metadata: ObjectMeta {
name: Some(nats_cluster.supercluster_ca_secret_name.to_string()),
namespace: Some(namespace.to_string()),
..Default::default()
},
data: Some(build_secret_data(bundle).await),
immutable: Some(false),
type_: Some("Opaque".to_string()),
string_data: None,
}
}
async fn build_secret_data(bundle: &Vec<String>) -> BTreeMap<String, ByteString> {
let mut data = BTreeMap::new();
data.insert(
"ca.crt".to_string(),
ByteString(bundle.join("\n").into_bytes()),
);
data
}
async fn build_ca_bundle_secret_score<T: Topology + K8sclient + 'static>(
_topology: T,
nats_cluster: &NatsCluster,
ca_bundle: &Vec<String>,
namespace: String,
) -> Box<dyn Score<T>> {
let bundle_secret = build_ca_bundle_secret(&namespace, nats_cluster, ca_bundle).await;
debug!(
"deploying secret to ns: {} \nsecret: {:#?}",
namespace, bundle_secret
);
let k8ssecret = K8sResourceScore::single(bundle_secret, Some(namespace));
Box::new(k8ssecret)
}
async fn build_route_score<T: Topology + K8sclient + 'static>(
_topology: T,
cluster: &NatsCluster,
namespace: String,
) -> Box<dyn Score<T>> {
let route = OKDRouteScore {
name: cluster.name.to_string(),
namespace,
spec: RouteSpec {
to: RouteTargetReference {
kind: "Service".to_string(),
name: cluster.name.to_string(),
weight: Some(100),
},
host: Some(cluster.dns_name.clone()),
port: Some(RoutePort { target_port: 7222 }),
tls: Some(TLSConfig {
insecure_edge_termination_policy: None,
termination: "passthrough".to_string(),
..Default::default()
}),
wildcard_policy: None,
..Default::default()
},
};
Box::new(route)
}
async fn build_deploy_nats_score<T: Topology + HelmCommand + TlsRouter + 'static>(
topology: T,
cluster: &NatsCluster,
peers: Vec<&NatsCluster>,
namespace: String,
) -> Box<dyn Score<T>> {
let mut gateway_gateways = String::new();
for peer in peers {
// Construct wss:// URLs on port 443 for the remote gateways
gateway_gateways.push_str(&format!(
r#"
- name: {}
urls:
- nats://{}"#,
peer.name, peer.gateway_advertise
));
}
let domain = topology.get_internal_domain().await.unwrap().unwrap();
// Inject gateway config into the 'merge' block to comply with chart structure
let values_yaml = Some(format!(
r#"config:
merge:
authorization:
default_permissions:
publish: ["TEST.*"]
subscribe: ["PUBLIC.>"]
users:
# - user: "admin"
# password: "admin_1"
# permissions:
# publish: ">"
# subscribe: ">"
- password: "enGk0cgZUabM6bN6FXHT"
user: "testUser"
accounts:
system:
users:
- user: "admin"
password: "admin_2"
logtime: true
debug: true
trace: true
system_account: system
cluster:
name: {cluster_name}
enabled: true
replicas: {replicas}
jetstream:
enabled: {jetstream_enabled}
fileStorage:
enabled: true
size: 10Gi
storageDirectory: /data/jetstream
leafnodes:
enabled: false
websocket:
enabled: false
ingress:
enabled: true
className: openshift-default
pathType: Prefix
hosts:
- nats-ws.{domain}
gateway:
enabled: true
port: 7222
name: {cluster_name}
merge:
advertise: {gateway_advertise}
gateways: {gateway_gateways}
tls:
enabled: true
secretName: {tls_secret_name}
# merge:
# ca_file: "/etc/nats-certs/gateway/ca.crt"
service:
ports:
gateway:
enabled: true
tlsCA:
enabled: true
secretName: {supercluster_ca_secret_name}
natsBox:
container:
image:
tag: nonroot"#,
cluster_name = cluster.name,
replicas = cluster.replicas,
domain = domain,
gateway_gateways = gateway_gateways,
gateway_advertise = cluster.gateway_advertise,
tls_secret_name = format!("{}-tls", cluster.tls_cert_name),
jetstream_enabled = cluster.jetstream_enabled,
supercluster_ca_secret_name = cluster.supercluster_ca_secret_name,
));
debug!("Prepared Helm Chart values : \n{values_yaml:#?}");
let nats = HelmChartScore {
namespace: Some(NonBlankString::from_str(&namespace).unwrap()),
release_name: NonBlankString::from_str(&cluster.name).unwrap(),
chart_name: NonBlankString::from_str("nats/nats").unwrap(),
chart_version: None,
values_overrides: None,
values_yaml,
create_namespace: true,
install_only: false,
repository: Some(HelmRepository::new(
"nats".to_string(),
hurl!("https://nats-io.github.io/k8s/helm/charts/"),
true,
)),
};
Box::new(nats)
}

View File

@@ -1,18 +0,0 @@
[package]
name = "example-nats"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
publish = false
[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
cidr = { workspace = true }
tokio = { workspace = true }
harmony_macros = { path = "../../harmony_macros" }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }

View File

@@ -1,120 +0,0 @@
use std::str::FromStr;
use harmony::{
inventory::Inventory,
modules::helm::chart::{HelmChartScore, HelmRepository, NonBlankString},
topology::{HelmCommand, K8sAnywhereConfig, K8sAnywhereTopology, TlsRouter, Topology},
};
use harmony_macros::hurl;
use log::info;
#[tokio::main]
async fn main() {
let site1_topo = K8sAnywhereTopology::with_config(K8sAnywhereConfig::remote_k8s_from_env_var(
"HARMONY_NATS_SITE_1",
));
let site2_topo = K8sAnywhereTopology::with_config(K8sAnywhereConfig::remote_k8s_from_env_var(
"HARMONY_NATS_SITE_2",
));
let site1_domain = site1_topo.get_internal_domain().await.unwrap().unwrap();
let site2_domain = site2_topo.get_internal_domain().await.unwrap().unwrap();
let site1_gateway = format!("nats-gateway.{}", site1_domain);
let site2_gateway = format!("nats-gateway.{}", site2_domain);
tokio::join!(
deploy_nats(
site1_topo,
"site-1",
vec![("site-2".to_string(), site2_gateway)]
),
deploy_nats(
site2_topo,
"site-2",
vec![("site-1".to_string(), site1_gateway)]
),
);
}
async fn deploy_nats<T: Topology + HelmCommand + TlsRouter + 'static>(
topology: T,
cluster_name: &str,
remote_gateways: Vec<(String, String)>,
) {
topology.ensure_ready().await.unwrap();
let mut gateway_gateways = String::new();
for (name, url) in remote_gateways {
gateway_gateways.push_str(&format!(
r#"
- name: {name}
urls:
- nats://{url}:7222"#
));
}
let values_yaml = Some(format!(
r#"config:
cluster:
enabled: true
replicas: 3
jetstream:
enabled: true
fileStorage:
enabled: true
size: 10Gi
storageDirectory: /data/jetstream
leafnodes:
enabled: false
# port: 7422
websocket:
enabled: true
ingress:
enabled: true
className: openshift-default
pathType: Prefix
hosts:
- nats-ws.{}
gateway:
enabled: true
name: {}
port: 7222
gateways: {}
service:
ports:
gateway:
enabled: true
natsBox:
container:
image:
tag: nonroot"#,
topology.get_internal_domain().await.unwrap().unwrap(),
cluster_name,
gateway_gateways,
));
let namespace = "nats";
let nats = HelmChartScore {
namespace: Some(NonBlankString::from_str(namespace).unwrap()),
release_name: NonBlankString::from_str("nats").unwrap(),
chart_name: NonBlankString::from_str("nats/nats").unwrap(),
chart_version: None,
values_overrides: None,
values_yaml,
create_namespace: true,
install_only: false,
repository: Some(HelmRepository::new(
"nats".to_string(),
hurl!("https://nats-io.github.io/k8s/helm/charts/"),
true,
)),
};
harmony_cli::run(Inventory::autoload(), topology, vec![Box::new(nats)], None)
.await
.unwrap();
info!(
"Enjoy! You can test your nats cluster by running : `kubectl exec -n {namespace} -it deployment/nats-box -- nats pub test hi`"
);
}

View File

@@ -1,16 +0,0 @@
[package]
name = "example-node-health"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
publish = false
[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
tokio = { workspace = true }
harmony_macros = { path = "../../harmony_macros" }
log = { workspace = true }
env_logger = { workspace = true }

View File

@@ -1,17 +0,0 @@
use harmony::{
inventory::Inventory, modules::node_health::NodeHealthScore, topology::K8sAnywhereTopology,
};
#[tokio::main]
async fn main() {
let node_health = NodeHealthScore {};
harmony_cli::run(
Inventory::autoload(),
K8sAnywhereTopology::from_env(),
vec![Box::new(node_health)],
None,
)
.await
.unwrap();
}

View File

@@ -1,22 +0,0 @@
[package]
name = "example-okd-cluster-alerts"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
publish = false
[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
harmony_secret = { path = "../../harmony_secret" }
harmony_secret_derive = { path = "../../harmony_secret_derive" }
cidr = { workspace = true }
tokio = { workspace = true }
harmony_macros = { path = "../../harmony_macros" }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
serde.workspace = true
brocade = { path = "../../brocade" }

View File

@@ -1,67 +0,0 @@
use harmony::{
inventory::Inventory,
modules::monitoring::{
alert_channel::discord_alert_channel::DiscordReceiver,
alert_rule::{
alerts::{
infra::opnsense::high_http_error_rate, k8s::pvc::high_pvc_fill_rate_over_two_days,
},
prometheus_alert_rule::AlertManagerRuleGroup,
},
okd::openshift_cluster_alerting_score::OpenshiftClusterAlertScore,
scrape_target::prometheus_node_exporter::PrometheusNodeExporter,
},
topology::{
K8sAnywhereTopology,
monitoring::{AlertMatcher, AlertRoute, MatchOp},
},
};
use harmony_macros::{hurl, ip};
#[tokio::main]
async fn main() {
let platform_matcher = AlertMatcher {
label: "prometheus".to_string(),
operator: MatchOp::Eq,
value: "openshift-monitoring/k8s".to_string(),
};
let severity = AlertMatcher {
label: "severity".to_string(),
operator: MatchOp::Eq,
value: "critical".to_string(),
};
let high_http_error_rate = high_http_error_rate();
let additional_rules = AlertManagerRuleGroup::new("test-rule", vec![high_http_error_rate]);
let scrape_target = PrometheusNodeExporter {
job_name: "firewall".to_string(),
metrics_path: "/metrics".to_string(),
listen_address: ip!("192.168.1.1"),
port: 9100,
..Default::default()
};
harmony_cli::run(
Inventory::autoload(),
K8sAnywhereTopology::from_env(),
vec![Box::new(OpenshiftClusterAlertScore {
receivers: vec![Box::new(DiscordReceiver {
name: "crit-wills-discord-channel-example".to_string(),
url: hurl!("https://test.io"),
route: AlertRoute {
matchers: vec![severity],
..AlertRoute::default("crit-wills-discord-channel-example".to_string())
},
})],
sender: harmony::modules::monitoring::okd::OpenshiftClusterAlertSender,
rules: vec![Box::new(additional_rules)],
scrape_targets: Some(vec![Box::new(scrape_target)]),
})],
None,
)
.await
.unwrap();
}

View File

@@ -19,5 +19,3 @@ log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
serde.workspace = true
brocade = { path = "../../brocade" }
schemars = "0.8"

View File

@@ -4,10 +4,7 @@ use crate::topology::{get_inventory, get_topology};
use harmony::{
config::secret::SshKeyPair,
data::{FileContent, FilePath},
modules::{
inventory::HarmonyDiscoveryStrategy,
okd::{installation::OKDInstallationPipeline, ipxe::OKDIpxeScore},
},
modules::okd::{installation::OKDInstallationPipeline, ipxe::OKDIpxeScore},
score::Score,
topology::HAClusterTopology,
};
@@ -29,8 +26,7 @@ async fn main() {
},
})];
scores
.append(&mut OKDInstallationPipeline::get_all_scores(HarmonyDiscoveryStrategy::MDNS).await);
scores.append(&mut OKDInstallationPipeline::get_all_scores().await);
harmony_cli::run(inventory, topology, scores, None)
.await

View File

@@ -1,25 +1,16 @@
use brocade::BrocadeOptions;
use cidr::Ipv4Cidr;
use harmony::{
hardware::{Location, SwitchGroup},
infra::{
brocade::{BrocadeSwitchClient, BrocadeSwitchConfig},
opnsense::OPNSenseManagementInterface,
},
hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
infra::opnsense::OPNSenseManagementInterface,
inventory::Inventory,
modules::brocade::BrocadeSwitchAuth,
topology::{HAClusterTopology, LogicalHost, UnmanagedRouter},
};
use harmony_macros::{ip, ipv4};
use harmony_secret::{Secret, SecretManager};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use std::{
net::IpAddr,
sync::{Arc, OnceLock},
};
use std::{net::IpAddr, sync::Arc};
#[derive(Secret, Serialize, Deserialize, JsonSchema, Debug, PartialEq)]
#[derive(Secret, Serialize, Deserialize, Debug, PartialEq)]
struct OPNSenseFirewallConfig {
username: String,
password: String,
@@ -31,25 +22,6 @@ pub async fn get_topology() -> HAClusterTopology {
name: String::from("opnsense-1"),
};
let switch_auth = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
.await
.expect("Failed to get credentials");
let switches: Vec<IpAddr> = vec![ip!("192.168.1.101")]; // TODO: Adjust me
let brocade_options = BrocadeOptions {
dry_run: *harmony::config::DRY_RUN,
..Default::default()
};
let switch_client = BrocadeSwitchClient::init(BrocadeSwitchConfig {
ips: switches,
auth: switch_auth,
options: brocade_options,
})
.await
.expect("Failed to connect to switch");
let switch_client = Arc::new(switch_client);
let config = SecretManager::get_or_prompt::<OPNSenseFirewallConfig>().await;
let config = config.unwrap();
@@ -66,7 +38,6 @@ pub async fn get_topology() -> HAClusterTopology {
let gateway_ipv4 = ipv4!("192.168.1.1");
let gateway_ip = IpAddr::V4(gateway_ipv4);
harmony::topology::HAClusterTopology {
kubeconfig: None,
domain_name: "demo.harmony.mcd".to_string(),
router: Arc::new(UnmanagedRouter::new(
gateway_ip,
@@ -87,9 +58,7 @@ pub async fn get_topology() -> HAClusterTopology {
name: "bootstrap".to_string(),
},
workers: vec![],
node_exporter: opnsense.clone(),
switch_client: switch_client.clone(),
network_manager: OnceLock::new(),
switch: vec![],
}
}

View File

@@ -19,5 +19,3 @@ log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
serde.workspace = true
brocade = { path = "../../brocade" }
schemars = "0.8"

View File

@@ -1,22 +1,14 @@
use brocade::BrocadeOptions;
use cidr::Ipv4Cidr;
use harmony::{
config::secret::OPNSenseFirewallCredentials,
hardware::{Location, SwitchGroup},
infra::{
brocade::{BrocadeSwitchClient, BrocadeSwitchConfig},
opnsense::OPNSenseManagementInterface,
},
infra::opnsense::OPNSenseManagementInterface,
inventory::Inventory,
modules::brocade::BrocadeSwitchAuth,
topology::{HAClusterTopology, LogicalHost, UnmanagedRouter},
};
use harmony_macros::{ip, ipv4};
use harmony_secret::SecretManager;
use std::{
net::IpAddr,
sync::{Arc, OnceLock},
};
use std::{net::IpAddr, sync::Arc};
pub async fn get_topology() -> HAClusterTopology {
let firewall = harmony::topology::LogicalHost {
@@ -24,25 +16,6 @@ pub async fn get_topology() -> HAClusterTopology {
name: String::from("opnsense-1"),
};
let switch_auth = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
.await
.expect("Failed to get credentials");
let switches: Vec<IpAddr> = vec![ip!("192.168.1.101")]; // TODO: Adjust me
let brocade_options = BrocadeOptions {
dry_run: *harmony::config::DRY_RUN,
..Default::default()
};
let switch_client = BrocadeSwitchClient::init(BrocadeSwitchConfig {
ips: switches,
auth: switch_auth,
options: brocade_options,
})
.await
.expect("Failed to connect to switch");
let switch_client = Arc::new(switch_client);
let config = SecretManager::get_or_prompt::<OPNSenseFirewallCredentials>().await;
let config = config.unwrap();
@@ -59,7 +32,6 @@ pub async fn get_topology() -> HAClusterTopology {
let gateway_ipv4 = ipv4!("192.168.1.1");
let gateway_ip = IpAddr::V4(gateway_ipv4);
harmony::topology::HAClusterTopology {
kubeconfig: None,
domain_name: "demo.harmony.mcd".to_string(),
router: Arc::new(UnmanagedRouter::new(
gateway_ip,
@@ -80,9 +52,7 @@ pub async fn get_topology() -> HAClusterTopology {
name: "cp0".to_string(),
},
workers: vec![],
node_exporter: opnsense.clone(),
switch_client: switch_client.clone(),
network_manager: OnceLock::new(),
switch: vec![],
}
}

View File

@@ -1,14 +0,0 @@
[package]
name = "example-openbao"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_macros = { path = "../../harmony_macros" }
harmony_types = { path = "../../harmony_types" }
tokio.workspace = true
url.workspace = true

View File

@@ -1,7 +0,0 @@
To install an openbao instance with harmony simply `cargo run -p example-openbao` .
Depending on your environement configuration, it will either install a k3d cluster locally and deploy on it, or install to a remote cluster.
Then follow the openbao documentation to initialize and unseal, this will make openbao usable.
https://openbao.org/docs/platform/k8s/helm/run/

View File

@@ -1,19 +0,0 @@
use harmony::{
inventory::Inventory, modules::openbao::OpenbaoScore, topology::K8sAnywhereTopology,
};
#[tokio::main]
async fn main() {
let openbao = OpenbaoScore {
host: "openbao.sebastien.sto1.nationtech.io".to_string(),
};
harmony_cli::run(
Inventory::autoload(),
K8sAnywhereTopology::from_env(),
vec![Box::new(openbao)],
None,
)
.await
.unwrap();
}

View File

@@ -1,18 +0,0 @@
[package]
name = "example-operatorhub-catalogsource"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
publish = false
[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
cidr = { workspace = true }
tokio = { workspace = true }
harmony_macros = { path = "../../harmony_macros" }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }

View File

@@ -1,20 +0,0 @@
use harmony::{
inventory::Inventory,
modules::{k8s::apps::OperatorHubCatalogSourceScore, postgresql::CloudNativePgOperatorScore},
topology::K8sAnywhereTopology,
};
#[tokio::main]
async fn main() {
let operatorhub_catalog = OperatorHubCatalogSourceScore::default();
let cnpg_operator = CloudNativePgOperatorScore::default_openshift();
harmony_cli::run(
Inventory::autoload(),
K8sAnywhereTopology::from_env(),
vec![Box::new(operatorhub_catalog), Box::new(cnpg_operator)],
None,
)
.await
.unwrap();
}

View File

@@ -8,7 +8,7 @@ publish = false
[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_tui = { path = "../../harmony_tui" }
harmony_types = { path = "../../harmony_types" }
cidr = { workspace = true }
tokio = { workspace = true }
@@ -16,7 +16,3 @@ harmony_macros = { path = "../../harmony_macros" }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
harmony_secret = { path = "../../harmony_secret" }
brocade = { path = "../../brocade" }
serde = { workspace = true }
schemars = "0.8"

View File

@@ -1,3 +0,0 @@
export HARMONY_SECRET_NAMESPACE=example-opnsense
export HARMONY_SECRET_STORE=file
export RUST_LOG=info

View File

@@ -1,78 +1,111 @@
use harmony::{
config::secret::OPNSenseFirewallCredentials,
infra::opnsense::OPNSenseFirewall,
inventory::Inventory,
modules::{dhcp::DhcpScore, opnsense::OPNsenseShellCommandScore},
topology::LogicalHost,
use std::{
net::{IpAddr, Ipv4Addr},
sync::Arc,
};
use harmony_macros::{ip, ipv4};
use harmony_secret::{Secret, SecretManager};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use cidr::Ipv4Cidr;
use harmony::{
hardware::{HostCategory, Location, PhysicalHost, SwitchGroup},
infra::opnsense::OPNSenseManagementInterface,
inventory::Inventory,
modules::{
dummy::{ErrorScore, PanicScore, SuccessScore},
http::StaticFilesHttpScore,
okd::{dhcp::OKDDhcpScore, dns::OKDDnsScore, load_balancer::OKDLoadBalancerScore},
opnsense::OPNsenseShellCommandScore,
tftp::TftpScore,
},
topology::{LogicalHost, UnmanagedRouter},
};
use harmony_macros::{ip, mac_address};
use harmony_types::net::Url;
#[tokio::main]
async fn main() {
let firewall = LogicalHost {
ip: ip!("192.168.55.1"),
let firewall = harmony::topology::LogicalHost {
ip: ip!("192.168.5.229"),
name: String::from("opnsense-1"),
};
let opnsense_auth = SecretManager::get_or_prompt::<OPNSenseFirewallCredentials>()
.await
.expect("Failed to get credentials");
let opnsense = OPNSenseFirewall::new(
firewall,
None,
&opnsense_auth.username,
&opnsense_auth.password,
)
.await;
let dhcp_score = DhcpScore {
dhcp_range: (
ipv4!("192.168.55.100").into(),
ipv4!("192.168.55.150").into(),
),
host_binding: vec![],
next_server: None,
boot_filename: None,
filename: None,
filename64: None,
filenameipxe: Some("filename.ipxe".to_string()),
domain: None,
let opnsense = Arc::new(
harmony::infra::opnsense::OPNSenseFirewall::new(firewall, None, "root", "opnsense").await,
);
let lan_subnet = Ipv4Addr::new(10, 100, 8, 0);
let gateway_ipv4 = Ipv4Addr::new(10, 100, 8, 1);
let gateway_ip = IpAddr::V4(gateway_ipv4);
let topology = harmony::topology::HAClusterTopology {
domain_name: "demo.harmony.mcd".to_string(),
router: Arc::new(UnmanagedRouter::new(
gateway_ip,
Ipv4Cidr::new(lan_subnet, 24).unwrap(),
)),
load_balancer: opnsense.clone(),
firewall: opnsense.clone(),
tftp_server: opnsense.clone(),
http_server: opnsense.clone(),
dhcp_server: opnsense.clone(),
dns_server: opnsense.clone(),
control_plane: vec![LogicalHost {
ip: ip!("10.100.8.20"),
name: "cp0".to_string(),
}],
bootstrap_host: LogicalHost {
ip: ip!("10.100.8.20"),
name: "cp0".to_string(),
},
workers: vec![],
switch: vec![],
};
// let dns_score = OKDDnsScore::new(&topology);
// let load_balancer_score = OKDLoadBalancerScore::new(&topology);
//
// let tftp_score = TftpScore::new(Url::LocalFolder("./data/watchguard/tftpboot".to_string()));
// let http_score = StaticFilesHttpScore {
// folder_to_serve: Some(Url::LocalFolder(
// "./data/watchguard/pxe-http-files".to_string(),
// )),
// files: vec![],
// remote_path: None,
// };
let opnsense_config = opnsense.get_opnsense_config();
harmony_cli::run(
Inventory::autoload(),
opnsense,
vec![
Box::new(dhcp_score),
Box::new(OPNsenseShellCommandScore {
opnsense: opnsense_config,
command: "touch /tmp/helloharmonytouching_2".to_string(),
}),
let inventory = Inventory {
location: Location::new(
"232 des Éperviers, Wendake, Qc, G0A 4V0".to_string(),
"wk".to_string(),
),
switch: SwitchGroup::from([]),
firewall_mgmt: Box::new(OPNSenseManagementInterface::new()),
storage_host: vec![],
worker_host: vec![],
control_plane_host: vec![
PhysicalHost::empty(HostCategory::Server)
.mac_address(mac_address!("08:00:27:62:EC:C3")),
],
};
// TODO regroup smaller scores in a larger one such as this
// let okd_boostrap_preparation();
let dhcp_score = OKDDhcpScore::new(&topology, &inventory);
let dns_score = OKDDnsScore::new(&topology);
let load_balancer_score = OKDLoadBalancerScore::new(&topology);
let tftp_score = TftpScore::new(Url::LocalFolder("./data/watchguard/tftpboot".to_string()));
let http_score = StaticFilesHttpScore {
folder_to_serve: Some(Url::LocalFolder(
"./data/watchguard/pxe-http-files".to_string(),
)),
files: vec![],
remote_path: None,
};
harmony_tui::run(
inventory,
topology,
vec![
Box::new(dns_score),
Box::new(dhcp_score),
Box::new(load_balancer_score),
Box::new(tftp_score),
Box::new(http_score),
Box::new(OPNsenseShellCommandScore {
opnsense: opnsense.get_opnsense_config(),
command: "touch /tmp/helloharmonytouching".to_string(),
}),
Box::new(SuccessScore {}),
Box::new(ErrorScore {}),
Box::new(PanicScore {}),
],
None,
)
.await
.unwrap();
}
#[derive(Secret, Serialize, Deserialize, JsonSchema, Debug)]
pub struct BrocadeSwitchAuth {
pub username: String,
pub password: String,
}

View File

@@ -1,21 +0,0 @@
[package]
name = "example-opnsense-node-exporter"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
harmony_secret = { path = "../../harmony_secret" }
harmony_secret_derive = { path = "../../harmony_secret_derive" }
cidr = { workspace = true }
tokio = { workspace = true }
harmony_macros = { path = "../../harmony_macros" }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
serde.workspace = true
async-trait.workspace = true

View File

@@ -1,71 +0,0 @@
use std::sync::Arc;
use async_trait::async_trait;
use harmony::{
executors::ExecutorError,
inventory::Inventory,
modules::opnsense::node_exporter::NodeExporterScore,
topology::{PreparationError, PreparationOutcome, Topology, node_exporter::NodeExporter},
};
use harmony_macros::ip;
#[derive(Debug)]
struct OpnSenseTopology {
node_exporter: Arc<dyn NodeExporter>,
}
#[async_trait]
impl Topology for OpnSenseTopology {
async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
Ok(PreparationOutcome::Success {
details: "Success".to_string(),
})
}
fn name(&self) -> &str {
"OpnsenseTopology"
}
}
#[async_trait]
impl NodeExporter for OpnSenseTopology {
async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
self.node_exporter.ensure_initialized().await
}
async fn commit_config(&self) -> Result<(), ExecutorError> {
self.node_exporter.commit_config().await
}
async fn reload_restart(&self) -> Result<(), ExecutorError> {
self.node_exporter.reload_restart().await
}
}
#[tokio::main]
async fn main() {
let firewall = harmony::topology::LogicalHost {
ip: ip!("192.168.1.1"),
name: String::from("fw0"),
};
let opnsense = Arc::new(
harmony::infra::opnsense::OPNSenseFirewall::new(firewall, None, "root", "opnsense").await,
);
let topology = OpnSenseTopology {
node_exporter: opnsense.clone(),
};
let inventory = Inventory::empty();
let node_exporter_score = NodeExporterScore {};
harmony_cli::run(
inventory,
topology,
vec![Box::new(node_exporter_score)],
None,
)
.await
.unwrap();
}

View File

@@ -1,18 +0,0 @@
[package]
name = "example-postgresql"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
publish = false
[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
cidr = { workspace = true }
tokio = { workspace = true }
harmony_macros = { path = "../../harmony_macros" }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }

View File

@@ -1,26 +0,0 @@
use harmony::{
inventory::Inventory,
modules::postgresql::{PostgreSQLScore, capability::PostgreSQLConfig},
topology::K8sAnywhereTopology,
};
#[tokio::main]
async fn main() {
    // Configure the CNPG-backed PostgreSQL cluster. Unset fields fall back
    // to Harmony defaults (mirroring CNPG defaults: 1 instance, 1Gi storage,
    // "default" namespace) — here we override the name and namespace only.
    let config = PostgreSQLConfig {
        cluster_name: "harmony-postgres-example".to_string(),
        namespace: "harmony-postgres-example".to_string(),
        ..Default::default()
    };

    let score = PostgreSQLScore { config };

    // Deploy against whatever Kubernetes flavor the environment provides.
    harmony_cli::run(
        Inventory::autoload(),
        K8sAnywhereTopology::from_env(),
        vec![Box::new(score)],
        None,
    )
    .await
    .unwrap();
}

View File

@@ -1,18 +0,0 @@
# Manifest for the publicly-exposed PostgreSQL example.
[package]
name = "example-public-postgres"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
# Example crates are never published to a registry.
publish = false
[dependencies]
# Core Harmony crates, referenced by path from the workspace.
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
cidr = { workspace = true }
tokio = { workspace = true }
harmony_macros = { path = "../../harmony_macros" }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }

View File

@@ -1,37 +0,0 @@
use harmony::{
inventory::Inventory,
modules::postgresql::{
PostgreSQLConnectionScore, PublicPostgreSQLScore, capability::PostgreSQLConfig,
},
topology::K8sAnywhereTopology,
};
#[tokio::main]
async fn main() {
    // Deploy a PostgreSQL cluster and expose it publicly at the given
    // hostname. Unset config fields use Harmony defaults (1 instance,
    // 1Gi storage).
    let postgres = PublicPostgreSQLScore {
        hostname: "postgrestest.sto1.nationtech.io".to_string(),
        config: PostgreSQLConfig {
            namespace: "harmony-public-postgres".to_string(),
            cluster_name: "harmony-postgres-example".to_string(),
            ..Default::default()
        },
    };

    // Follow-up score: verify the cluster is reachable through the public
    // endpoint (TLS-terminated on port 443).
    let test_connection = PostgreSQLConnectionScore {
        name: "harmony-postgres-example".to_string(),
        namespace: "harmony-public-postgres".to_string(),
        cluster_name: "harmony-postgres-example".to_string(),
        hostname: Some("postgrestest.sto1.nationtech.io".to_string()),
        port_override: Some(443),
    };

    harmony_cli::run(
        Inventory::autoload(),
        K8sAnywhereTopology::from_env(),
        vec![Box::new(postgres), Box::new(test_connection)],
        None,
    )
    .await
    .unwrap();
}

View File

@@ -1,11 +0,0 @@
# Manifest for the Rook-Ceph OSD removal example.
[package]
name = "example-remove-rook-osd"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
# Keep this example out of any registry publish, matching the other
# example crates in the workspace (they all set `publish = false`).
publish = false
[dependencies]
harmony = { version = "0.1.0", path = "../../harmony" }
harmony_cli = { version = "0.1.0", path = "../../harmony_cli" }
tokio.workspace = true

View File

@@ -1,18 +0,0 @@
use harmony::{
inventory::Inventory, modules::storage::ceph::ceph_remove_osd_score::CephRemoveOsd,
topology::K8sAnywhereTopology,
};
#[tokio::main]
async fn main() {
    // Score describing which OSD deployment to decommission and the
    // namespace where the Rook-Ceph operator lives.
    let score = CephRemoveOsd {
        rook_ceph_namespace: "rook-ceph".to_string(),
        osd_deployment_name: "rook-ceph-osd-2".to_string(),
    };

    // Resolve the cluster from the environment and execute the removal.
    harmony_cli::run(
        Inventory::autoload(),
        K8sAnywhereTopology::from_env(),
        vec![Box::new(score)],
        None,
    )
    .await
    .unwrap();
}

Some files were not shown because too many files have changed in this diff Show More