Compare commits
73 Commits
fix/clippy
...
feat/postg
| Author | SHA1 | Date | |
|---|---|---|---|
| 204795a74f | |||
| 66a9a76a6b | |||
| 440e684b35 | |||
| b0383454f0 | |||
| 9e8f3ce52f | |||
| c3ec7070ec | |||
| 29821d5e9f | |||
| 446e079595 | |||
| e0da5764fb | |||
| e9cab92585 | |||
| d06bd4dac6 | |||
| 142300802d | |||
| 2254641f3d | |||
| b61e4f9a96 | |||
| 2e367d88d4 | |||
| 9edc42a665 | |||
| f242aafebb | |||
| 3e14ebd62c | |||
| 1b19638df4 | |||
| d39b1957cd | |||
| 357ca93d90 | |||
| 8103932f23 | |||
| 9617e1cfde | |||
| a953284386 | |||
| bfde5f58ed | |||
| 83c1cc82b6 | |||
| 66d346a10c | |||
| 06a004a65d | |||
| 9d4e6acac0 | |||
| 4ff57062ae | |||
| 50ce54ea66 | |||
|
|
827a49e56b | ||
| 95cfc03518 | |||
| c80ede706b | |||
| b2825ec1ef | |||
| 609d7acb5d | |||
| de761cf538 | |||
| c069207f12 | |||
|
|
7368184917 | ||
| 05205f4ac1 | |||
| 3174645c97 | |||
| 7536f4ec4b | |||
| 464347d3e5 | |||
| 7f415f5b98 | |||
| 2a520a1d7c | |||
| 987f195e2f | |||
| 14d1823d15 | |||
| 2a48d51479 | |||
| 20a227bb41 | |||
| ce91ee0168 | |||
| ed7f81aa1f | |||
| cb66b7592e | |||
| a815f6ac9c | |||
| 2d891e4463 | |||
| f66e58b9ca | |||
| ea39d93aa7 | |||
| 6989d208cf | |||
| c0d54a4466 | |||
| fc384599a1 | |||
| c0bd8007c7 | |||
| 7dff70edcf | |||
| 06a0c44c3c | |||
| 85bec66e58 | |||
| 1f3796f503 | |||
| cf576192a8 | |||
| 5f78300d78 | |||
| f7e9669009 | |||
| 2d3c32469c | |||
| f65e16df7b | |||
| 1cec398d4d | |||
| 58b6268989 | |||
| 4a500e4eb7 | |||
| f073b7e5fb |
96
Cargo.lock
generated
96
Cargo.lock
generated
@@ -429,6 +429,15 @@ dependencies = [
|
|||||||
"wait-timeout",
|
"wait-timeout",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "assertor"
|
||||||
|
version = "0.0.4"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "4ff24d87260733dc86d38a11c60d9400ce4a74a05d0dafa2a6f5ab249cd857cb"
|
||||||
|
dependencies = [
|
||||||
|
"num-traits",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "async-broadcast"
|
name = "async-broadcast"
|
||||||
version = "0.7.2"
|
version = "0.7.2"
|
||||||
@@ -665,6 +674,22 @@ dependencies = [
|
|||||||
"serde_with",
|
"serde_with",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "brocade"
|
||||||
|
version = "0.1.0"
|
||||||
|
dependencies = [
|
||||||
|
"async-trait",
|
||||||
|
"env_logger",
|
||||||
|
"harmony_secret",
|
||||||
|
"harmony_types",
|
||||||
|
"log",
|
||||||
|
"regex",
|
||||||
|
"russh",
|
||||||
|
"russh-keys",
|
||||||
|
"serde",
|
||||||
|
"tokio",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "brotli"
|
name = "brotli"
|
||||||
version = "8.0.2"
|
version = "8.0.2"
|
||||||
@@ -1751,10 +1776,26 @@ dependencies = [
|
|||||||
"url",
|
"url",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "example-multisite-postgres"
|
||||||
|
version = "0.1.0"
|
||||||
|
dependencies = [
|
||||||
|
"cidr",
|
||||||
|
"env_logger",
|
||||||
|
"harmony",
|
||||||
|
"harmony_cli",
|
||||||
|
"harmony_macros",
|
||||||
|
"harmony_types",
|
||||||
|
"log",
|
||||||
|
"tokio",
|
||||||
|
"url",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "example-nanodc"
|
name = "example-nanodc"
|
||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
|
"brocade",
|
||||||
"cidr",
|
"cidr",
|
||||||
"env_logger",
|
"env_logger",
|
||||||
"harmony",
|
"harmony",
|
||||||
@@ -1763,6 +1804,7 @@ dependencies = [
|
|||||||
"harmony_tui",
|
"harmony_tui",
|
||||||
"harmony_types",
|
"harmony_types",
|
||||||
"log",
|
"log",
|
||||||
|
"serde",
|
||||||
"tokio",
|
"tokio",
|
||||||
"url",
|
"url",
|
||||||
]
|
]
|
||||||
@@ -1781,6 +1823,7 @@ dependencies = [
|
|||||||
name = "example-okd-install"
|
name = "example-okd-install"
|
||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
|
"brocade",
|
||||||
"cidr",
|
"cidr",
|
||||||
"env_logger",
|
"env_logger",
|
||||||
"harmony",
|
"harmony",
|
||||||
@@ -1795,17 +1838,47 @@ dependencies = [
|
|||||||
"url",
|
"url",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "example-openbao"
|
||||||
|
version = "0.1.0"
|
||||||
|
dependencies = [
|
||||||
|
"harmony",
|
||||||
|
"harmony_cli",
|
||||||
|
"harmony_macros",
|
||||||
|
"harmony_types",
|
||||||
|
"tokio",
|
||||||
|
"url",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "example-operatorhub-catalogsource"
|
||||||
|
version = "0.1.0"
|
||||||
|
dependencies = [
|
||||||
|
"cidr",
|
||||||
|
"env_logger",
|
||||||
|
"harmony",
|
||||||
|
"harmony_cli",
|
||||||
|
"harmony_macros",
|
||||||
|
"harmony_types",
|
||||||
|
"log",
|
||||||
|
"tokio",
|
||||||
|
"url",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "example-opnsense"
|
name = "example-opnsense"
|
||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
|
"brocade",
|
||||||
"cidr",
|
"cidr",
|
||||||
"env_logger",
|
"env_logger",
|
||||||
"harmony",
|
"harmony",
|
||||||
"harmony_macros",
|
"harmony_macros",
|
||||||
|
"harmony_secret",
|
||||||
"harmony_tui",
|
"harmony_tui",
|
||||||
"harmony_types",
|
"harmony_types",
|
||||||
"log",
|
"log",
|
||||||
|
"serde",
|
||||||
"tokio",
|
"tokio",
|
||||||
"url",
|
"url",
|
||||||
]
|
]
|
||||||
@@ -1814,6 +1887,7 @@ dependencies = [
|
|||||||
name = "example-pxe"
|
name = "example-pxe"
|
||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
|
"brocade",
|
||||||
"cidr",
|
"cidr",
|
||||||
"env_logger",
|
"env_logger",
|
||||||
"harmony",
|
"harmony",
|
||||||
@@ -1828,6 +1902,15 @@ dependencies = [
|
|||||||
"url",
|
"url",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "example-remove-rook-osd"
|
||||||
|
version = "0.1.0"
|
||||||
|
dependencies = [
|
||||||
|
"harmony",
|
||||||
|
"harmony_cli",
|
||||||
|
"tokio",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "example-rust"
|
name = "example-rust"
|
||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
@@ -2305,9 +2388,11 @@ name = "harmony"
|
|||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"askama",
|
"askama",
|
||||||
|
"assertor",
|
||||||
"async-trait",
|
"async-trait",
|
||||||
"base64 0.22.1",
|
"base64 0.22.1",
|
||||||
"bollard",
|
"bollard",
|
||||||
|
"brocade",
|
||||||
"chrono",
|
"chrono",
|
||||||
"cidr",
|
"cidr",
|
||||||
"convert_case",
|
"convert_case",
|
||||||
@@ -2338,6 +2423,7 @@ dependencies = [
|
|||||||
"once_cell",
|
"once_cell",
|
||||||
"opnsense-config",
|
"opnsense-config",
|
||||||
"opnsense-config-xml",
|
"opnsense-config-xml",
|
||||||
|
"option-ext",
|
||||||
"pretty_assertions",
|
"pretty_assertions",
|
||||||
"reqwest 0.11.27",
|
"reqwest 0.11.27",
|
||||||
"russh",
|
"russh",
|
||||||
@@ -2490,6 +2576,7 @@ version = "0.1.0"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"rand 0.9.2",
|
"rand 0.9.2",
|
||||||
"serde",
|
"serde",
|
||||||
|
"serde_json",
|
||||||
"url",
|
"url",
|
||||||
]
|
]
|
||||||
|
|
||||||
@@ -3878,6 +3965,7 @@ checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e"
|
|||||||
name = "opnsense-config"
|
name = "opnsense-config"
|
||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
|
"assertor",
|
||||||
"async-trait",
|
"async-trait",
|
||||||
"chrono",
|
"chrono",
|
||||||
"env_logger",
|
"env_logger",
|
||||||
@@ -4537,9 +4625,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "regex"
|
name = "regex"
|
||||||
version = "1.11.2"
|
version = "1.11.3"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "23d7fd106d8c02486a8d64e778353d1cffe08ce79ac2e82f540c86d0facf6912"
|
checksum = "8b5288124840bee7b386bc413c487869b360b2b4ec421ea56425128692f2a82c"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"aho-corasick 1.1.3",
|
"aho-corasick 1.1.3",
|
||||||
"memchr",
|
"memchr",
|
||||||
@@ -4549,9 +4637,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "regex-automata"
|
name = "regex-automata"
|
||||||
version = "0.4.10"
|
version = "0.4.11"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "6b9458fa0bfeeac22b5ca447c63aaf45f28439a709ccd244698632f9aa6394d6"
|
checksum = "833eb9ce86d40ef33cb1306d8accf7bc8ec2bfea4355cbdebb3df68b40925cad"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"aho-corasick 1.1.3",
|
"aho-corasick 1.1.3",
|
||||||
"memchr",
|
"memchr",
|
||||||
|
|||||||
15
Cargo.toml
15
Cargo.toml
@@ -14,7 +14,9 @@ members = [
|
|||||||
"harmony_composer",
|
"harmony_composer",
|
||||||
"harmony_inventory_agent",
|
"harmony_inventory_agent",
|
||||||
"harmony_secret_derive",
|
"harmony_secret_derive",
|
||||||
"harmony_secret", "adr/agent_discovery/mdns",
|
"harmony_secret",
|
||||||
|
"adr/agent_discovery/mdns",
|
||||||
|
"brocade",
|
||||||
]
|
]
|
||||||
|
|
||||||
[workspace.package]
|
[workspace.package]
|
||||||
@@ -66,5 +68,12 @@ thiserror = "2.0.14"
|
|||||||
serde = { version = "1.0.209", features = ["derive", "rc"] }
|
serde = { version = "1.0.209", features = ["derive", "rc"] }
|
||||||
serde_json = "1.0.127"
|
serde_json = "1.0.127"
|
||||||
askama = "0.14"
|
askama = "0.14"
|
||||||
sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite" ] }
|
sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite"] }
|
||||||
reqwest = { version = "0.12", features = ["blocking", "stream", "rustls-tls", "http2", "json"], default-features = false }
|
reqwest = { version = "0.12", features = [
|
||||||
|
"blocking",
|
||||||
|
"stream",
|
||||||
|
"rustls-tls",
|
||||||
|
"http2",
|
||||||
|
"json",
|
||||||
|
], default-features = false }
|
||||||
|
assertor = "0.0.4"
|
||||||
|
|||||||
114
adr/015-higher-order-topologies.md
Normal file
114
adr/015-higher-order-topologies.md
Normal file
@@ -0,0 +1,114 @@
|
|||||||
|
# Architecture Decision Record: Higher-Order Topologies
|
||||||
|
|
||||||
|
**Initial Author:** Jean-Gabriel Gill-Couture
|
||||||
|
**Initial Date:** 2025-12-08
|
||||||
|
**Last Updated Date:** 2025-12-08
|
||||||
|
|
||||||
|
## Status
|
||||||
|
|
||||||
|
Implemented
|
||||||
|
|
||||||
|
## Context
|
||||||
|
|
||||||
|
Harmony models infrastructure as **Topologies** (deployment targets like `K8sAnywhereTopology`, `LinuxHostTopology`) implementing **Capabilities** (tech traits like `PostgreSQL`, `Docker`).
|
||||||
|
|
||||||
|
**Higher-Order Topologies** (e.g., `FailoverTopology<T>`) compose/orchestrate capabilities *across* multiple underlying topologies (e.g., primary+replica `T`).
|
||||||
|
|
||||||
|
Naive design requires manual `impl Capability for HigherOrderTopology<T>` *per T per capability*, causing:
|
||||||
|
- **Impl explosion**: N topologies × M capabilities = N×M boilerplate.
|
||||||
|
- **ISP violation**: Topologies forced to impl unrelated capabilities.
|
||||||
|
- **Maintenance hell**: New topology needs impls for *all* orchestrated capabilities; new capability needs impls for *all* topologies/higher-order.
|
||||||
|
- **Barrier to extension**: Users can't easily add topologies without todos/panics.
|
||||||
|
|
||||||
|
This makes scaling Harmony impractical as ecosystem grows.
|
||||||
|
|
||||||
|
## Decision
|
||||||
|
|
||||||
|
Use **blanket trait impls** on higher-order topologies to *automatically* derive orchestration:
|
||||||
|
|
||||||
|
````rust
|
||||||
|
/// Higher-Order Topology: Orchestrates capabilities across sub-topologies.
|
||||||
|
pub struct FailoverTopology<T> {
|
||||||
|
/// Primary sub-topology.
|
||||||
|
primary: T,
|
||||||
|
/// Replica sub-topology.
|
||||||
|
replica: T,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Automatically provides PostgreSQL failover for *any* `T: PostgreSQL`.
|
||||||
|
/// Delegates to primary for queries; orchestrates deploy across both.
|
||||||
|
#[async_trait]
|
||||||
|
impl<T: PostgreSQL> PostgreSQL for FailoverTopology<T> {
|
||||||
|
async fn deploy(&self, config: &PostgreSQLConfig) -> Result<String, String> {
|
||||||
|
// Deploy primary; extract certs/endpoint;
|
||||||
|
// deploy replica with pg_basebackup + TLS passthrough.
|
||||||
|
// (Full impl logged/elaborated.)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delegate queries to primary.
|
||||||
|
async fn get_replication_certs(&self, cluster_name: &str) -> Result<ReplicationCerts, String> {
|
||||||
|
self.primary.get_replication_certs(cluster_name).await
|
||||||
|
}
|
||||||
|
// ...
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Similarly for other capabilities.
|
||||||
|
#[async_trait]
|
||||||
|
impl<T: Docker> Docker for FailoverTopology<T> {
|
||||||
|
// Failover Docker orchestration.
|
||||||
|
}
|
||||||
|
````
|
||||||
|
|
||||||
|
**Key properties:**
|
||||||
|
- **Auto-derivation**: `Failover<K8sAnywhere>` gets `PostgreSQL` iff `K8sAnywhere: PostgreSQL`.
|
||||||
|
- **No boilerplate**: One blanket impl per capability *per higher-order type*.
|
||||||
|
|
||||||
|
## Rationale
|
||||||
|
|
||||||
|
- **Composition via generics**: Rust trait solver auto-selects impls; zero runtime cost.
|
||||||
|
- **Compile-time safety**: Missing `T: Capability` → compile error (no panics).
|
||||||
|
- **Scalable**: O(capabilities) impls per higher-order; new `T` auto-works.
|
||||||
|
- **ISP-respecting**: Capabilities only surface if sub-topology provides.
|
||||||
|
- **Centralized logic**: Orchestration (e.g., cert propagation) in one place.
|
||||||
|
|
||||||
|
**Example usage:**
|
||||||
|
````rust
|
||||||
|
// ✅ Works: K8sAnywhere: PostgreSQL → Failover provides failover PG
|
||||||
|
let pg_failover: FailoverTopology<K8sAnywhereTopology> = ...;
|
||||||
|
pg_failover.deploy_pg(config).await;
|
||||||
|
|
||||||
|
// ✅ Works: LinuxHost: Docker → Failover provides failover Docker
|
||||||
|
let docker_failover: FailoverTopology<LinuxHostTopology> = ...;
|
||||||
|
docker_failover.deploy_docker(...).await;
|
||||||
|
|
||||||
|
// ❌ Compile fail: K8sAnywhere !: Docker
|
||||||
|
let invalid: FailoverTopology<K8sAnywhereTopology>;
|
||||||
|
invalid.deploy_docker(...); // `T: Docker` bound unsatisfied
|
||||||
|
````
|
||||||
|
|
||||||
|
## Consequences
|
||||||
|
|
||||||
|
**Pros:**
|
||||||
|
- **Extensible**: New topology `AWSTopology: PostgreSQL` → instant `Failover<AWSTopology>: PostgreSQL`.
|
||||||
|
- **Lean**: No useless impls (e.g., no `K8sAnywhere: Docker`).
|
||||||
|
- **Observable**: Logs trace every step.
|
||||||
|
|
||||||
|
**Cons:**
|
||||||
|
- **Monomorphization**: Generics generate code per T (mitigated: few Ts).
|
||||||
|
- **Delegation opacity**: Relies on rustdoc/logs for internals.
|
||||||
|
|
||||||
|
## Alternatives considered
|
||||||
|
|
||||||
|
| Approach | Pros | Cons |
|
||||||
|
|----------|------|------|
|
||||||
|
| **Manual per-T impls**<br>`impl PG for Failover<K8s> {..}`<br>`impl PG for Failover<Linux> {..}` | Explicit control | N×M explosion; violates ISP; hard to extend. |
|
||||||
|
| **Dynamic trait objects**<br>`Box<dyn AnyCapability>` | Runtime flex | Perf hit; type erasure; error-prone dispatch. |
|
||||||
|
| **Mega-topology trait**<br>All-in-one `OrchestratedTopology` | Simple wiring | Monolithic; poor composition. |
|
||||||
|
| **Registry dispatch**<br>Runtime capability lookup | Decoupled | Complex; no compile safety; perf/debug overhead. |
|
||||||
|
|
||||||
|
**Selected**: Blanket impls leverage Rust generics for safe, zero-cost composition.
|
||||||
|
|
||||||
|
## Additional Notes
|
||||||
|
|
||||||
|
- Applies to `MultisiteTopology<T>`, `ShardedTopology<T>`, etc.
|
||||||
|
- `FailoverTopology` in `failover.rs` is first implementation.
|
||||||
153
adr/015-higher-order-topologies/example.rs
Normal file
153
adr/015-higher-order-topologies/example.rs
Normal file
@@ -0,0 +1,153 @@
|
|||||||
|
//! Example of Higher-Order Topologies in Harmony.
|
||||||
|
//! Demonstrates how `FailoverTopology<T>` automatically provides failover for *any* capability
|
||||||
|
//! supported by a sub-topology `T` via blanket trait impls.
|
||||||
|
//!
|
||||||
|
//! Key insight: No manual impls per T or capability -- scales effortlessly.
|
||||||
|
//! Users can:
|
||||||
|
//! - Write new `Topology` (impl capabilities on a struct).
|
||||||
|
//! - Compose with `FailoverTopology` (gets capabilities if T has them).
|
||||||
|
//! - Compile fails if capability missing (safety).
|
||||||
|
|
||||||
|
use async_trait::async_trait;
|
||||||
|
use tokio;
|
||||||
|
|
||||||
|
/// Capability trait: Deploy and manage PostgreSQL.
|
||||||
|
#[async_trait]
|
||||||
|
pub trait PostgreSQL {
|
||||||
|
async fn deploy(&self, config: &PostgreSQLConfig) -> Result<String, String>;
|
||||||
|
async fn get_replication_certs(&self, cluster_name: &str) -> Result<ReplicationCerts, String>;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Capability trait: Deploy Docker.
|
||||||
|
#[async_trait]
|
||||||
|
pub trait Docker {
|
||||||
|
async fn deploy_docker(&self) -> Result<String, String>;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Configuration for PostgreSQL deployments.
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct PostgreSQLConfig;
|
||||||
|
|
||||||
|
/// Replication certificates.
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct ReplicationCerts;
|
||||||
|
|
||||||
|
/// Concrete topology: Kubernetes Anywhere (supports PostgreSQL).
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct K8sAnywhereTopology;
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl PostgreSQL for K8sAnywhereTopology {
|
||||||
|
async fn deploy(&self, _config: &PostgreSQLConfig) -> Result<String, String> {
|
||||||
|
// Real impl: Use k8s helm chart, operator, etc.
|
||||||
|
Ok("K8sAnywhere PostgreSQL deployed".to_string())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn get_replication_certs(&self, _cluster_name: &str) -> Result<ReplicationCerts, String> {
|
||||||
|
Ok(ReplicationCerts)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Concrete topology: Linux Host (supports Docker).
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct LinuxHostTopology;
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl Docker for LinuxHostTopology {
|
||||||
|
async fn deploy_docker(&self) -> Result<String, String> {
|
||||||
|
// Real impl: Install/configure Docker on host.
|
||||||
|
Ok("LinuxHost Docker deployed".to_string())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Higher-Order Topology: Composes multiple sub-topologies (primary + replica).
|
||||||
|
/// Automatically derives *all* capabilities of `T` with failover orchestration.
|
||||||
|
///
|
||||||
|
/// - If `T: PostgreSQL`, then `FailoverTopology<T>: PostgreSQL` (blanket impl).
|
||||||
|
/// - Same for `Docker`, etc. No boilerplate!
|
||||||
|
/// - Compile-time safe: Missing `T: Capability` → error.
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct FailoverTopology<T> {
|
||||||
|
/// Primary sub-topology.
|
||||||
|
pub primary: T,
|
||||||
|
/// Replica sub-topology.
|
||||||
|
pub replica: T,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Blanket impl: Failover PostgreSQL if T provides PostgreSQL.
|
||||||
|
/// Delegates reads to primary; deploys to both.
|
||||||
|
#[async_trait]
|
||||||
|
impl<T: PostgreSQL + Send + Sync + Clone> PostgreSQL for FailoverTopology<T> {
|
||||||
|
async fn deploy(&self, config: &PostgreSQLConfig) -> Result<String, String> {
|
||||||
|
// Orchestrate: Deploy primary first, then replica (e.g., via pg_basebackup).
|
||||||
|
let primary_result = self.primary.deploy(config).await?;
|
||||||
|
let replica_result = self.replica.deploy(config).await?;
|
||||||
|
Ok(format!("Failover PG deployed: {} | {}", primary_result, replica_result))
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn get_replication_certs(&self, cluster_name: &str) -> Result<ReplicationCerts, String> {
|
||||||
|
// Delegate to primary (replica follows).
|
||||||
|
self.primary.get_replication_certs(cluster_name).await
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Blanket impl: Failover Docker if T provides Docker.
|
||||||
|
#[async_trait]
|
||||||
|
impl<T: Docker + Send + Sync + Clone> Docker for FailoverTopology<T> {
|
||||||
|
async fn deploy_docker(&self) -> Result<String, String> {
|
||||||
|
// Orchestrate across primary + replica.
|
||||||
|
let primary_result = self.primary.deploy_docker().await?;
|
||||||
|
let replica_result = self.replica.deploy_docker().await?;
|
||||||
|
Ok(format!("Failover Docker deployed: {} | {}", primary_result, replica_result))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() {
|
||||||
|
let config = PostgreSQLConfig;
|
||||||
|
|
||||||
|
println!("=== ✅ PostgreSQL Failover (K8sAnywhere supports PG) ===");
|
||||||
|
let pg_failover = FailoverTopology {
|
||||||
|
primary: K8sAnywhereTopology,
|
||||||
|
replica: K8sAnywhereTopology,
|
||||||
|
};
|
||||||
|
let result = pg_failover.deploy(&config).await.unwrap();
|
||||||
|
println!("Result: {}", result);
|
||||||
|
|
||||||
|
println!("\n=== ✅ Docker Failover (LinuxHost supports Docker) ===");
|
||||||
|
let docker_failover = FailoverTopology {
|
||||||
|
primary: LinuxHostTopology,
|
||||||
|
replica: LinuxHostTopology,
|
||||||
|
};
|
||||||
|
let result = docker_failover.deploy_docker().await.unwrap();
|
||||||
|
println!("Result: {}", result);
|
||||||
|
|
||||||
|
println!("\n=== ❌ Would fail to compile (K8sAnywhere !: Docker) ===");
|
||||||
|
// let invalid = FailoverTopology {
|
||||||
|
// primary: K8sAnywhereTopology,
|
||||||
|
// replica: K8sAnywhereTopology,
|
||||||
|
// };
|
||||||
|
// invalid.deploy_docker().await.unwrap(); // Error: `K8sAnywhereTopology: Docker` not satisfied!
|
||||||
|
// Very clear error message :
|
||||||
|
// error[E0599]: the method `deploy_docker` exists for struct `FailoverTopology<K8sAnywhereTopology>`, but its trait bounds were not satisfied
|
||||||
|
// --> src/main.rs:90:9
|
||||||
|
// |
|
||||||
|
// 4 | pub struct FailoverTopology<T> {
|
||||||
|
// | ------------------------------ method `deploy_docker` not found for this struct because it doesn't satisfy `FailoverTopology<K8sAnywhereTopology>: Docker`
|
||||||
|
// ...
|
||||||
|
// 37 | struct K8sAnywhereTopology;
|
||||||
|
// | -------------------------- doesn't satisfy `K8sAnywhereTopology: Docker`
|
||||||
|
// ...
|
||||||
|
// 90 | invalid.deploy_docker(); // `T: Docker` bound unsatisfied
|
||||||
|
// | ^^^^^^^^^^^^^ method cannot be called on `FailoverTopology<K8sAnywhereTopology>` due to unsatisfied trait bounds
|
||||||
|
// |
|
||||||
|
// note: trait bound `K8sAnywhereTopology: Docker` was not satisfied
|
||||||
|
// --> src/main.rs:61:9
|
||||||
|
// |
|
||||||
|
// 61 | impl<T: Docker + Send + Sync> Docker for FailoverTopology<T> {
|
||||||
|
// | ^^^^^^ ------ -------------------
|
||||||
|
// | |
|
||||||
|
// | unsatisfied trait bound introduced here
|
||||||
|
// note: the trait `Docker` must be implemented
|
||||||
|
}
|
||||||
|
|
||||||
18
brocade/Cargo.toml
Normal file
18
brocade/Cargo.toml
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
[package]
|
||||||
|
name = "brocade"
|
||||||
|
edition = "2024"
|
||||||
|
version.workspace = true
|
||||||
|
readme.workspace = true
|
||||||
|
license.workspace = true
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
async-trait.workspace = true
|
||||||
|
harmony_types = { path = "../harmony_types" }
|
||||||
|
russh.workspace = true
|
||||||
|
russh-keys.workspace = true
|
||||||
|
tokio.workspace = true
|
||||||
|
log.workspace = true
|
||||||
|
env_logger.workspace = true
|
||||||
|
regex = "1.11.3"
|
||||||
|
harmony_secret = { path = "../harmony_secret" }
|
||||||
|
serde.workspace = true
|
||||||
70
brocade/examples/main.rs
Normal file
70
brocade/examples/main.rs
Normal file
@@ -0,0 +1,70 @@
|
|||||||
|
use std::net::{IpAddr, Ipv4Addr};
|
||||||
|
|
||||||
|
use brocade::BrocadeOptions;
|
||||||
|
use harmony_secret::{Secret, SecretManager};
|
||||||
|
use harmony_types::switch::PortLocation;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
#[derive(Secret, Clone, Debug, Serialize, Deserialize)]
|
||||||
|
struct BrocadeSwitchAuth {
|
||||||
|
username: String,
|
||||||
|
password: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() {
|
||||||
|
env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init();
|
||||||
|
|
||||||
|
// let ip = IpAddr::V4(Ipv4Addr::new(10, 0, 0, 250)); // old brocade @ ianlet
|
||||||
|
let ip = IpAddr::V4(Ipv4Addr::new(192, 168, 55, 101)); // brocade @ sto1
|
||||||
|
// let ip = IpAddr::V4(Ipv4Addr::new(192, 168, 4, 11)); // brocade @ st
|
||||||
|
let switch_addresses = vec![ip];
|
||||||
|
|
||||||
|
let config = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let brocade = brocade::init(
|
||||||
|
&switch_addresses,
|
||||||
|
22,
|
||||||
|
&config.username,
|
||||||
|
&config.password,
|
||||||
|
Some(BrocadeOptions {
|
||||||
|
dry_run: true,
|
||||||
|
..Default::default()
|
||||||
|
}),
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.expect("Brocade client failed to connect");
|
||||||
|
|
||||||
|
let entries = brocade.get_stack_topology().await.unwrap();
|
||||||
|
println!("Stack topology: {entries:#?}");
|
||||||
|
|
||||||
|
let entries = brocade.get_interfaces().await.unwrap();
|
||||||
|
println!("Interfaces: {entries:#?}");
|
||||||
|
|
||||||
|
let version = brocade.version().await.unwrap();
|
||||||
|
println!("Version: {version:?}");
|
||||||
|
|
||||||
|
println!("--------------");
|
||||||
|
let mac_adddresses = brocade.get_mac_address_table().await.unwrap();
|
||||||
|
println!("VLAN\tMAC\t\t\tPORT");
|
||||||
|
for mac in mac_adddresses {
|
||||||
|
println!("{}\t{}\t{}", mac.vlan, mac.mac_address, mac.port);
|
||||||
|
}
|
||||||
|
|
||||||
|
println!("--------------");
|
||||||
|
let channel_name = "1";
|
||||||
|
brocade.clear_port_channel(channel_name).await.unwrap();
|
||||||
|
|
||||||
|
println!("--------------");
|
||||||
|
let channel_id = brocade.find_available_channel_id().await.unwrap();
|
||||||
|
|
||||||
|
println!("--------------");
|
||||||
|
let channel_name = "HARMONY_LAG";
|
||||||
|
let ports = [PortLocation(2, 0, 35)];
|
||||||
|
brocade
|
||||||
|
.create_port_channel(channel_id, channel_name, &ports)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
}
|
||||||
212
brocade/src/fast_iron.rs
Normal file
212
brocade/src/fast_iron.rs
Normal file
@@ -0,0 +1,212 @@
|
|||||||
|
use super::BrocadeClient;
|
||||||
|
use crate::{
|
||||||
|
BrocadeInfo, Error, ExecutionMode, InterSwitchLink, InterfaceInfo, MacAddressEntry,
|
||||||
|
PortChannelId, PortOperatingMode, parse_brocade_mac_address, shell::BrocadeShell,
|
||||||
|
};
|
||||||
|
|
||||||
|
use async_trait::async_trait;
|
||||||
|
use harmony_types::switch::{PortDeclaration, PortLocation};
|
||||||
|
use log::{debug, info};
|
||||||
|
use regex::Regex;
|
||||||
|
use std::{collections::HashSet, str::FromStr};
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct FastIronClient {
|
||||||
|
shell: BrocadeShell,
|
||||||
|
version: BrocadeInfo,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FastIronClient {
|
||||||
|
pub fn init(mut shell: BrocadeShell, version_info: BrocadeInfo) -> Self {
|
||||||
|
shell.before_all(vec!["skip-page-display".into()]);
|
||||||
|
shell.after_all(vec!["page".into()]);
|
||||||
|
|
||||||
|
Self {
|
||||||
|
shell,
|
||||||
|
version: version_info,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn parse_mac_entry(&self, line: &str) -> Option<Result<MacAddressEntry, Error>> {
|
||||||
|
debug!("[Brocade] Parsing mac address entry: {line}");
|
||||||
|
let parts: Vec<&str> = line.split_whitespace().collect();
|
||||||
|
if parts.len() < 3 {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
|
let (vlan, mac_address, port) = match parts.len() {
|
||||||
|
3 => (
|
||||||
|
u16::from_str(parts[0]).ok()?,
|
||||||
|
parse_brocade_mac_address(parts[1]).ok()?,
|
||||||
|
parts[2].to_string(),
|
||||||
|
),
|
||||||
|
_ => (
|
||||||
|
1,
|
||||||
|
parse_brocade_mac_address(parts[0]).ok()?,
|
||||||
|
parts[1].to_string(),
|
||||||
|
),
|
||||||
|
};
|
||||||
|
|
||||||
|
let port =
|
||||||
|
PortDeclaration::parse(&port).map_err(|e| Error::UnexpectedError(format!("{e}")));
|
||||||
|
|
||||||
|
match port {
|
||||||
|
Ok(p) => Some(Ok(MacAddressEntry {
|
||||||
|
vlan,
|
||||||
|
mac_address,
|
||||||
|
port: p,
|
||||||
|
})),
|
||||||
|
Err(e) => Some(Err(e)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn parse_stack_port_entry(&self, line: &str) -> Option<Result<InterSwitchLink, Error>> {
|
||||||
|
debug!("[Brocade] Parsing stack port entry: {line}");
|
||||||
|
let parts: Vec<&str> = line.split_whitespace().collect();
|
||||||
|
if parts.len() < 10 {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
|
let local_port = PortLocation::from_str(parts[0]).ok()?;
|
||||||
|
|
||||||
|
Some(Ok(InterSwitchLink {
|
||||||
|
local_port,
|
||||||
|
remote_port: None,
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn build_port_channel_commands(
|
||||||
|
&self,
|
||||||
|
channel_id: PortChannelId,
|
||||||
|
channel_name: &str,
|
||||||
|
ports: &[PortLocation],
|
||||||
|
) -> Vec<String> {
|
||||||
|
let mut commands = vec![
|
||||||
|
"configure terminal".to_string(),
|
||||||
|
format!("lag {channel_name} static id {channel_id}"),
|
||||||
|
];
|
||||||
|
|
||||||
|
for port in ports {
|
||||||
|
commands.push(format!("ports ethernet {port}"));
|
||||||
|
}
|
||||||
|
|
||||||
|
commands.push(format!("primary-port {}", ports[0]));
|
||||||
|
commands.push("deploy".into());
|
||||||
|
commands.push("exit".into());
|
||||||
|
commands.push("write memory".into());
|
||||||
|
commands.push("exit".into());
|
||||||
|
|
||||||
|
commands
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl BrocadeClient for FastIronClient {
|
||||||
|
async fn version(&self) -> Result<BrocadeInfo, Error> {
|
||||||
|
Ok(self.version.clone())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn get_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error> {
|
||||||
|
info!("[Brocade] Showing MAC address table...");
|
||||||
|
|
||||||
|
let output = self
|
||||||
|
.shell
|
||||||
|
.run_command("show mac-address", ExecutionMode::Regular)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
output
|
||||||
|
.lines()
|
||||||
|
.skip(2)
|
||||||
|
.filter_map(|line| self.parse_mac_entry(line))
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn get_stack_topology(&self) -> Result<Vec<InterSwitchLink>, Error> {
|
||||||
|
let output = self
|
||||||
|
.shell
|
||||||
|
.run_command("show interface stack-ports", crate::ExecutionMode::Regular)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
output
|
||||||
|
.lines()
|
||||||
|
.skip(1)
|
||||||
|
.filter_map(|line| self.parse_stack_port_entry(line))
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn get_interfaces(&self) -> Result<Vec<InterfaceInfo>, Error> {
|
||||||
|
todo!()
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn configure_interfaces(
|
||||||
|
&self,
|
||||||
|
_interfaces: Vec<(String, PortOperatingMode)>,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
todo!()
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn find_available_channel_id(&self) -> Result<PortChannelId, Error> {
|
||||||
|
info!("[Brocade] Finding next available channel id...");
|
||||||
|
|
||||||
|
let output = self
|
||||||
|
.shell
|
||||||
|
.run_command("show lag", ExecutionMode::Regular)
|
||||||
|
.await?;
|
||||||
|
let re = Regex::new(r"=== LAG .* ID\s+(\d+)").expect("Invalid regex");
|
||||||
|
|
||||||
|
let used_ids: HashSet<u8> = output
|
||||||
|
.lines()
|
||||||
|
.filter_map(|line| {
|
||||||
|
re.captures(line)
|
||||||
|
.and_then(|c| c.get(1))
|
||||||
|
.and_then(|id_match| id_match.as_str().parse().ok())
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
let mut next_id: u8 = 1;
|
||||||
|
loop {
|
||||||
|
if !used_ids.contains(&next_id) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
next_id += 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
info!("[Brocade] Found channel id: {next_id}");
|
||||||
|
Ok(next_id)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn create_port_channel(
|
||||||
|
&self,
|
||||||
|
channel_id: PortChannelId,
|
||||||
|
channel_name: &str,
|
||||||
|
ports: &[PortLocation],
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
info!(
|
||||||
|
"[Brocade] Configuring port-channel '{channel_name} {channel_id}' with ports: {ports:?}"
|
||||||
|
);
|
||||||
|
|
||||||
|
let commands = self.build_port_channel_commands(channel_id, channel_name, ports);
|
||||||
|
self.shell
|
||||||
|
.run_commands(commands, ExecutionMode::Privileged)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
info!("[Brocade] Port-channel '{channel_name}' configured.");
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn clear_port_channel(&self, channel_name: &str) -> Result<(), Error> {
|
||||||
|
info!("[Brocade] Clearing port-channel: {channel_name}");
|
||||||
|
|
||||||
|
let commands = vec![
|
||||||
|
"configure terminal".to_string(),
|
||||||
|
format!("no lag {channel_name}"),
|
||||||
|
"write memory".to_string(),
|
||||||
|
];
|
||||||
|
self.shell
|
||||||
|
.run_commands(commands, ExecutionMode::Privileged)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
info!("[Brocade] Port-channel '{channel_name}' cleared.");
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
338
brocade/src/lib.rs
Normal file
338
brocade/src/lib.rs
Normal file
@@ -0,0 +1,338 @@
|
|||||||
|
use std::net::IpAddr;
|
||||||
|
use std::{
|
||||||
|
fmt::{self, Display},
|
||||||
|
time::Duration,
|
||||||
|
};
|
||||||
|
|
||||||
|
use crate::network_operating_system::NetworkOperatingSystemClient;
|
||||||
|
use crate::{
|
||||||
|
fast_iron::FastIronClient,
|
||||||
|
shell::{BrocadeSession, BrocadeShell},
|
||||||
|
};
|
||||||
|
|
||||||
|
use async_trait::async_trait;
|
||||||
|
use harmony_types::net::MacAddress;
|
||||||
|
use harmony_types::switch::{PortDeclaration, PortLocation};
|
||||||
|
use regex::Regex;
|
||||||
|
|
||||||
|
mod fast_iron;
|
||||||
|
mod network_operating_system;
|
||||||
|
mod shell;
|
||||||
|
mod ssh;
|
||||||
|
|
||||||
|
#[derive(Default, Clone, Debug)]
|
||||||
|
pub struct BrocadeOptions {
|
||||||
|
pub dry_run: bool,
|
||||||
|
pub ssh: ssh::SshOptions,
|
||||||
|
pub timeouts: TimeoutConfig,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Per-operation timeouts for the interactive Brocade shell.
#[derive(Clone, Debug)]
pub struct TimeoutConfig {
    /// Maximum wait for the remote shell prompt after opening a channel.
    pub shell_ready: Duration,
    /// Maximum wall-clock time allowed for a single command to execute.
    pub command_execution: Duration,
    /// Delay before logging that we are still waiting on command output.
    pub command_output: Duration,
    /// Maximum wait while draining the channel during session close.
    pub cleanup: Duration,
    /// Poll interval when waiting for individual channel messages.
    pub message_wait: Duration,
}
|
||||||
|
|
||||||
|
impl Default for TimeoutConfig {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self {
|
||||||
|
shell_ready: Duration::from_secs(10),
|
||||||
|
command_execution: Duration::from_secs(60), // Commands like `deploy` (for a LAG) can take a while
|
||||||
|
command_output: Duration::from_secs(5), // Delay to start logging "waiting for command output"
|
||||||
|
cleanup: Duration::from_secs(10),
|
||||||
|
message_wait: Duration::from_millis(500),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Privilege level used when opening a shell session on the switch.
///
/// Derives added per standard practice: the enum is a trivial two-variant
/// flag, so `Copy`/`Clone`/`PartialEq`/`Eq`/`Debug` are all free and useful.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum ExecutionMode {
    /// Standard user EXEC mode.
    Regular,
    /// Privileged (enable) mode, required for configuration commands.
    Privileged,
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug)]
|
||||||
|
pub struct BrocadeInfo {
|
||||||
|
os: BrocadeOs,
|
||||||
|
_version: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Brocade operating-system families this crate can talk to.
#[derive(Clone, Debug)]
pub enum BrocadeOs {
    /// Brocade/Extreme Network Operating System (VDX fabric switches).
    NetworkOperatingSystem,
    /// Brocade/Ruckus FastIron (ICX series).
    FastIron,
    /// `show version` output did not match any known family.
    Unknown,
}
|
||||||
|
|
||||||
|
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]
|
||||||
|
pub struct MacAddressEntry {
|
||||||
|
pub vlan: u16,
|
||||||
|
pub mac_address: MacAddress,
|
||||||
|
pub port: PortDeclaration,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Numeric identifier of a port-channel (LAG) on the switch.
pub type PortChannelId = u8;
|
||||||
|
|
||||||
|
/// Represents a single physical or logical link connecting two switches within a stack or fabric.
|
||||||
|
///
|
||||||
|
/// This structure provides a standardized view of the topology regardless of the
|
||||||
|
/// underlying Brocade OS configuration (stacking vs. fabric).
|
||||||
|
#[derive(Debug, PartialEq, Eq, Clone)]
|
||||||
|
pub struct InterSwitchLink {
|
||||||
|
/// The local port on the switch where the topology command was run.
|
||||||
|
pub local_port: PortLocation,
|
||||||
|
/// The port on the directly connected neighboring switch.
|
||||||
|
pub remote_port: Option<PortLocation>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Represents the key running configuration status of a single switch interface.
|
||||||
|
#[derive(Debug, PartialEq, Eq, Clone)]
|
||||||
|
pub struct InterfaceInfo {
|
||||||
|
/// The full configuration name (e.g., "TenGigabitEthernet 1/0/1", "FortyGigabitEthernet 2/0/2").
|
||||||
|
pub name: String,
|
||||||
|
/// The physical location of the interface.
|
||||||
|
pub port_location: PortLocation,
|
||||||
|
/// The parsed type and name prefix of the interface.
|
||||||
|
pub interface_type: InterfaceType,
|
||||||
|
/// The primary configuration mode defining the interface's behavior (L2, L3, Fabric).
|
||||||
|
pub operating_mode: Option<PortOperatingMode>,
|
||||||
|
/// Indicates the current state of the interface.
|
||||||
|
pub status: InterfaceStatus,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Functional type of a switch interface.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum InterfaceType {
    /// Physical or virtual Ethernet interface
    /// (e.g. TenGigabitEthernet, FortyGigabitEthernet).
    Ethernet(String),
}
|
||||||
|
|
||||||
|
impl fmt::Display for InterfaceType {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
|
match self {
|
||||||
|
InterfaceType::Ethernet(name) => write!(f, "{name}"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Primary configuration mode of a switch interface; the roles are mutually
/// exclusive.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum PortOperatingMode {
    /// Explicitly configured for Brocade fabric roles (ISL or Trunk enabled).
    Fabric,
    /// Standard Layer 2 switching as a trunk port (`switchport mode trunk`).
    Trunk,
    /// Standard Layer 2 switching as an access port (`switchport` without
    /// trunk mode).
    Access,
}
|
||||||
|
|
||||||
|
/// Possible link status of an interface.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum InterfaceStatus {
    /// The interface is connected.
    Connected,
    /// The interface is not connected and is not expected to be.
    NotConnected,
    /// No SFP module present — not connected but expected to be
    /// (configured with `no shutdown`).
    SfpAbsent,
}
|
||||||
|
|
||||||
|
pub async fn init(
|
||||||
|
ip_addresses: &[IpAddr],
|
||||||
|
port: u16,
|
||||||
|
username: &str,
|
||||||
|
password: &str,
|
||||||
|
options: Option<BrocadeOptions>,
|
||||||
|
) -> Result<Box<dyn BrocadeClient + Send + Sync>, Error> {
|
||||||
|
let shell = BrocadeShell::init(ip_addresses, port, username, password, options).await?;
|
||||||
|
|
||||||
|
let version_info = shell
|
||||||
|
.with_session(ExecutionMode::Regular, |session| {
|
||||||
|
Box::pin(get_brocade_info(session))
|
||||||
|
})
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
Ok(match version_info.os {
|
||||||
|
BrocadeOs::FastIron => Box::new(FastIronClient::init(shell, version_info)),
|
||||||
|
BrocadeOs::NetworkOperatingSystem => {
|
||||||
|
Box::new(NetworkOperatingSystemClient::init(shell, version_info))
|
||||||
|
}
|
||||||
|
BrocadeOs::Unknown => todo!(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
pub trait BrocadeClient: std::fmt::Debug {
    /// Retrieves the operating system and version details from the connected Brocade switch.
    ///
    /// This is typically the first call made after establishing a connection, to determine
    /// the switch OS family (e.g., FastIron, NOS) for feature compatibility.
    ///
    /// # Returns
    ///
    /// A `BrocadeInfo` structure containing the parsed OS type and version string.
    async fn version(&self) -> Result<BrocadeInfo, Error>;

    /// Retrieves the dynamically learned MAC address table from the switch.
    ///
    /// This is crucial for discovering where specific network endpoints (MAC addresses)
    /// are currently located on the physical ports.
    ///
    /// # Returns
    ///
    /// A vector of `MacAddressEntry`, where each entry contains VLAN, MAC address,
    /// and the associated port declaration.
    async fn get_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error>;

    /// Derives the physical connections used to link multiple switches together
    /// to form a single logical entity (stack, fabric, etc.).
    ///
    /// This abstracts the underlying configuration (e.g., stack ports, fabric ports)
    /// to return a standardized view of the topology.
    ///
    /// # Returns
    ///
    /// A vector of `InterSwitchLink` structs detailing which ports are used for
    /// stacking/fabric. If the switch is not stacked, returns an empty vector.
    async fn get_stack_topology(&self) -> Result<Vec<InterSwitchLink>, Error>;

    /// Retrieves the status of all interfaces on the switch.
    ///
    /// # Returns
    ///
    /// A vector of `InterfaceInfo` structures.
    async fn get_interfaces(&self) -> Result<Vec<InterfaceInfo>, Error>;

    /// Configures a set of interfaces to operate in the specified mode
    /// (access port, trunk, ISL/fabric).
    ///
    /// Each tuple pairs an interface name (as reported by `get_interfaces`)
    /// with the desired `PortOperatingMode`.
    async fn configure_interfaces(
        &self,
        interfaces: Vec<(String, PortOperatingMode)>,
    ) -> Result<(), Error>;

    /// Scans the existing configuration to find the next available (unused)
    /// Port-Channel ID (`lag` or `trunk`) for assignment.
    ///
    /// # Returns
    ///
    /// The smallest unassigned `PortChannelId` within the supported range.
    async fn find_available_channel_id(&self) -> Result<PortChannelId, Error>;

    /// Creates and configures a new Port-Channel (Link Aggregation Group, LAG)
    /// using the specified channel ID and ports.
    ///
    /// The resulting configuration must be persistent (saved to startup-config).
    /// Assumes a static LAG configuration mode unless specified otherwise by the
    /// implementation.
    ///
    /// # Parameters
    ///
    /// * `channel_id`: The ID (e.g., 1-128) for the logical port channel.
    /// * `channel_name`: A descriptive name for the LAG (used in configuration context).
    /// * `ports`: A slice of `PortLocation` structs defining the physical member ports.
    async fn create_port_channel(
        &self,
        channel_id: PortChannelId,
        channel_name: &str,
        ports: &[PortLocation],
    ) -> Result<(), Error>;

    /// Removes all configuration associated with the specified Port-Channel name.
    ///
    /// This operation should be idempotent; attempting to clear a non-existent
    /// channel should succeed (or return a benign error).
    ///
    /// # Parameters
    ///
    /// * `channel_name`: The name of the Port-Channel (LAG) to delete.
    async fn clear_port_channel(&self, channel_name: &str) -> Result<(), Error>;
}
|
||||||
|
|
||||||
|
async fn get_brocade_info(session: &mut BrocadeSession) -> Result<BrocadeInfo, Error> {
|
||||||
|
let output = session.run_command("show version").await?;
|
||||||
|
|
||||||
|
if output.contains("Network Operating System") {
|
||||||
|
let re = Regex::new(r"Network Operating System Version:\s*(?P<version>[a-zA-Z0-9.\-]+)")
|
||||||
|
.expect("Invalid regex");
|
||||||
|
let version = re
|
||||||
|
.captures(&output)
|
||||||
|
.and_then(|cap| cap.name("version"))
|
||||||
|
.map(|m| m.as_str().to_string())
|
||||||
|
.unwrap_or_default();
|
||||||
|
|
||||||
|
return Ok(BrocadeInfo {
|
||||||
|
os: BrocadeOs::NetworkOperatingSystem,
|
||||||
|
_version: version,
|
||||||
|
});
|
||||||
|
} else if output.contains("ICX") {
|
||||||
|
let re = Regex::new(r"(?m)^\s*SW: Version\s*(?P<version>[a-zA-Z0-9.\-]+)")
|
||||||
|
.expect("Invalid regex");
|
||||||
|
let version = re
|
||||||
|
.captures(&output)
|
||||||
|
.and_then(|cap| cap.name("version"))
|
||||||
|
.map(|m| m.as_str().to_string())
|
||||||
|
.unwrap_or_default();
|
||||||
|
|
||||||
|
return Ok(BrocadeInfo {
|
||||||
|
os: BrocadeOs::FastIron,
|
||||||
|
_version: version,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
Err(Error::UnexpectedError("Unknown Brocade OS version".into()))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn parse_brocade_mac_address(value: &str) -> Result<MacAddress, String> {
|
||||||
|
let cleaned_mac = value.replace('.', "");
|
||||||
|
|
||||||
|
if cleaned_mac.len() != 12 {
|
||||||
|
return Err(format!("Invalid MAC address: {value}"));
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut bytes = [0u8; 6];
|
||||||
|
for (i, pair) in cleaned_mac.as_bytes().chunks(2).enumerate() {
|
||||||
|
let byte_str = std::str::from_utf8(pair).map_err(|_| "Invalid UTF-8")?;
|
||||||
|
bytes[i] =
|
||||||
|
u8::from_str_radix(byte_str, 16).map_err(|_| format!("Invalid hex in MAC: {value}"))?;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(MacAddress(bytes))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Errors surfaced by the Brocade client and shell layers.
#[derive(Debug)]
pub enum Error {
    /// SSH/transport failure while talking to the switch.
    NetworkError(String),
    /// The switch rejected the provided credentials.
    AuthenticationError(String),
    /// Invalid or unusable client configuration.
    ConfigurationError(String),
    /// An operation exceeded one of the configured timeouts.
    TimeoutError(String),
    /// A condition the client did not anticipate.
    UnexpectedError(String),
    /// The switch reported an error while executing a command; the message
    /// is already fully formatted.
    CommandError(String),
}
|
||||||
|
|
||||||
|
impl Display for Error {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
|
match self {
|
||||||
|
Error::NetworkError(msg) => write!(f, "Network error: {msg}"),
|
||||||
|
Error::AuthenticationError(msg) => write!(f, "Authentication error: {msg}"),
|
||||||
|
Error::ConfigurationError(msg) => write!(f, "Configuration error: {msg}"),
|
||||||
|
Error::TimeoutError(msg) => write!(f, "Timeout error: {msg}"),
|
||||||
|
Error::UnexpectedError(msg) => write!(f, "Unexpected error: {msg}"),
|
||||||
|
Error::CommandError(msg) => write!(f, "{msg}"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<Error> for String {
|
||||||
|
fn from(val: Error) -> Self {
|
||||||
|
format!("{val}")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// `Error` already provides `Debug` and `Display`, so the marker impl suffices.
impl std::error::Error for Error {}
|
||||||
|
|
||||||
|
impl From<russh::Error> for Error {
|
||||||
|
fn from(value: russh::Error) -> Self {
|
||||||
|
Error::NetworkError(format!("Russh client error: {value}"))
|
||||||
|
}
|
||||||
|
}
|
||||||
333
brocade/src/network_operating_system.rs
Normal file
333
brocade/src/network_operating_system.rs
Normal file
@@ -0,0 +1,333 @@
|
|||||||
|
use std::str::FromStr;
|
||||||
|
|
||||||
|
use async_trait::async_trait;
|
||||||
|
use harmony_types::switch::{PortDeclaration, PortLocation};
|
||||||
|
use log::{debug, info};
|
||||||
|
use regex::Regex;
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
BrocadeClient, BrocadeInfo, Error, ExecutionMode, InterSwitchLink, InterfaceInfo,
|
||||||
|
InterfaceStatus, InterfaceType, MacAddressEntry, PortChannelId, PortOperatingMode,
|
||||||
|
parse_brocade_mac_address, shell::BrocadeShell,
|
||||||
|
};
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct NetworkOperatingSystemClient {
|
||||||
|
shell: BrocadeShell,
|
||||||
|
version: BrocadeInfo,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl NetworkOperatingSystemClient {
|
||||||
|
pub fn init(mut shell: BrocadeShell, version_info: BrocadeInfo) -> Self {
|
||||||
|
shell.before_all(vec!["terminal length 0".into()]);
|
||||||
|
|
||||||
|
Self {
|
||||||
|
shell,
|
||||||
|
version: version_info,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn parse_mac_entry(&self, line: &str) -> Option<Result<MacAddressEntry, Error>> {
|
||||||
|
debug!("[Brocade] Parsing mac address entry: {line}");
|
||||||
|
let parts: Vec<&str> = line.split_whitespace().collect();
|
||||||
|
if parts.len() < 5 {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
|
let (vlan, mac_address, port) = match parts.len() {
|
||||||
|
5 => (
|
||||||
|
u16::from_str(parts[0]).ok()?,
|
||||||
|
parse_brocade_mac_address(parts[1]).ok()?,
|
||||||
|
parts[4].to_string(),
|
||||||
|
),
|
||||||
|
_ => (
|
||||||
|
u16::from_str(parts[0]).ok()?,
|
||||||
|
parse_brocade_mac_address(parts[1]).ok()?,
|
||||||
|
parts[5].to_string(),
|
||||||
|
),
|
||||||
|
};
|
||||||
|
|
||||||
|
let port =
|
||||||
|
PortDeclaration::parse(&port).map_err(|e| Error::UnexpectedError(format!("{e}")));
|
||||||
|
|
||||||
|
match port {
|
||||||
|
Ok(p) => Some(Ok(MacAddressEntry {
|
||||||
|
vlan,
|
||||||
|
mac_address,
|
||||||
|
port: p,
|
||||||
|
})),
|
||||||
|
Err(e) => Some(Err(e)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn parse_inter_switch_link_entry(&self, line: &str) -> Option<Result<InterSwitchLink, Error>> {
|
||||||
|
debug!("[Brocade] Parsing inter switch link entry: {line}");
|
||||||
|
let parts: Vec<&str> = line.split_whitespace().collect();
|
||||||
|
if parts.len() < 10 {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
|
let local_port = PortLocation::from_str(parts[2]).ok()?;
|
||||||
|
let remote_port = PortLocation::from_str(parts[5]).ok()?;
|
||||||
|
|
||||||
|
Some(Ok(InterSwitchLink {
|
||||||
|
local_port,
|
||||||
|
remote_port: Some(remote_port),
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn parse_interface_status_entry(&self, line: &str) -> Option<Result<InterfaceInfo, Error>> {
|
||||||
|
debug!("[Brocade] Parsing interface status entry: {line}");
|
||||||
|
let parts: Vec<&str> = line.split_whitespace().collect();
|
||||||
|
if parts.len() < 6 {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
|
let interface_type = match parts[0] {
|
||||||
|
"Fo" => InterfaceType::Ethernet("FortyGigabitEthernet".to_string()),
|
||||||
|
"Te" => InterfaceType::Ethernet("TenGigabitEthernet".to_string()),
|
||||||
|
_ => return None,
|
||||||
|
};
|
||||||
|
let port_location = PortLocation::from_str(parts[1]).ok()?;
|
||||||
|
let status = match parts[2] {
|
||||||
|
"connected" => InterfaceStatus::Connected,
|
||||||
|
"notconnected" => InterfaceStatus::NotConnected,
|
||||||
|
"sfpAbsent" => InterfaceStatus::SfpAbsent,
|
||||||
|
_ => return None,
|
||||||
|
};
|
||||||
|
let operating_mode = match parts[3] {
|
||||||
|
"ISL" => Some(PortOperatingMode::Fabric),
|
||||||
|
"Trunk" => Some(PortOperatingMode::Trunk),
|
||||||
|
"Access" => Some(PortOperatingMode::Access),
|
||||||
|
"--" => None,
|
||||||
|
_ => return None,
|
||||||
|
};
|
||||||
|
|
||||||
|
Some(Ok(InterfaceInfo {
|
||||||
|
name: format!("{interface_type} {port_location}"),
|
||||||
|
port_location,
|
||||||
|
interface_type,
|
||||||
|
operating_mode,
|
||||||
|
status,
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn map_configure_interfaces_error(&self, err: Error) -> Error {
|
||||||
|
debug!("[Brocade] {err}");
|
||||||
|
|
||||||
|
if let Error::CommandError(message) = &err {
|
||||||
|
if message.contains("switchport")
|
||||||
|
&& message.contains("Cannot configure aggregator member")
|
||||||
|
{
|
||||||
|
let re = Regex::new(r"\(conf-if-([a-zA-Z]+)-([\d/]+)\)#").unwrap();
|
||||||
|
|
||||||
|
if let Some(caps) = re.captures(message) {
|
||||||
|
let interface_type = &caps[1];
|
||||||
|
let port_location = &caps[2];
|
||||||
|
let interface = format!("{interface_type} {port_location}");
|
||||||
|
|
||||||
|
return Error::CommandError(format!(
|
||||||
|
"Cannot configure interface '{interface}', it is a member of a port-channel (LAG)"
|
||||||
|
));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl BrocadeClient for NetworkOperatingSystemClient {
|
||||||
|
async fn version(&self) -> Result<BrocadeInfo, Error> {
|
||||||
|
Ok(self.version.clone())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn get_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error> {
|
||||||
|
let output = self
|
||||||
|
.shell
|
||||||
|
.run_command("show mac-address-table", ExecutionMode::Regular)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
output
|
||||||
|
.lines()
|
||||||
|
.skip(1)
|
||||||
|
.filter_map(|line| self.parse_mac_entry(line))
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn get_stack_topology(&self) -> Result<Vec<InterSwitchLink>, Error> {
|
||||||
|
let output = self
|
||||||
|
.shell
|
||||||
|
.run_command("show fabric isl", ExecutionMode::Regular)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
output
|
||||||
|
.lines()
|
||||||
|
.skip(6)
|
||||||
|
.filter_map(|line| self.parse_inter_switch_link_entry(line))
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn get_interfaces(&self) -> Result<Vec<InterfaceInfo>, Error> {
|
||||||
|
let output = self
|
||||||
|
.shell
|
||||||
|
.run_command(
|
||||||
|
"show interface status rbridge-id all",
|
||||||
|
ExecutionMode::Regular,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
output
|
||||||
|
.lines()
|
||||||
|
.skip(2)
|
||||||
|
.filter_map(|line| self.parse_interface_status_entry(line))
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn configure_interfaces(
|
||||||
|
&self,
|
||||||
|
interfaces: Vec<(String, PortOperatingMode)>,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
info!("[Brocade] Configuring {} interface(s)...", interfaces.len());
|
||||||
|
|
||||||
|
let mut commands = vec!["configure terminal".to_string()];
|
||||||
|
|
||||||
|
for interface in interfaces {
|
||||||
|
commands.push(format!("interface {}", interface.0));
|
||||||
|
|
||||||
|
match interface.1 {
|
||||||
|
PortOperatingMode::Fabric => {
|
||||||
|
commands.push("fabric isl enable".into());
|
||||||
|
commands.push("fabric trunk enable".into());
|
||||||
|
}
|
||||||
|
PortOperatingMode::Trunk => {
|
||||||
|
commands.push("switchport".into());
|
||||||
|
commands.push("switchport mode trunk".into());
|
||||||
|
commands.push("no spanning-tree shutdown".into());
|
||||||
|
commands.push("no fabric isl enable".into());
|
||||||
|
commands.push("no fabric trunk enable".into());
|
||||||
|
}
|
||||||
|
PortOperatingMode::Access => {
|
||||||
|
commands.push("switchport".into());
|
||||||
|
commands.push("switchport mode access".into());
|
||||||
|
commands.push("switchport access vlan 1".into());
|
||||||
|
commands.push("no spanning-tree shutdown".into());
|
||||||
|
commands.push("no fabric isl enable".into());
|
||||||
|
commands.push("no fabric trunk enable".into());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
commands.push("no shutdown".into());
|
||||||
|
commands.push("exit".into());
|
||||||
|
}
|
||||||
|
|
||||||
|
self.shell
|
||||||
|
.run_commands(commands, ExecutionMode::Regular)
|
||||||
|
.await
|
||||||
|
.map_err(|err| self.map_configure_interfaces_error(err))?;
|
||||||
|
|
||||||
|
info!("[Brocade] Interfaces configured.");
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn find_available_channel_id(&self) -> Result<PortChannelId, Error> {
|
||||||
|
info!("[Brocade] Finding next available channel id...");
|
||||||
|
|
||||||
|
let output = self
|
||||||
|
.shell
|
||||||
|
.run_command("show port-channel summary", ExecutionMode::Regular)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let used_ids: Vec<u8> = output
|
||||||
|
.lines()
|
||||||
|
.skip(6)
|
||||||
|
.filter_map(|line| {
|
||||||
|
let parts: Vec<&str> = line.split_whitespace().collect();
|
||||||
|
if parts.len() < 8 {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
|
u8::from_str(parts[0]).ok()
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
let mut next_id: u8 = 1;
|
||||||
|
loop {
|
||||||
|
if !used_ids.contains(&next_id) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
next_id += 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
info!("[Brocade] Found channel id: {next_id}");
|
||||||
|
Ok(next_id)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn create_port_channel(
|
||||||
|
&self,
|
||||||
|
channel_id: PortChannelId,
|
||||||
|
channel_name: &str,
|
||||||
|
ports: &[PortLocation],
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
info!(
|
||||||
|
"[Brocade] Configuring port-channel '{channel_id} {channel_name}' with ports: {}",
|
||||||
|
ports
|
||||||
|
.iter()
|
||||||
|
.map(|p| format!("{p}"))
|
||||||
|
.collect::<Vec<String>>()
|
||||||
|
.join(", ")
|
||||||
|
);
|
||||||
|
|
||||||
|
let interfaces = self.get_interfaces().await?;
|
||||||
|
|
||||||
|
let mut commands = vec![
|
||||||
|
"configure terminal".into(),
|
||||||
|
format!("interface port-channel {}", channel_id),
|
||||||
|
"no shutdown".into(),
|
||||||
|
"exit".into(),
|
||||||
|
];
|
||||||
|
|
||||||
|
for port in ports {
|
||||||
|
let interface = interfaces.iter().find(|i| i.port_location == *port);
|
||||||
|
let Some(interface) = interface else {
|
||||||
|
continue;
|
||||||
|
};
|
||||||
|
|
||||||
|
commands.push(format!("interface {}", interface.name));
|
||||||
|
commands.push("no switchport".into());
|
||||||
|
commands.push("no ip address".into());
|
||||||
|
commands.push("no fabric isl enable".into());
|
||||||
|
commands.push("no fabric trunk enable".into());
|
||||||
|
commands.push(format!("channel-group {channel_id} mode active"));
|
||||||
|
commands.push("no shutdown".into());
|
||||||
|
commands.push("exit".into());
|
||||||
|
}
|
||||||
|
|
||||||
|
self.shell
|
||||||
|
.run_commands(commands, ExecutionMode::Regular)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
info!("[Brocade] Port-channel '{channel_name}' configured.");
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn clear_port_channel(&self, channel_name: &str) -> Result<(), Error> {
|
||||||
|
info!("[Brocade] Clearing port-channel: {channel_name}");
|
||||||
|
|
||||||
|
let commands = vec![
|
||||||
|
"configure terminal".into(),
|
||||||
|
format!("no interface port-channel {}", channel_name),
|
||||||
|
"exit".into(),
|
||||||
|
];
|
||||||
|
|
||||||
|
self.shell
|
||||||
|
.run_commands(commands, ExecutionMode::Regular)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
info!("[Brocade] Port-channel '{channel_name}' cleared.");
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
370
brocade/src/shell.rs
Normal file
370
brocade/src/shell.rs
Normal file
@@ -0,0 +1,370 @@
|
|||||||
|
use std::net::IpAddr;
|
||||||
|
use std::time::Duration;
|
||||||
|
use std::time::Instant;
|
||||||
|
|
||||||
|
use crate::BrocadeOptions;
|
||||||
|
use crate::Error;
|
||||||
|
use crate::ExecutionMode;
|
||||||
|
use crate::TimeoutConfig;
|
||||||
|
use crate::ssh;
|
||||||
|
|
||||||
|
use log::debug;
|
||||||
|
use log::info;
|
||||||
|
use russh::ChannelMsg;
|
||||||
|
use tokio::time::timeout;
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct BrocadeShell {
|
||||||
|
ip: IpAddr,
|
||||||
|
port: u16,
|
||||||
|
username: String,
|
||||||
|
password: String,
|
||||||
|
options: BrocadeOptions,
|
||||||
|
before_all_commands: Vec<String>,
|
||||||
|
after_all_commands: Vec<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl BrocadeShell {
    /// Validates the address list and pre-authenticates once to fail fast on
    /// bad credentials/options.
    ///
    /// NOTE(review): only the FIRST address in `ip_addresses` is ever used;
    /// the rest are ignored — confirm whether failover was intended.
    pub async fn init(
        ip_addresses: &[IpAddr],
        port: u16,
        username: &str,
        password: &str,
        options: Option<BrocadeOptions>,
    ) -> Result<Self, Error> {
        let ip = ip_addresses
            .first()
            .ok_or_else(|| Error::ConfigurationError("No IP addresses provided".to_string()))?;

        let base_options = options.unwrap_or_default();
        // `try_init_client` returns (possibly adjusted) options on success.
        let options = ssh::try_init_client(username, password, ip, base_options).await?;

        Ok(Self {
            ip: *ip,
            port,
            username: username.to_string(),
            password: password.to_string(),
            before_all_commands: vec![],
            after_all_commands: vec![],
            options,
        })
    }

    /// Opens a fresh SSH session in the given execution mode.
    pub async fn open_session(&self, mode: ExecutionMode) -> Result<BrocadeSession, Error> {
        BrocadeSession::open(
            self.ip,
            self.port,
            &self.username,
            &self.password,
            self.options.clone(),
            mode,
        )
        .await
    }

    /// Runs `callback` inside a managed session: before-hooks, callback,
    /// after-hooks, close.
    ///
    /// Hook failures are deliberately ignored (`let _ =`); a close failure,
    /// however, is returned and can mask the callback's own result.
    pub async fn with_session<F, R>(&self, mode: ExecutionMode, callback: F) -> Result<R, Error>
    where
        F: FnOnce(
            &mut BrocadeSession,
        ) -> std::pin::Pin<
            Box<dyn std::future::Future<Output = Result<R, Error>> + Send + '_>,
        >,
    {
        let mut session = self.open_session(mode).await?;

        let _ = session.run_commands(self.before_all_commands.clone()).await;
        let result = callback(&mut session).await;
        let _ = session.run_commands(self.after_all_commands.clone()).await;

        session.close().await?;
        result
    }

    /// Runs a single command in a throwaway session and returns its output.
    /// Same lifecycle (and same hook/close caveats) as `with_session`.
    pub async fn run_command(&self, command: &str, mode: ExecutionMode) -> Result<String, Error> {
        let mut session = self.open_session(mode).await?;

        let _ = session.run_commands(self.before_all_commands.clone()).await;
        let result = session.run_command(command).await;
        let _ = session.run_commands(self.after_all_commands.clone()).await;

        session.close().await?;
        result
    }

    /// Runs a command batch in a throwaway session, discarding output.
    pub async fn run_commands(
        &self,
        commands: Vec<String>,
        mode: ExecutionMode,
    ) -> Result<(), Error> {
        let mut session = self.open_session(mode).await?;

        let _ = session.run_commands(self.before_all_commands.clone()).await;
        let result = session.run_commands(commands).await;
        let _ = session.run_commands(self.after_all_commands.clone()).await;

        session.close().await?;
        result
    }

    /// Replaces the commands run at the start of every session
    /// (e.g. "terminal length 0").
    pub fn before_all(&mut self, commands: Vec<String>) {
        self.before_all_commands = commands;
    }

    /// Replaces the commands run at the end of every session.
    pub fn after_all(&mut self, commands: Vec<String>) {
        self.after_all_commands = commands;
    }
}
|
||||||
|
|
||||||
|
pub struct BrocadeSession {
|
||||||
|
pub channel: russh::Channel<russh::client::Msg>,
|
||||||
|
pub mode: ExecutionMode,
|
||||||
|
pub options: BrocadeOptions,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl BrocadeSession {
    /// Opens an interactive shell session: connects over SSH, requests a
    /// vt100 PTY + shell, waits for the CLI prompt, and — for
    /// `ExecutionMode::Privileged` — elevates via `enable`.
    ///
    /// # Errors
    /// Propagates SSH connection/authentication errors, shell-readiness
    /// timeouts, and `enable` authentication failures.
    pub async fn open(
        ip: IpAddr,
        port: u16,
        username: &str,
        password: &str,
        options: BrocadeOptions,
        mode: ExecutionMode,
    ) -> Result<Self, Error> {
        let client = ssh::create_client(ip, port, username, password, &options).await?;
        let mut channel = client.channel_open_session().await?;

        // The Brocade CLI requires a PTY; vt100 80x24 is a safe lowest
        // common denominator terminal.
        channel
            .request_pty(false, "vt100", 80, 24, 0, 0, &[])
            .await?;
        channel.request_shell(false).await?;

        wait_for_shell_ready(&mut channel, &options.timeouts).await?;

        if let ExecutionMode::Privileged = mode {
            try_elevate_session(&mut channel, username, password, &options.timeouts).await?;
        }

        Ok(Self {
            channel,
            mode,
            options,
        })
    }

    /// Logs out of the switch and drains the channel until it closes or the
    /// cleanup timeout elapses.
    ///
    /// `exit` is sent twice in privileged mode: once to leave enable mode,
    /// once to end the login shell.
    pub async fn close(&mut self) -> Result<(), Error> {
        debug!("[Brocade] Closing session...");

        self.channel.data(&b"exit\n"[..]).await?;
        if let ExecutionMode::Privileged = self.mode {
            self.channel.data(&b"exit\n"[..]).await?;
        }

        let start = Instant::now();
        while start.elapsed() < self.options.timeouts.cleanup {
            match timeout(self.options.timeouts.message_wait, self.channel.wait()).await {
                Ok(Some(ChannelMsg::Close)) => break,
                Ok(Some(_)) => continue,
                // Channel ended or went idle: treat as closed.
                Ok(None) | Err(_) => break,
            }
        }

        debug!("[Brocade] Session closed.");
        Ok(())
    }

    /// Executes a single CLI command and returns its raw textual output
    /// (including echoed command and trailing prompt).
    ///
    /// Honors dry-run: mutating commands are skipped and return an empty
    /// string (see `should_skip_command`).
    ///
    /// # Errors
    /// `TimeoutError` if no prompt appears in time, `UnexpectedError` on
    /// non-UTF-8 output, `CommandError` when the output matches a known
    /// error pattern.
    pub async fn run_command(&mut self, command: &str) -> Result<String, Error> {
        if self.should_skip_command(command) {
            return Ok(String::new());
        }

        debug!("[Brocade] Running command: '{command}'...");

        self.channel
            .data(format!("{}\n", command).as_bytes())
            .await?;
        // Give the switch a moment to start echoing before polling for output.
        tokio::time::sleep(Duration::from_millis(100)).await;

        let output = self.collect_command_output().await?;
        let output = String::from_utf8(output)
            .map_err(|_| Error::UnexpectedError("Invalid UTF-8 in command output".to_string()))?;

        self.check_for_command_errors(&output, command)?;
        Ok(output)
    }

    /// Runs `commands` sequentially in this session, stopping at the first
    /// failure.
    pub async fn run_commands(&mut self, commands: Vec<String>) -> Result<(), Error> {
        for command in commands {
            self.run_command(&command).await?;
        }
        Ok(())
    }

    /// Returns `true` when dry-run mode is active and `command` would mutate
    /// switch state (`write …` / `deploy …`); such commands are logged and
    /// skipped instead of executed.
    fn should_skip_command(&self, command: &str) -> bool {
        if (command.starts_with("write") || command.starts_with("deploy")) && self.options.dry_run {
            info!("[Brocade] Dry-run mode enabled, skipping command: {command}");
            return true;
        }
        false
    }

    /// Accumulates channel data until a prompt character appears, the channel
    /// closes, or the command-execution timeout fires.
    ///
    /// Completion heuristic: the collected bytes contain `'>'` or `'#'`
    /// (the Brocade user/privileged prompts).
    /// NOTE(review): command output that itself contains those characters
    /// will terminate collection early — confirm the commands used here never
    /// emit them mid-output.
    async fn collect_command_output(&mut self) -> Result<Vec<u8>, Error> {
        let mut output = Vec::new();
        let start = Instant::now();
        let read_timeout = Duration::from_millis(500);
        let log_interval = Duration::from_secs(5);
        let mut last_log = Instant::now();

        loop {
            // Hard deadline for the whole command.
            if start.elapsed() > self.options.timeouts.command_execution {
                return Err(Error::TimeoutError(
                    "Timeout waiting for command completion.".into(),
                ));
            }

            // Periodic progress log for slow commands (at most every 5s).
            if start.elapsed() > self.options.timeouts.command_output
                && last_log.elapsed() > log_interval
            {
                info!("[Brocade] Waiting for command output...");
                last_log = Instant::now();
            }

            match timeout(read_timeout, self.channel.wait()).await {
                Ok(Some(ChannelMsg::Data { data } | ChannelMsg::ExtendedData { data, .. })) => {
                    output.extend_from_slice(&data);
                    let current_output = String::from_utf8_lossy(&output);
                    if current_output.contains('>') || current_output.contains('#') {
                        return Ok(output);
                    }
                }
                Ok(Some(ChannelMsg::Eof | ChannelMsg::Close)) => return Ok(output),
                Ok(Some(ChannelMsg::ExitStatus { exit_status })) => {
                    debug!("[Brocade] Command exit status: {exit_status}");
                }
                Ok(Some(_)) => continue,
                // No message within `read_timeout` (or channel idle/ended).
                Ok(None) | Err(_) => {
                    if output.is_empty() {
                        // Nothing received yet: probe once more; a second
                        // `None` means the channel is gone, so stop waiting.
                        if let Ok(None) = timeout(read_timeout, self.channel.wait()).await {
                            break;
                        }
                        continue;
                    }

                    // We have partial output but the stream went quiet: give
                    // it a beat, then accept if a prompt is already present.
                    tokio::time::sleep(Duration::from_millis(100)).await;
                    let current_output = String::from_utf8_lossy(&output);
                    if current_output.contains('>') || current_output.contains('#') {
                        return Ok(output);
                    }
                }
            }
        }

        Ok(output)
    }

    /// Scans `output` for known Brocade error patterns and flags suspiciously
    /// empty output from non-`show` commands (every accepted command should at
    /// least echo a prompt).
    fn check_for_command_errors(&self, output: &str, command: &str) -> Result<(), Error> {
        // Lower-cased substrings that indicate the switch rejected the command.
        const ERROR_PATTERNS: &[&str] = &[
            "invalid input",
            "syntax error",
            "command not found",
            "unknown command",
            "permission denied",
            "access denied",
            "authentication failed",
            "configuration error",
            "failed to",
            "error:",
        ];

        let output_lower = output.to_lowercase();
        if ERROR_PATTERNS.iter().any(|&p| output_lower.contains(p)) {
            return Err(Error::CommandError(format!(
                "Command error: {}",
                output.trim()
            )));
        }

        if !command.starts_with("show") && output.trim().is_empty() {
            return Err(Error::CommandError(format!(
                "Command '{command}' produced no output"
            )));
        }

        Ok(())
    }
}
|
||||||
|
|
||||||
|
/// Drains channel data until the shell prompt (`>` or `#`) appears, or until
/// `timeouts.shell_ready` elapses.
///
/// NOTE(review): on timeout — or if the channel ends — this still returns
/// `Ok(())`, so a shell that never became ready is silently treated as ready;
/// confirm this best-effort behavior is intended.
async fn wait_for_shell_ready(
    channel: &mut russh::Channel<russh::client::Msg>,
    timeouts: &TimeoutConfig,
) -> Result<(), Error> {
    let mut buffer = Vec::new();
    let start = Instant::now();

    while start.elapsed() < timeouts.shell_ready {
        match timeout(timeouts.message_wait, channel.wait()).await {
            Ok(Some(ChannelMsg::Data { data })) => {
                buffer.extend_from_slice(&data);
                let output = String::from_utf8_lossy(&buffer);
                let output = output.trim();
                // Brocade prompts end in '>' (user exec) or '#' (privileged).
                if output.ends_with('>') || output.ends_with('#') {
                    debug!("[Brocade] Shell ready");
                    return Ok(());
                }
            }
            // Ignore non-data channel messages.
            Ok(Some(_)) => continue,
            // Channel ended: stop waiting.
            Ok(None) => break,
            // Per-message poll timed out: keep waiting until the deadline.
            Err(_) => continue,
        }
    }
    Ok(())
}
|
||||||
|
|
||||||
|
/// Elevates the shell to privileged mode by sending `enable` and answering
/// the interactive `User Name:` / `Password:` prompts.
///
/// Success is detected when the prompt ends with `#`; a return to the `>`
/// prompt after the credential exchange means enable authentication was
/// rejected.
///
/// # Errors
/// `AuthenticationError` when the switch refuses the credentials or the `#`
/// prompt never appears before `timeouts.shell_ready` elapses.
async fn try_elevate_session(
    channel: &mut russh::Channel<russh::client::Msg>,
    username: &str,
    password: &str,
    timeouts: &TimeoutConfig,
) -> Result<(), Error> {
    channel.data(&b"enable\n"[..]).await?;
    let start = Instant::now();
    let mut buffer = Vec::new();

    while start.elapsed() < timeouts.shell_ready {
        match timeout(timeouts.message_wait, channel.wait()).await {
            Ok(Some(ChannelMsg::Data { data })) => {
                buffer.extend_from_slice(&data);
                let output = String::from_utf8_lossy(&buffer);

                // '#' prompt: privileged mode reached.
                if output.ends_with('#') {
                    debug!("[Brocade] Privileged mode established");
                    return Ok(());
                }

                // Answer credential prompts; clear the buffer after each
                // reply so a stale prompt is not matched twice.
                if output.contains("User Name:") {
                    channel.data(format!("{}\n", username).as_bytes()).await?;
                    buffer.clear();
                } else if output.contains("Password:") {
                    channel.data(format!("{}\n", password).as_bytes()).await?;
                    buffer.clear();
                } else if output.contains('>') {
                    // Back at the unprivileged prompt: enable was refused.
                    return Err(Error::AuthenticationError(
                        "Enable authentication failed".into(),
                    ));
                }
            }
            Ok(Some(_)) => continue,
            Ok(None) => break,
            // Poll timeout: keep waiting until the deadline.
            Err(_) => continue,
        }
    }

    // Deadline reached (or channel ended): decide from the last output seen.
    let output = String::from_utf8_lossy(&buffer);
    if output.ends_with('#') {
        debug!("[Brocade] Privileged mode established");
        Ok(())
    } else {
        Err(Error::AuthenticationError(format!(
            "Enable failed. Output:\n{output}"
        )))
    }
}
|
||||||
113
brocade/src/ssh.rs
Normal file
113
brocade/src/ssh.rs
Normal file
@@ -0,0 +1,113 @@
|
|||||||
|
use std::borrow::Cow;
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use async_trait::async_trait;
|
||||||
|
use russh::client::Handler;
|
||||||
|
use russh::kex::DH_G1_SHA1;
|
||||||
|
use russh::kex::ECDH_SHA2_NISTP256;
|
||||||
|
use russh_keys::key::SSH_RSA;
|
||||||
|
|
||||||
|
use super::BrocadeOptions;
|
||||||
|
use super::Error;
|
||||||
|
|
||||||
|
/// SSH algorithm configuration used when connecting to a Brocade switch.
///
/// `Default` uses the library's standard algorithm preferences; the private
/// constructors below narrow these for switches with limited firmware.
#[derive(Default, Clone, Debug)]
pub struct SshOptions {
    /// Preferred key-exchange / host-key algorithms handed to `russh`.
    pub preferred_algorithms: russh::Preferred,
}
|
||||||
|
|
||||||
|
impl SshOptions {
|
||||||
|
fn ecdhsa_sha2_nistp256() -> Self {
|
||||||
|
Self {
|
||||||
|
preferred_algorithms: russh::Preferred {
|
||||||
|
kex: Cow::Borrowed(&[ECDH_SHA2_NISTP256]),
|
||||||
|
key: Cow::Borrowed(&[SSH_RSA]),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn legacy() -> Self {
|
||||||
|
Self {
|
||||||
|
preferred_algorithms: russh::Preferred {
|
||||||
|
kex: Cow::Borrowed(&[DH_G1_SHA1]),
|
||||||
|
key: Cow::Borrowed(&[SSH_RSA]),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Minimal `russh` client handler for Brocade switch connections.
pub struct Client;

#[async_trait]
impl Handler for Client {
    type Error = Error;

    /// Accepts ANY server host key without verification.
    ///
    /// NOTE(review): this disables host-key checking entirely, which permits
    /// man-in-the-middle attacks. Acceptable only on a trusted management
    /// network — consider pinning known host keys.
    async fn check_server_key(
        &mut self,
        _server_public_key: &russh_keys::key::PublicKey,
    ) -> Result<bool, Self::Error> {
        Ok(true)
    }
}
|
||||||
|
|
||||||
|
pub async fn try_init_client(
|
||||||
|
username: &str,
|
||||||
|
password: &str,
|
||||||
|
ip: &std::net::IpAddr,
|
||||||
|
base_options: BrocadeOptions,
|
||||||
|
) -> Result<BrocadeOptions, Error> {
|
||||||
|
let ssh_options = vec![
|
||||||
|
SshOptions::default(),
|
||||||
|
SshOptions::ecdhsa_sha2_nistp256(),
|
||||||
|
SshOptions::legacy(),
|
||||||
|
];
|
||||||
|
|
||||||
|
for ssh in ssh_options {
|
||||||
|
let opts = BrocadeOptions {
|
||||||
|
ssh,
|
||||||
|
..base_options.clone()
|
||||||
|
};
|
||||||
|
let client = create_client(*ip, 22, username, password, &opts).await;
|
||||||
|
|
||||||
|
match client {
|
||||||
|
Ok(_) => {
|
||||||
|
return Ok(opts);
|
||||||
|
}
|
||||||
|
Err(e) => match e {
|
||||||
|
Error::NetworkError(e) => {
|
||||||
|
if e.contains("No common key exchange algorithm") {
|
||||||
|
continue;
|
||||||
|
} else {
|
||||||
|
return Err(Error::NetworkError(e));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
_ => return Err(e),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Err(Error::NetworkError(
|
||||||
|
"Could not establish ssh connection: wrong key exchange algorithm)".to_string(),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn create_client(
|
||||||
|
ip: std::net::IpAddr,
|
||||||
|
port: u16,
|
||||||
|
username: &str,
|
||||||
|
password: &str,
|
||||||
|
options: &BrocadeOptions,
|
||||||
|
) -> Result<russh::client::Handle<Client>, Error> {
|
||||||
|
let config = russh::client::Config {
|
||||||
|
preferred: options.ssh.preferred_algorithms.clone(),
|
||||||
|
..Default::default()
|
||||||
|
};
|
||||||
|
let mut client = russh::client::connect(Arc::new(config), (ip, port), Client {}).await?;
|
||||||
|
if !client.authenticate_password(username, password).await? {
|
||||||
|
return Err(Error::AuthenticationError(
|
||||||
|
"ssh authentication failed".to_string(),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
Ok(client)
|
||||||
|
}
|
||||||
105
docs/modules/Multisite_PostgreSQL.md
Normal file
105
docs/modules/Multisite_PostgreSQL.md
Normal file
@@ -0,0 +1,105 @@
|
|||||||
|
# Design Document: Harmony PostgreSQL Module
|
||||||
|
|
||||||
|
**Status:** Draft
|
||||||
|
**Last Updated:** 2025-12-01
|
||||||
|
**Context:** Multi-site Data Replication & Orchestration
|
||||||
|
|
||||||
|
## 1. Overview
|
||||||
|
|
||||||
|
The Harmony PostgreSQL Module provides a high-level abstraction for deploying and managing high-availability PostgreSQL clusters across geographically distributed Kubernetes/OKD sites.
|
||||||
|
|
||||||
|
Instead of manually configuring complex replication slots, firewalls, and operator settings on each cluster, users define a single intent (a **Score**), and Harmony orchestrates the underlying infrastructure (the **Arrangement**) to establish a Primary-Replica architecture.
|
||||||
|
|
||||||
|
Currently, the implementation relies on the **CloudNativePG (CNPG)** operator as the backing engine.
|
||||||
|
|
||||||
|
## 2. Architecture
|
||||||
|
|
||||||
|
### 2.1 The Abstraction Model
|
||||||
|
Following **ADR 003 (Infrastructure Abstraction)**, Harmony separates the *intent* from the *implementation*.
|
||||||
|
|
||||||
|
1. **The Score (Intent):** The user defines a `MultisitePostgreSQL` resource. This describes *what* is needed (e.g., "A Postgres 15 cluster with 10GB storage, Primary on Site A, Replica on Site B").
|
||||||
|
2. **The Interpret (Action):** Harmony's `MultisitePostgreSQLInterpret` processes this Score and orchestrates the deployment on both sites to reach the state defined in the Score.
|
||||||
|
3. **The Capability (Implementation):** The PostgreSQL Capability is implemented by the `K8sTopology`; the Interpret can deploy it, configure it, and fetch information about it. The concrete implementation relies on the mature CloudNativePG operator to manage all the required Kubernetes resources.
|
||||||
|
|
||||||
|
### 2.2 Network Connectivity (TLS Passthrough)
|
||||||
|
|
||||||
|
One of the critical challenges in multi-site orchestration is secure connectivity between clusters that may have dynamic IPs or strict firewalls.
|
||||||
|
|
||||||
|
To solve this, we utilize **OKD/OpenShift Routes with TLS Passthrough**.
|
||||||
|
|
||||||
|
* **Mechanism:** The Primary site exposes a `Route` configured for `termination: passthrough`.
|
||||||
|
* **Routing:** The OpenShift HAProxy router inspects the **SNI (Server Name Indication)** header of the incoming TCP connection to route traffic to the correct PostgreSQL Pod.
|
||||||
|
* **Security:** SSL is **not** terminated at the ingress router. The encrypted stream is passed directly to the PostgreSQL instance. Mutual TLS (mTLS) authentication is handled natively by CNPG between the Primary and Replica instances.
|
||||||
|
* **Dynamic IPs:** Because connections are established via DNS hostnames (the Route URL), this architecture is resilient to dynamic IP changes at the Primary site.
|
||||||
|
|
||||||
|
#### Traffic Flow Diagram
|
||||||
|
|
||||||
|
```text
|
||||||
|
[ Site B: Replica ] [ Site A: Primary ]
|
||||||
|
| |
|
||||||
|
(CNPG Instance) --[Encrypted TCP]--> (OKD HAProxy Router)
|
||||||
|
| (Port 443) |
|
||||||
|
| |
|
||||||
|
| [SNI Inspection]
|
||||||
|
| |
|
||||||
|
| v
|
||||||
|
| (PostgreSQL Primary Pod)
|
||||||
|
| (Port 5432)
|
||||||
|
```
|
||||||
|
|
||||||
|
## 3. Design Decisions
|
||||||
|
|
||||||
|
### Why CloudNativePG?
|
||||||
|
We selected CloudNativePG because it relies exclusively on standard Kubernetes primitives and uses the native PostgreSQL replication protocol (WAL shipping/Streaming). This aligns with Harmony's goal of being "K8s Native."
|
||||||
|
|
||||||
|
### Why TLS Passthrough instead of VPN/NodePort?
|
||||||
|
* **NodePort:** Requires static IPs and opening non-standard ports on the firewall, which violates our security constraints.
|
||||||
|
* **VPN (e.g., Wireguard/Tailscale):** While secure, it introduces significant complexity (sidecars, key management) and external dependencies.
|
||||||
|
* **TLS Passthrough:** Leverages the existing Ingress/Router infrastructure already present in OKD. It requires zero additional software and respects multi-tenancy (Routes are namespaced).
|
||||||
|
|
||||||
|
### Configuration Philosophy (YAGNI)
|
||||||
|
The current design exposes a **generic configuration surface**. Users can configure standard parameters (Storage size, CPU/Memory requests, Postgres version).
|
||||||
|
|
||||||
|
**We explicitly do not expose advanced CNPG or PostgreSQL configurations at this stage.**
|
||||||
|
|
||||||
|
* **Reasoning:** We aim to keep the API surface small and manageable.
|
||||||
|
* **Future Path:** We plan to implement a "pass-through" mechanism to allow sending raw config maps or custom parameters to the underlying engine (CNPG) *only when a concrete use case arises*. Until then, we adhere to the **YAGNI (You Ain't Gonna Need It)** principle to avoid premature optimization and API bloat.
|
||||||
|
|
||||||
|
## 4. Usage Guide
|
||||||
|
|
||||||
|
To deploy a multi-site cluster, apply the `MultisitePostgreSQL` resource to the Harmony Control Plane.
|
||||||
|
|
||||||
|
### Example Manifest
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: harmony.io/v1alpha1
|
||||||
|
kind: MultisitePostgreSQL
|
||||||
|
metadata:
|
||||||
|
name: finance-db
|
||||||
|
namespace: tenant-a
|
||||||
|
spec:
|
||||||
|
version: "15"
|
||||||
|
storage: "10Gi"
|
||||||
|
resources:
|
||||||
|
requests:
|
||||||
|
cpu: "500m"
|
||||||
|
memory: "1Gi"
|
||||||
|
|
||||||
|
# Topology Definition
|
||||||
|
topology:
|
||||||
|
primary:
|
||||||
|
site: "site-paris" # The name of the cluster in Harmony
|
||||||
|
replicas:
|
||||||
|
- site: "site-newyork"
|
||||||
|
```
|
||||||
|
|
||||||
|
### What happens next?
|
||||||
|
1. Harmony detects the CR.
|
||||||
|
2. **On Site Paris:** It deploys a CNPG Cluster (Primary) and creates a Passthrough Route `postgres-finance-db.apps.site-paris.example.com`.
|
||||||
|
3. **On Site New York:** It deploys a CNPG Cluster (Replica) configured with `externalClusters` pointing to the Paris Route.
|
||||||
|
4. Data begins replicating immediately over the encrypted channel.
|
||||||
|
|
||||||
|
## 5. Troubleshooting
|
||||||
|
|
||||||
|
* **Connection Refused:** Ensure the Primary site's Route is successfully admitted by the Ingress Controller.
|
||||||
|
* **Certificate Errors:** CNPG manages mTLS automatically. If errors persist, ensure the CA secrets were correctly propagated by Harmony from Primary to Replica namespaces.
|
||||||
18
examples/multisite_postgres/Cargo.toml
Normal file
18
examples/multisite_postgres/Cargo.toml
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
[package]
|
||||||
|
name = "example-multisite-postgres"
|
||||||
|
edition = "2024"
|
||||||
|
version.workspace = true
|
||||||
|
readme.workspace = true
|
||||||
|
license.workspace = true
|
||||||
|
publish = false
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
harmony = { path = "../../harmony" }
|
||||||
|
harmony_cli = { path = "../../harmony_cli" }
|
||||||
|
harmony_types = { path = "../../harmony_types" }
|
||||||
|
cidr = { workspace = true }
|
||||||
|
tokio = { workspace = true }
|
||||||
|
harmony_macros = { path = "../../harmony_macros" }
|
||||||
|
log = { workspace = true }
|
||||||
|
env_logger = { workspace = true }
|
||||||
|
url = { workspace = true }
|
||||||
3
examples/multisite_postgres/env_example.sh
Normal file
3
examples/multisite_postgres/env_example.sh
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
export HARMONY_FAILOVER_TOPOLOGY_K8S_PRIMARY="context=default/api-your-openshift-cluster:6443/kube:admin"
|
||||||
|
export HARMONY_FAILOVER_TOPOLOGY_K8S_REPLICA="context=someuser/somecluster"
|
||||||
|
export RUST_LOG="harmony=debug"
|
||||||
28
examples/multisite_postgres/src/main.rs
Normal file
28
examples/multisite_postgres/src/main.rs
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
use harmony::{
|
||||||
|
inventory::Inventory,
|
||||||
|
modules::postgresql::{PublicPostgreSQLScore, capability::PostgreSQLConfig},
|
||||||
|
topology::{FailoverTopology, K8sAnywhereTopology},
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Example: deploy a publicly reachable multi-site PostgreSQL cluster via
/// Harmony, with the failover topology read from environment variables
/// (see `env_example.sh`).
#[tokio::main]
async fn main() {
    // env_logger::init();
    let postgres = PublicPostgreSQLScore {
        config: PostgreSQLConfig {
            cluster_name: "harmony-postgres-example".to_string(), // Override default name
            namespace: "harmony-public-postgres".to_string(),
            ..Default::default() // Use harmony defaults, they are based on CNPG's default values :
            // "default" namespace, 1 instance, 1Gi storage
        },
        // Public hostname the passthrough Route will answer on.
        hostname: "postgrestest.sto1.nationtech.io".to_string(),
    };

    // Example binary: panicking on failure via `unwrap` is acceptable here.
    harmony_cli::run(
        Inventory::autoload(),
        FailoverTopology::<K8sAnywhereTopology>::from_env(),
        vec![Box::new(postgres)],
        None,
    )
    .await
    .unwrap();
}
|
||||||
@@ -17,3 +17,5 @@ harmony_secret = { path = "../../harmony_secret" }
|
|||||||
log = { workspace = true }
|
log = { workspace = true }
|
||||||
env_logger = { workspace = true }
|
env_logger = { workspace = true }
|
||||||
url = { workspace = true }
|
url = { workspace = true }
|
||||||
|
serde = { workspace = true }
|
||||||
|
brocade = { path = "../../brocade" }
|
||||||
|
|||||||
@@ -1,14 +1,15 @@
|
|||||||
use std::{
|
use std::{
|
||||||
net::{IpAddr, Ipv4Addr},
|
net::{IpAddr, Ipv4Addr},
|
||||||
sync::Arc,
|
sync::{Arc, OnceLock},
|
||||||
};
|
};
|
||||||
|
|
||||||
|
use brocade::BrocadeOptions;
|
||||||
use cidr::Ipv4Cidr;
|
use cidr::Ipv4Cidr;
|
||||||
use harmony::{
|
use harmony::{
|
||||||
config::secret::SshKeyPair,
|
config::secret::SshKeyPair,
|
||||||
data::{FileContent, FilePath},
|
data::{FileContent, FilePath},
|
||||||
hardware::{HostCategory, Location, PhysicalHost, SwitchGroup},
|
hardware::{HostCategory, Location, PhysicalHost, SwitchGroup},
|
||||||
infra::opnsense::OPNSenseManagementInterface,
|
infra::{brocade::BrocadeSwitchClient, opnsense::OPNSenseManagementInterface},
|
||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
modules::{
|
modules::{
|
||||||
http::StaticFilesHttpScore,
|
http::StaticFilesHttpScore,
|
||||||
@@ -22,8 +23,9 @@ use harmony::{
|
|||||||
topology::{LogicalHost, UnmanagedRouter},
|
topology::{LogicalHost, UnmanagedRouter},
|
||||||
};
|
};
|
||||||
use harmony_macros::{ip, mac_address};
|
use harmony_macros::{ip, mac_address};
|
||||||
use harmony_secret::SecretManager;
|
use harmony_secret::{Secret, SecretManager};
|
||||||
use harmony_types::net::Url;
|
use harmony_types::net::Url;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
#[tokio::main]
|
#[tokio::main]
|
||||||
async fn main() {
|
async fn main() {
|
||||||
@@ -32,6 +34,26 @@ async fn main() {
|
|||||||
name: String::from("fw0"),
|
name: String::from("fw0"),
|
||||||
};
|
};
|
||||||
|
|
||||||
|
let switch_auth = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
|
||||||
|
.await
|
||||||
|
.expect("Failed to get credentials");
|
||||||
|
|
||||||
|
let switches: Vec<IpAddr> = vec![ip!("192.168.33.101")];
|
||||||
|
let brocade_options = Some(BrocadeOptions {
|
||||||
|
dry_run: *harmony::config::DRY_RUN,
|
||||||
|
..Default::default()
|
||||||
|
});
|
||||||
|
let switch_client = BrocadeSwitchClient::init(
|
||||||
|
&switches,
|
||||||
|
&switch_auth.username,
|
||||||
|
&switch_auth.password,
|
||||||
|
brocade_options,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.expect("Failed to connect to switch");
|
||||||
|
|
||||||
|
let switch_client = Arc::new(switch_client);
|
||||||
|
|
||||||
let opnsense = Arc::new(
|
let opnsense = Arc::new(
|
||||||
harmony::infra::opnsense::OPNSenseFirewall::new(firewall, None, "root", "opnsense").await,
|
harmony::infra::opnsense::OPNSenseFirewall::new(firewall, None, "root", "opnsense").await,
|
||||||
);
|
);
|
||||||
@@ -39,6 +61,7 @@ async fn main() {
|
|||||||
let gateway_ipv4 = Ipv4Addr::new(192, 168, 33, 1);
|
let gateway_ipv4 = Ipv4Addr::new(192, 168, 33, 1);
|
||||||
let gateway_ip = IpAddr::V4(gateway_ipv4);
|
let gateway_ip = IpAddr::V4(gateway_ipv4);
|
||||||
let topology = harmony::topology::HAClusterTopology {
|
let topology = harmony::topology::HAClusterTopology {
|
||||||
|
kubeconfig: None,
|
||||||
domain_name: "ncd0.harmony.mcd".to_string(), // TODO this must be set manually correctly
|
domain_name: "ncd0.harmony.mcd".to_string(), // TODO this must be set manually correctly
|
||||||
// when setting up the opnsense firewall
|
// when setting up the opnsense firewall
|
||||||
router: Arc::new(UnmanagedRouter::new(
|
router: Arc::new(UnmanagedRouter::new(
|
||||||
@@ -83,7 +106,8 @@ async fn main() {
|
|||||||
name: "wk2".to_string(),
|
name: "wk2".to_string(),
|
||||||
},
|
},
|
||||||
],
|
],
|
||||||
switch: vec![],
|
switch_client: switch_client.clone(),
|
||||||
|
network_manager: OnceLock::new(),
|
||||||
};
|
};
|
||||||
|
|
||||||
let inventory = Inventory {
|
let inventory = Inventory {
|
||||||
@@ -166,3 +190,9 @@ async fn main() {
|
|||||||
.await
|
.await
|
||||||
.unwrap();
|
.unwrap();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Secret, Serialize, Deserialize, Debug)]
|
||||||
|
pub struct BrocadeSwitchAuth {
|
||||||
|
pub username: String,
|
||||||
|
pub password: String,
|
||||||
|
}
|
||||||
|
|||||||
@@ -19,3 +19,4 @@ log = { workspace = true }
|
|||||||
env_logger = { workspace = true }
|
env_logger = { workspace = true }
|
||||||
url = { workspace = true }
|
url = { workspace = true }
|
||||||
serde.workspace = true
|
serde.workspace = true
|
||||||
|
brocade = { path = "../../brocade" }
|
||||||
|
|||||||
@@ -1,14 +1,18 @@
|
|||||||
|
use brocade::BrocadeOptions;
|
||||||
use cidr::Ipv4Cidr;
|
use cidr::Ipv4Cidr;
|
||||||
use harmony::{
|
use harmony::{
|
||||||
hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup},
|
hardware::{Location, SwitchGroup},
|
||||||
infra::opnsense::OPNSenseManagementInterface,
|
infra::{brocade::BrocadeSwitchClient, opnsense::OPNSenseManagementInterface},
|
||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
topology::{HAClusterTopology, LogicalHost, UnmanagedRouter},
|
topology::{HAClusterTopology, LogicalHost, UnmanagedRouter},
|
||||||
};
|
};
|
||||||
use harmony_macros::{ip, ipv4};
|
use harmony_macros::{ip, ipv4};
|
||||||
use harmony_secret::{Secret, SecretManager};
|
use harmony_secret::{Secret, SecretManager};
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
use std::{net::IpAddr, sync::Arc};
|
use std::{
|
||||||
|
net::IpAddr,
|
||||||
|
sync::{Arc, OnceLock},
|
||||||
|
};
|
||||||
|
|
||||||
#[derive(Secret, Serialize, Deserialize, Debug, PartialEq)]
|
#[derive(Secret, Serialize, Deserialize, Debug, PartialEq)]
|
||||||
struct OPNSenseFirewallConfig {
|
struct OPNSenseFirewallConfig {
|
||||||
@@ -22,6 +26,26 @@ pub async fn get_topology() -> HAClusterTopology {
|
|||||||
name: String::from("opnsense-1"),
|
name: String::from("opnsense-1"),
|
||||||
};
|
};
|
||||||
|
|
||||||
|
let switch_auth = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
|
||||||
|
.await
|
||||||
|
.expect("Failed to get credentials");
|
||||||
|
|
||||||
|
let switches: Vec<IpAddr> = vec![ip!("192.168.1.101")]; // TODO: Adjust me
|
||||||
|
let brocade_options = Some(BrocadeOptions {
|
||||||
|
dry_run: *harmony::config::DRY_RUN,
|
||||||
|
..Default::default()
|
||||||
|
});
|
||||||
|
let switch_client = BrocadeSwitchClient::init(
|
||||||
|
&switches,
|
||||||
|
&switch_auth.username,
|
||||||
|
&switch_auth.password,
|
||||||
|
brocade_options,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.expect("Failed to connect to switch");
|
||||||
|
|
||||||
|
let switch_client = Arc::new(switch_client);
|
||||||
|
|
||||||
let config = SecretManager::get_or_prompt::<OPNSenseFirewallConfig>().await;
|
let config = SecretManager::get_or_prompt::<OPNSenseFirewallConfig>().await;
|
||||||
let config = config.unwrap();
|
let config = config.unwrap();
|
||||||
|
|
||||||
@@ -38,6 +62,7 @@ pub async fn get_topology() -> HAClusterTopology {
|
|||||||
let gateway_ipv4 = ipv4!("192.168.1.1");
|
let gateway_ipv4 = ipv4!("192.168.1.1");
|
||||||
let gateway_ip = IpAddr::V4(gateway_ipv4);
|
let gateway_ip = IpAddr::V4(gateway_ipv4);
|
||||||
harmony::topology::HAClusterTopology {
|
harmony::topology::HAClusterTopology {
|
||||||
|
kubeconfig: None,
|
||||||
domain_name: "demo.harmony.mcd".to_string(),
|
domain_name: "demo.harmony.mcd".to_string(),
|
||||||
router: Arc::new(UnmanagedRouter::new(
|
router: Arc::new(UnmanagedRouter::new(
|
||||||
gateway_ip,
|
gateway_ip,
|
||||||
@@ -58,7 +83,8 @@ pub async fn get_topology() -> HAClusterTopology {
|
|||||||
name: "bootstrap".to_string(),
|
name: "bootstrap".to_string(),
|
||||||
},
|
},
|
||||||
workers: vec![],
|
workers: vec![],
|
||||||
switch: vec![],
|
switch_client: switch_client.clone(),
|
||||||
|
network_manager: OnceLock::new(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -75,3 +101,9 @@ pub fn get_inventory() -> Inventory {
|
|||||||
control_plane_host: vec![],
|
control_plane_host: vec![],
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Secret, Serialize, Deserialize, Debug)]
|
||||||
|
pub struct BrocadeSwitchAuth {
|
||||||
|
pub username: String,
|
||||||
|
pub password: String,
|
||||||
|
}
|
||||||
|
|||||||
@@ -19,3 +19,4 @@ log = { workspace = true }
|
|||||||
env_logger = { workspace = true }
|
env_logger = { workspace = true }
|
||||||
url = { workspace = true }
|
url = { workspace = true }
|
||||||
serde.workspace = true
|
serde.workspace = true
|
||||||
|
brocade = { path = "../../brocade" }
|
||||||
|
|||||||
@@ -1,14 +1,19 @@
|
|||||||
|
use brocade::BrocadeOptions;
|
||||||
use cidr::Ipv4Cidr;
|
use cidr::Ipv4Cidr;
|
||||||
use harmony::{
|
use harmony::{
|
||||||
config::secret::OPNSenseFirewallCredentials,
|
config::secret::OPNSenseFirewallCredentials,
|
||||||
hardware::{Location, SwitchGroup},
|
hardware::{Location, SwitchGroup},
|
||||||
infra::opnsense::OPNSenseManagementInterface,
|
infra::{brocade::BrocadeSwitchClient, opnsense::OPNSenseManagementInterface},
|
||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
topology::{HAClusterTopology, LogicalHost, UnmanagedRouter},
|
topology::{HAClusterTopology, LogicalHost, UnmanagedRouter},
|
||||||
};
|
};
|
||||||
use harmony_macros::{ip, ipv4};
|
use harmony_macros::{ip, ipv4};
|
||||||
use harmony_secret::SecretManager;
|
use harmony_secret::{Secret, SecretManager};
|
||||||
use std::{net::IpAddr, sync::Arc};
|
use serde::{Deserialize, Serialize};
|
||||||
|
use std::{
|
||||||
|
net::IpAddr,
|
||||||
|
sync::{Arc, OnceLock},
|
||||||
|
};
|
||||||
|
|
||||||
pub async fn get_topology() -> HAClusterTopology {
|
pub async fn get_topology() -> HAClusterTopology {
|
||||||
let firewall = harmony::topology::LogicalHost {
|
let firewall = harmony::topology::LogicalHost {
|
||||||
@@ -16,6 +21,26 @@ pub async fn get_topology() -> HAClusterTopology {
|
|||||||
name: String::from("opnsense-1"),
|
name: String::from("opnsense-1"),
|
||||||
};
|
};
|
||||||
|
|
||||||
|
let switch_auth = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
|
||||||
|
.await
|
||||||
|
.expect("Failed to get credentials");
|
||||||
|
|
||||||
|
let switches: Vec<IpAddr> = vec![ip!("192.168.1.101")]; // TODO: Adjust me
|
||||||
|
let brocade_options = Some(BrocadeOptions {
|
||||||
|
dry_run: *harmony::config::DRY_RUN,
|
||||||
|
..Default::default()
|
||||||
|
});
|
||||||
|
let switch_client = BrocadeSwitchClient::init(
|
||||||
|
&switches,
|
||||||
|
&switch_auth.username,
|
||||||
|
&switch_auth.password,
|
||||||
|
brocade_options,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.expect("Failed to connect to switch");
|
||||||
|
|
||||||
|
let switch_client = Arc::new(switch_client);
|
||||||
|
|
||||||
let config = SecretManager::get_or_prompt::<OPNSenseFirewallCredentials>().await;
|
let config = SecretManager::get_or_prompt::<OPNSenseFirewallCredentials>().await;
|
||||||
let config = config.unwrap();
|
let config = config.unwrap();
|
||||||
|
|
||||||
@@ -32,6 +57,7 @@ pub async fn get_topology() -> HAClusterTopology {
|
|||||||
let gateway_ipv4 = ipv4!("192.168.1.1");
|
let gateway_ipv4 = ipv4!("192.168.1.1");
|
||||||
let gateway_ip = IpAddr::V4(gateway_ipv4);
|
let gateway_ip = IpAddr::V4(gateway_ipv4);
|
||||||
harmony::topology::HAClusterTopology {
|
harmony::topology::HAClusterTopology {
|
||||||
|
kubeconfig: None,
|
||||||
domain_name: "demo.harmony.mcd".to_string(),
|
domain_name: "demo.harmony.mcd".to_string(),
|
||||||
router: Arc::new(UnmanagedRouter::new(
|
router: Arc::new(UnmanagedRouter::new(
|
||||||
gateway_ip,
|
gateway_ip,
|
||||||
@@ -52,7 +78,8 @@ pub async fn get_topology() -> HAClusterTopology {
|
|||||||
name: "cp0".to_string(),
|
name: "cp0".to_string(),
|
||||||
},
|
},
|
||||||
workers: vec![],
|
workers: vec![],
|
||||||
switch: vec![],
|
switch_client: switch_client.clone(),
|
||||||
|
network_manager: OnceLock::new(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -69,3 +96,9 @@ pub fn get_inventory() -> Inventory {
|
|||||||
control_plane_host: vec![],
|
control_plane_host: vec![],
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Secret, Serialize, Deserialize, Debug)]
|
||||||
|
pub struct BrocadeSwitchAuth {
|
||||||
|
pub username: String,
|
||||||
|
pub password: String,
|
||||||
|
}
|
||||||
|
|||||||
14
examples/openbao/Cargo.toml
Normal file
14
examples/openbao/Cargo.toml
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
[package]
|
||||||
|
name = "example-openbao"
|
||||||
|
edition = "2024"
|
||||||
|
version.workspace = true
|
||||||
|
readme.workspace = true
|
||||||
|
license.workspace = true
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
harmony = { path = "../../harmony" }
|
||||||
|
harmony_cli = { path = "../../harmony_cli" }
|
||||||
|
harmony_macros = { path = "../../harmony_macros" }
|
||||||
|
harmony_types = { path = "../../harmony_types" }
|
||||||
|
tokio.workspace = true
|
||||||
|
url.workspace = true
|
||||||
7
examples/openbao/README.md
Normal file
7
examples/openbao/README.md
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
To install an openbao instance with harmony simply `cargo run -p example-openbao` .
|
||||||
|
|
||||||
|
Depending on your environement configuration, it will either install a k3d cluster locally and deploy on it, or install to a remote cluster.
|
||||||
|
|
||||||
|
Then follow the openbao documentation to initialize and unseal, this will make openbao usable.
|
||||||
|
|
||||||
|
https://openbao.org/docs/platform/k8s/helm/run/
|
||||||
67
examples/openbao/src/main.rs
Normal file
67
examples/openbao/src/main.rs
Normal file
@@ -0,0 +1,67 @@
|
|||||||
|
use std::str::FromStr;
|
||||||
|
|
||||||
|
use harmony::{
|
||||||
|
inventory::Inventory,
|
||||||
|
modules::helm::chart::{HelmChartScore, HelmRepository, NonBlankString},
|
||||||
|
topology::K8sAnywhereTopology,
|
||||||
|
};
|
||||||
|
use harmony_macros::hurl;
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() {
|
||||||
|
let values_yaml = Some(
|
||||||
|
r#"server:
|
||||||
|
standalone:
|
||||||
|
enabled: true
|
||||||
|
config: |
|
||||||
|
listener "tcp" {
|
||||||
|
tls_disable = true
|
||||||
|
address = "[::]:8200"
|
||||||
|
cluster_address = "[::]:8201"
|
||||||
|
}
|
||||||
|
|
||||||
|
storage "file" {
|
||||||
|
path = "/openbao/data"
|
||||||
|
}
|
||||||
|
|
||||||
|
service:
|
||||||
|
enabled: true
|
||||||
|
|
||||||
|
dataStorage:
|
||||||
|
enabled: true
|
||||||
|
size: 10Gi
|
||||||
|
storageClass: null
|
||||||
|
accessMode: ReadWriteOnce
|
||||||
|
|
||||||
|
auditStorage:
|
||||||
|
enabled: true
|
||||||
|
size: 10Gi
|
||||||
|
storageClass: null
|
||||||
|
accessMode: ReadWriteOnce"#
|
||||||
|
.to_string(),
|
||||||
|
);
|
||||||
|
let openbao = HelmChartScore {
|
||||||
|
namespace: Some(NonBlankString::from_str("openbao").unwrap()),
|
||||||
|
release_name: NonBlankString::from_str("openbao").unwrap(),
|
||||||
|
chart_name: NonBlankString::from_str("openbao/openbao").unwrap(),
|
||||||
|
chart_version: None,
|
||||||
|
values_overrides: None,
|
||||||
|
values_yaml,
|
||||||
|
create_namespace: true,
|
||||||
|
install_only: true,
|
||||||
|
repository: Some(HelmRepository::new(
|
||||||
|
"openbao".to_string(),
|
||||||
|
hurl!("https://openbao.github.io/openbao-helm"),
|
||||||
|
true,
|
||||||
|
)),
|
||||||
|
};
|
||||||
|
|
||||||
|
harmony_cli::run(
|
||||||
|
Inventory::autoload(),
|
||||||
|
K8sAnywhereTopology::from_env(),
|
||||||
|
vec![Box::new(openbao)],
|
||||||
|
None,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
}
|
||||||
18
examples/operatorhub_catalog/Cargo.toml
Normal file
18
examples/operatorhub_catalog/Cargo.toml
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
[package]
|
||||||
|
name = "example-operatorhub-catalogsource"
|
||||||
|
edition = "2024"
|
||||||
|
version.workspace = true
|
||||||
|
readme.workspace = true
|
||||||
|
license.workspace = true
|
||||||
|
publish = false
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
harmony = { path = "../../harmony" }
|
||||||
|
harmony_cli = { path = "../../harmony_cli" }
|
||||||
|
harmony_types = { path = "../../harmony_types" }
|
||||||
|
cidr = { workspace = true }
|
||||||
|
tokio = { workspace = true }
|
||||||
|
harmony_macros = { path = "../../harmony_macros" }
|
||||||
|
log = { workspace = true }
|
||||||
|
env_logger = { workspace = true }
|
||||||
|
url = { workspace = true }
|
||||||
22
examples/operatorhub_catalog/src/main.rs
Normal file
22
examples/operatorhub_catalog/src/main.rs
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
use std::str::FromStr;
|
||||||
|
|
||||||
|
use harmony::{
|
||||||
|
inventory::Inventory,
|
||||||
|
modules::{k8s::apps::OperatorHubCatalogSourceScore, postgresql::CloudNativePgOperatorScore},
|
||||||
|
topology::K8sAnywhereTopology,
|
||||||
|
};
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() {
|
||||||
|
let operatorhub_catalog = OperatorHubCatalogSourceScore::default();
|
||||||
|
let cnpg_operator = CloudNativePgOperatorScore::default();
|
||||||
|
|
||||||
|
harmony_cli::run(
|
||||||
|
Inventory::autoload(),
|
||||||
|
K8sAnywhereTopology::from_env(),
|
||||||
|
vec![Box::new(operatorhub_catalog), Box::new(cnpg_operator)],
|
||||||
|
None,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
}
|
||||||
@@ -16,3 +16,6 @@ harmony_macros = { path = "../../harmony_macros" }
|
|||||||
log = { workspace = true }
|
log = { workspace = true }
|
||||||
env_logger = { workspace = true }
|
env_logger = { workspace = true }
|
||||||
url = { workspace = true }
|
url = { workspace = true }
|
||||||
|
harmony_secret = { path = "../../harmony_secret" }
|
||||||
|
brocade = { path = "../../brocade" }
|
||||||
|
serde = { workspace = true }
|
||||||
|
|||||||
@@ -1,12 +1,13 @@
|
|||||||
use std::{
|
use std::{
|
||||||
net::{IpAddr, Ipv4Addr},
|
net::{IpAddr, Ipv4Addr},
|
||||||
sync::Arc,
|
sync::{Arc, OnceLock},
|
||||||
};
|
};
|
||||||
|
|
||||||
|
use brocade::BrocadeOptions;
|
||||||
use cidr::Ipv4Cidr;
|
use cidr::Ipv4Cidr;
|
||||||
use harmony::{
|
use harmony::{
|
||||||
hardware::{HostCategory, Location, PhysicalHost, SwitchGroup},
|
hardware::{HostCategory, Location, PhysicalHost, SwitchGroup},
|
||||||
infra::opnsense::OPNSenseManagementInterface,
|
infra::{brocade::BrocadeSwitchClient, opnsense::OPNSenseManagementInterface},
|
||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
modules::{
|
modules::{
|
||||||
dummy::{ErrorScore, PanicScore, SuccessScore},
|
dummy::{ErrorScore, PanicScore, SuccessScore},
|
||||||
@@ -18,7 +19,9 @@ use harmony::{
|
|||||||
topology::{LogicalHost, UnmanagedRouter},
|
topology::{LogicalHost, UnmanagedRouter},
|
||||||
};
|
};
|
||||||
use harmony_macros::{ip, mac_address};
|
use harmony_macros::{ip, mac_address};
|
||||||
|
use harmony_secret::{Secret, SecretManager};
|
||||||
use harmony_types::net::Url;
|
use harmony_types::net::Url;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
#[tokio::main]
|
#[tokio::main]
|
||||||
async fn main() {
|
async fn main() {
|
||||||
@@ -27,6 +30,26 @@ async fn main() {
|
|||||||
name: String::from("opnsense-1"),
|
name: String::from("opnsense-1"),
|
||||||
};
|
};
|
||||||
|
|
||||||
|
let switch_auth = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
|
||||||
|
.await
|
||||||
|
.expect("Failed to get credentials");
|
||||||
|
|
||||||
|
let switches: Vec<IpAddr> = vec![ip!("192.168.5.101")]; // TODO: Adjust me
|
||||||
|
let brocade_options = Some(BrocadeOptions {
|
||||||
|
dry_run: *harmony::config::DRY_RUN,
|
||||||
|
..Default::default()
|
||||||
|
});
|
||||||
|
let switch_client = BrocadeSwitchClient::init(
|
||||||
|
&switches,
|
||||||
|
&switch_auth.username,
|
||||||
|
&switch_auth.password,
|
||||||
|
brocade_options,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.expect("Failed to connect to switch");
|
||||||
|
|
||||||
|
let switch_client = Arc::new(switch_client);
|
||||||
|
|
||||||
let opnsense = Arc::new(
|
let opnsense = Arc::new(
|
||||||
harmony::infra::opnsense::OPNSenseFirewall::new(firewall, None, "root", "opnsense").await,
|
harmony::infra::opnsense::OPNSenseFirewall::new(firewall, None, "root", "opnsense").await,
|
||||||
);
|
);
|
||||||
@@ -34,6 +57,7 @@ async fn main() {
|
|||||||
let gateway_ipv4 = Ipv4Addr::new(10, 100, 8, 1);
|
let gateway_ipv4 = Ipv4Addr::new(10, 100, 8, 1);
|
||||||
let gateway_ip = IpAddr::V4(gateway_ipv4);
|
let gateway_ip = IpAddr::V4(gateway_ipv4);
|
||||||
let topology = harmony::topology::HAClusterTopology {
|
let topology = harmony::topology::HAClusterTopology {
|
||||||
|
kubeconfig: None,
|
||||||
domain_name: "demo.harmony.mcd".to_string(),
|
domain_name: "demo.harmony.mcd".to_string(),
|
||||||
router: Arc::new(UnmanagedRouter::new(
|
router: Arc::new(UnmanagedRouter::new(
|
||||||
gateway_ip,
|
gateway_ip,
|
||||||
@@ -54,7 +78,8 @@ async fn main() {
|
|||||||
name: "cp0".to_string(),
|
name: "cp0".to_string(),
|
||||||
},
|
},
|
||||||
workers: vec![],
|
workers: vec![],
|
||||||
switch: vec![],
|
switch_client: switch_client.clone(),
|
||||||
|
network_manager: OnceLock::new(),
|
||||||
};
|
};
|
||||||
|
|
||||||
let inventory = Inventory {
|
let inventory = Inventory {
|
||||||
@@ -109,3 +134,9 @@ async fn main() {
|
|||||||
.await
|
.await
|
||||||
.unwrap();
|
.unwrap();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Secret, Serialize, Deserialize, Debug)]
|
||||||
|
pub struct BrocadeSwitchAuth {
|
||||||
|
pub username: String,
|
||||||
|
pub password: String,
|
||||||
|
}
|
||||||
|
|||||||
18
examples/postgresql/Cargo.toml
Normal file
18
examples/postgresql/Cargo.toml
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
[package]
|
||||||
|
name = "example-postgresql"
|
||||||
|
edition = "2024"
|
||||||
|
version.workspace = true
|
||||||
|
readme.workspace = true
|
||||||
|
license.workspace = true
|
||||||
|
publish = false
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
harmony = { path = "../../harmony" }
|
||||||
|
harmony_cli = { path = "../../harmony_cli" }
|
||||||
|
harmony_types = { path = "../../harmony_types" }
|
||||||
|
cidr = { workspace = true }
|
||||||
|
tokio = { workspace = true }
|
||||||
|
harmony_macros = { path = "../../harmony_macros" }
|
||||||
|
log = { workspace = true }
|
||||||
|
env_logger = { workspace = true }
|
||||||
|
url = { workspace = true }
|
||||||
26
examples/postgresql/src/main.rs
Normal file
26
examples/postgresql/src/main.rs
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
use harmony::{
|
||||||
|
inventory::Inventory,
|
||||||
|
modules::postgresql::{PostgreSQLScore, capability::PostgreSQLConfig},
|
||||||
|
topology::K8sAnywhereTopology,
|
||||||
|
};
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() {
|
||||||
|
let postgresql = PostgreSQLScore {
|
||||||
|
config: PostgreSQLConfig {
|
||||||
|
cluster_name: "harmony-postgres-example".to_string(), // Override default name
|
||||||
|
namespace: "harmony-postgres-example".to_string(),
|
||||||
|
..Default::default() // Use harmony defaults, they are based on CNPG's default values :
|
||||||
|
// "default" namespace, 1 instance, 1Gi storage
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
harmony_cli::run(
|
||||||
|
Inventory::autoload(),
|
||||||
|
K8sAnywhereTopology::from_env(),
|
||||||
|
vec![Box::new(postgresql)],
|
||||||
|
None,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
}
|
||||||
18
examples/public_postgres/Cargo.toml
Normal file
18
examples/public_postgres/Cargo.toml
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
[package]
|
||||||
|
name = "example-public-postgres"
|
||||||
|
edition = "2024"
|
||||||
|
version.workspace = true
|
||||||
|
readme.workspace = true
|
||||||
|
license.workspace = true
|
||||||
|
publish = false
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
harmony = { path = "../../harmony" }
|
||||||
|
harmony_cli = { path = "../../harmony_cli" }
|
||||||
|
harmony_types = { path = "../../harmony_types" }
|
||||||
|
cidr = { workspace = true }
|
||||||
|
tokio = { workspace = true }
|
||||||
|
harmony_macros = { path = "../../harmony_macros" }
|
||||||
|
log = { workspace = true }
|
||||||
|
env_logger = { workspace = true }
|
||||||
|
url = { workspace = true }
|
||||||
38
examples/public_postgres/src/main.rs
Normal file
38
examples/public_postgres/src/main.rs
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
use harmony::{
|
||||||
|
inventory::Inventory,
|
||||||
|
modules::postgresql::{
|
||||||
|
K8sPostgreSQLScore, PostgreSQLConnectionScore, PublicPostgreSQLScore,
|
||||||
|
capability::PostgreSQLConfig,
|
||||||
|
},
|
||||||
|
topology::K8sAnywhereTopology,
|
||||||
|
};
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() {
|
||||||
|
let postgres = PublicPostgreSQLScore {
|
||||||
|
config: PostgreSQLConfig {
|
||||||
|
cluster_name: "harmony-postgres-example".to_string(), // Override default name
|
||||||
|
namespace: "harmony-public-postgres".to_string(),
|
||||||
|
..Default::default() // Use harmony defaults, they are based on CNPG's default values :
|
||||||
|
// 1 instance, 1Gi storage
|
||||||
|
},
|
||||||
|
hostname: "postgrestest.sto1.nationtech.io".to_string(),
|
||||||
|
};
|
||||||
|
|
||||||
|
let test_connection = PostgreSQLConnectionScore {
|
||||||
|
name: "harmony-postgres-example".to_string(),
|
||||||
|
namespace: "harmony-public-postgres".to_string(),
|
||||||
|
cluster_name: "harmony-postgres-example".to_string(),
|
||||||
|
hostname: Some("postgrestest.sto1.nationtech.io".to_string()),
|
||||||
|
port_override: Some(443),
|
||||||
|
};
|
||||||
|
|
||||||
|
harmony_cli::run(
|
||||||
|
Inventory::autoload(),
|
||||||
|
K8sAnywhereTopology::from_env(),
|
||||||
|
vec![Box::new(postgres), Box::new(test_connection)],
|
||||||
|
None,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
}
|
||||||
11
examples/remove_rook_osd/Cargo.toml
Normal file
11
examples/remove_rook_osd/Cargo.toml
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
[package]
|
||||||
|
name = "example-remove-rook-osd"
|
||||||
|
edition = "2024"
|
||||||
|
version.workspace = true
|
||||||
|
readme.workspace = true
|
||||||
|
license.workspace = true
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
harmony = { version = "0.1.0", path = "../../harmony" }
|
||||||
|
harmony_cli = { version = "0.1.0", path = "../../harmony_cli" }
|
||||||
|
tokio.workspace = true
|
||||||
18
examples/remove_rook_osd/src/main.rs
Normal file
18
examples/remove_rook_osd/src/main.rs
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
use harmony::{
|
||||||
|
inventory::Inventory, modules::storage::ceph::ceph_remove_osd_score::CephRemoveOsd,
|
||||||
|
topology::K8sAnywhereTopology,
|
||||||
|
};
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() {
|
||||||
|
let ceph_score = CephRemoveOsd {
|
||||||
|
osd_deployment_name: "rook-ceph-osd-2".to_string(),
|
||||||
|
rook_ceph_namespace: "rook-ceph".to_string(),
|
||||||
|
};
|
||||||
|
|
||||||
|
let topology = K8sAnywhereTopology::from_env();
|
||||||
|
let inventory = Inventory::autoload();
|
||||||
|
harmony_cli::run(inventory, topology, vec![Box::new(ceph_score)], None)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
}
|
||||||
@@ -3,7 +3,7 @@ use harmony::{
|
|||||||
modules::{
|
modules::{
|
||||||
application::{
|
application::{
|
||||||
ApplicationScore, RustWebFramework, RustWebapp,
|
ApplicationScore, RustWebFramework, RustWebapp,
|
||||||
features::{PackagingDeployment, rhob_monitoring::Monitoring},
|
features::{Monitoring, PackagingDeployment},
|
||||||
},
|
},
|
||||||
monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
|
monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -77,6 +77,9 @@ harmony_secret = { path = "../harmony_secret" }
|
|||||||
askama.workspace = true
|
askama.workspace = true
|
||||||
sqlx.workspace = true
|
sqlx.workspace = true
|
||||||
inquire.workspace = true
|
inquire.workspace = true
|
||||||
|
brocade = { path = "../brocade" }
|
||||||
|
option-ext = "0.2.0"
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
pretty_assertions.workspace = true
|
pretty_assertions.workspace = true
|
||||||
|
assertor.workspace = true
|
||||||
|
|||||||
@@ -30,6 +30,7 @@ pub enum InterpretName {
|
|||||||
Lamp,
|
Lamp,
|
||||||
ApplicationMonitoring,
|
ApplicationMonitoring,
|
||||||
K8sPrometheusCrdAlerting,
|
K8sPrometheusCrdAlerting,
|
||||||
|
CephRemoveOsd,
|
||||||
DiscoverInventoryAgent,
|
DiscoverInventoryAgent,
|
||||||
CephClusterHealth,
|
CephClusterHealth,
|
||||||
Custom(&'static str),
|
Custom(&'static str),
|
||||||
@@ -61,6 +62,7 @@ impl std::fmt::Display for InterpretName {
|
|||||||
InterpretName::Lamp => f.write_str("LAMP"),
|
InterpretName::Lamp => f.write_str("LAMP"),
|
||||||
InterpretName::ApplicationMonitoring => f.write_str("ApplicationMonitoring"),
|
InterpretName::ApplicationMonitoring => f.write_str("ApplicationMonitoring"),
|
||||||
InterpretName::K8sPrometheusCrdAlerting => f.write_str("K8sPrometheusCrdAlerting"),
|
InterpretName::K8sPrometheusCrdAlerting => f.write_str("K8sPrometheusCrdAlerting"),
|
||||||
|
InterpretName::CephRemoveOsd => f.write_str("CephRemoveOsd"),
|
||||||
InterpretName::DiscoverInventoryAgent => f.write_str("DiscoverInventoryAgent"),
|
InterpretName::DiscoverInventoryAgent => f.write_str("DiscoverInventoryAgent"),
|
||||||
InterpretName::CephClusterHealth => f.write_str("CephClusterHealth"),
|
InterpretName::CephClusterHealth => f.write_str("CephClusterHealth"),
|
||||||
InterpretName::Custom(name) => f.write_str(name),
|
InterpretName::Custom(name) => f.write_str(name),
|
||||||
@@ -150,6 +152,12 @@ pub struct InterpretError {
|
|||||||
msg: String,
|
msg: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl From<InterpretError> for String {
|
||||||
|
fn from(e: InterpretError) -> String {
|
||||||
|
format!("InterpretError : {}", e.msg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl std::fmt::Display for InterpretError {
|
impl std::fmt::Display for InterpretError {
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
f.write_str(&self.msg)
|
f.write_str(&self.msg)
|
||||||
|
|||||||
64
harmony/src/domain/topology/failover.rs
Normal file
64
harmony/src/domain/topology/failover.rs
Normal file
@@ -0,0 +1,64 @@
|
|||||||
|
use async_trait::async_trait;
|
||||||
|
|
||||||
|
use crate::topology::k8s_anywhere::K8sAnywhereConfig;
|
||||||
|
use crate::topology::{K8sAnywhereTopology, PreparationError, PreparationOutcome, Topology};
|
||||||
|
|
||||||
|
pub struct FailoverTopology<T> {
|
||||||
|
pub primary: T,
|
||||||
|
pub replica: T,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl<T: Topology + Send + Sync> Topology for FailoverTopology<T> {
|
||||||
|
fn name(&self) -> &str {
|
||||||
|
"FailoverTopology"
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
|
||||||
|
let primary_outcome = self.primary.ensure_ready().await?;
|
||||||
|
let replica_outcome = self.replica.ensure_ready().await?;
|
||||||
|
|
||||||
|
match (primary_outcome, replica_outcome) {
|
||||||
|
(PreparationOutcome::Noop, PreparationOutcome::Noop) => Ok(PreparationOutcome::Noop),
|
||||||
|
(p, r) => {
|
||||||
|
let mut details = Vec::new();
|
||||||
|
if let PreparationOutcome::Success { details: d } = p {
|
||||||
|
details.push(format!("Primary: {}", d));
|
||||||
|
}
|
||||||
|
if let PreparationOutcome::Success { details: d } = r {
|
||||||
|
details.push(format!("Replica: {}", d));
|
||||||
|
}
|
||||||
|
Ok(PreparationOutcome::Success {
|
||||||
|
details: details.join(", "),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FailoverTopology<K8sAnywhereTopology> {
|
||||||
|
/// Creates a new `FailoverTopology` from environment variables.
|
||||||
|
///
|
||||||
|
/// Expects two environment variables:
|
||||||
|
/// - `HARMONY_FAILOVER_TOPOLOGY_K8S_PRIMARY`: Comma-separated `key=value` pairs, e.g.,
|
||||||
|
/// `kubeconfig=/path/to/primary.kubeconfig,context_name=primary-ctx`
|
||||||
|
/// - `HARMONY_FAILOVER_TOPOLOGY_K8S_REPLICA`: Same format for the replica.
|
||||||
|
///
|
||||||
|
/// Parses `kubeconfig` (path to kubeconfig file) and `context_name` (Kubernetes context),
|
||||||
|
/// and constructs `K8sAnywhereConfig` with local installs disabled (`use_local_k3d=false`,
|
||||||
|
/// `autoinstall=false`, `use_system_kubeconfig=false`).
|
||||||
|
/// `harmony_profile` is read from `HARMONY_PROFILE` env or defaults to `"dev"`.
|
||||||
|
///
|
||||||
|
/// Panics if required env vars are missing or malformed.
|
||||||
|
pub fn from_env() -> Self {
|
||||||
|
let primary_config =
|
||||||
|
K8sAnywhereConfig::remote_k8s_from_env_var("HARMONY_FAILOVER_TOPOLOGY_K8S_PRIMARY");
|
||||||
|
let replica_config =
|
||||||
|
K8sAnywhereConfig::remote_k8s_from_env_var("HARMONY_FAILOVER_TOPOLOGY_K8S_REPLICA");
|
||||||
|
|
||||||
|
let primary = K8sAnywhereTopology::with_config(primary_config);
|
||||||
|
let replica = K8sAnywhereTopology::with_config(replica_config);
|
||||||
|
|
||||||
|
Self { primary, replica }
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,34 +1,25 @@
|
|||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use harmony_macros::ip;
|
use harmony_macros::ip;
|
||||||
use harmony_types::net::MacAddress;
|
use harmony_types::{
|
||||||
use harmony_types::net::Url;
|
id::Id,
|
||||||
|
net::{MacAddress, Url},
|
||||||
|
switch::PortLocation,
|
||||||
|
};
|
||||||
use log::debug;
|
use log::debug;
|
||||||
use log::info;
|
use log::info;
|
||||||
|
|
||||||
use crate::data::FileContent;
|
use crate::infra::network_manager::OpenShiftNmStateNetworkManager;
|
||||||
use crate::executors::ExecutorError;
|
|
||||||
use crate::topology::PxeOptions;
|
use crate::topology::PxeOptions;
|
||||||
|
use crate::{data::FileContent, executors::ExecutorError};
|
||||||
|
|
||||||
use super::DHCPStaticEntry;
|
use super::{
|
||||||
use super::DhcpServer;
|
DHCPStaticEntry, DhcpServer, DnsRecord, DnsRecordType, DnsServer, Firewall, HostNetworkConfig,
|
||||||
use super::DnsRecord;
|
HttpServer, IpAddress, K8sclient, LoadBalancer, LoadBalancerService, LogicalHost, NetworkError,
|
||||||
use super::DnsRecordType;
|
NetworkManager, PreparationError, PreparationOutcome, Router, Switch, SwitchClient,
|
||||||
use super::DnsServer;
|
SwitchError, TftpServer, Topology, k8s::K8sClient,
|
||||||
use super::Firewall;
|
};
|
||||||
use super::HttpServer;
|
|
||||||
use super::IpAddress;
|
|
||||||
use super::K8sclient;
|
|
||||||
use super::LoadBalancer;
|
|
||||||
use super::LoadBalancerService;
|
|
||||||
use super::LogicalHost;
|
|
||||||
use super::PreparationError;
|
|
||||||
use super::PreparationOutcome;
|
|
||||||
use super::Router;
|
|
||||||
use super::TftpServer;
|
|
||||||
|
|
||||||
use super::Topology;
|
use std::sync::{Arc, OnceLock};
|
||||||
use super::k8s::K8sClient;
|
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, Clone)]
|
||||||
pub struct HAClusterTopology {
|
pub struct HAClusterTopology {
|
||||||
@@ -40,10 +31,12 @@ pub struct HAClusterTopology {
|
|||||||
pub tftp_server: Arc<dyn TftpServer>,
|
pub tftp_server: Arc<dyn TftpServer>,
|
||||||
pub http_server: Arc<dyn HttpServer>,
|
pub http_server: Arc<dyn HttpServer>,
|
||||||
pub dns_server: Arc<dyn DnsServer>,
|
pub dns_server: Arc<dyn DnsServer>,
|
||||||
|
pub switch_client: Arc<dyn SwitchClient>,
|
||||||
pub bootstrap_host: LogicalHost,
|
pub bootstrap_host: LogicalHost,
|
||||||
pub control_plane: Vec<LogicalHost>,
|
pub control_plane: Vec<LogicalHost>,
|
||||||
pub workers: Vec<LogicalHost>,
|
pub workers: Vec<LogicalHost>,
|
||||||
pub switch: Vec<LogicalHost>,
|
pub kubeconfig: Option<String>,
|
||||||
|
pub network_manager: OnceLock<Arc<dyn NetworkManager>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
@@ -62,9 +55,17 @@ impl Topology for HAClusterTopology {
|
|||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl K8sclient for HAClusterTopology {
|
impl K8sclient for HAClusterTopology {
|
||||||
async fn k8s_client(&self) -> Result<Arc<K8sClient>, String> {
|
async fn k8s_client(&self) -> Result<Arc<K8sClient>, String> {
|
||||||
Ok(Arc::new(
|
match &self.kubeconfig {
|
||||||
|
None => Ok(Arc::new(
|
||||||
K8sClient::try_default().await.map_err(|e| e.to_string())?,
|
K8sClient::try_default().await.map_err(|e| e.to_string())?,
|
||||||
))
|
)),
|
||||||
|
Some(kubeconfig) => {
|
||||||
|
let Some(client) = K8sClient::from_kubeconfig(kubeconfig).await else {
|
||||||
|
return Err("Failed to create k8s client".to_string());
|
||||||
|
};
|
||||||
|
Ok(Arc::new(client))
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -89,6 +90,14 @@ impl HAClusterTopology {
|
|||||||
.to_string()
|
.to_string()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub async fn network_manager(&self) -> &dyn NetworkManager {
|
||||||
|
let k8s_client = self.k8s_client().await.unwrap();
|
||||||
|
|
||||||
|
self.network_manager
|
||||||
|
.get_or_init(|| Arc::new(OpenShiftNmStateNetworkManager::new(k8s_client.clone())))
|
||||||
|
.as_ref()
|
||||||
|
}
|
||||||
|
|
||||||
pub fn autoload() -> Self {
|
pub fn autoload() -> Self {
|
||||||
let dummy_infra = Arc::new(DummyInfra {});
|
let dummy_infra = Arc::new(DummyInfra {});
|
||||||
let dummy_host = LogicalHost {
|
let dummy_host = LogicalHost {
|
||||||
@@ -97,6 +106,7 @@ impl HAClusterTopology {
|
|||||||
};
|
};
|
||||||
|
|
||||||
Self {
|
Self {
|
||||||
|
kubeconfig: None,
|
||||||
domain_name: "DummyTopology".to_string(),
|
domain_name: "DummyTopology".to_string(),
|
||||||
router: dummy_infra.clone(),
|
router: dummy_infra.clone(),
|
||||||
load_balancer: dummy_infra.clone(),
|
load_balancer: dummy_infra.clone(),
|
||||||
@@ -105,10 +115,11 @@ impl HAClusterTopology {
|
|||||||
tftp_server: dummy_infra.clone(),
|
tftp_server: dummy_infra.clone(),
|
||||||
http_server: dummy_infra.clone(),
|
http_server: dummy_infra.clone(),
|
||||||
dns_server: dummy_infra.clone(),
|
dns_server: dummy_infra.clone(),
|
||||||
|
switch_client: dummy_infra.clone(),
|
||||||
bootstrap_host: dummy_host,
|
bootstrap_host: dummy_host,
|
||||||
control_plane: vec![],
|
control_plane: vec![],
|
||||||
workers: vec![],
|
workers: vec![],
|
||||||
switch: vec![],
|
network_manager: OnceLock::new(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -263,6 +274,46 @@ impl HttpServer for HAClusterTopology {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl Switch for HAClusterTopology {
|
||||||
|
async fn setup_switch(&self) -> Result<(), SwitchError> {
|
||||||
|
self.switch_client.setup().await.map(|_| ())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn get_port_for_mac_address(
|
||||||
|
&self,
|
||||||
|
mac_address: &MacAddress,
|
||||||
|
) -> Result<Option<PortLocation>, SwitchError> {
|
||||||
|
self.switch_client.find_port(mac_address).await
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn configure_port_channel(&self, config: &HostNetworkConfig) -> Result<(), SwitchError> {
|
||||||
|
debug!("Configuring port channel: {config:#?}");
|
||||||
|
let switch_ports = config.switch_ports.iter().map(|s| s.port.clone()).collect();
|
||||||
|
|
||||||
|
self.switch_client
|
||||||
|
.configure_port_channel(&format!("Harmony_{}", config.host_id), switch_ports)
|
||||||
|
.await
|
||||||
|
.map_err(|e| SwitchError::new(format!("Failed to configure port-channel: {e}")))?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl NetworkManager for HAClusterTopology {
|
||||||
|
async fn ensure_network_manager_installed(&self) -> Result<(), NetworkError> {
|
||||||
|
self.network_manager()
|
||||||
|
.await
|
||||||
|
.ensure_network_manager_installed()
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn configure_bond(&self, config: &HostNetworkConfig) -> Result<(), NetworkError> {
|
||||||
|
self.network_manager().await.configure_bond(config).await
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
pub struct DummyInfra;
|
pub struct DummyInfra;
|
||||||
|
|
||||||
@@ -332,8 +383,8 @@ impl DhcpServer for DummyInfra {
|
|||||||
}
|
}
|
||||||
async fn set_dhcp_range(
|
async fn set_dhcp_range(
|
||||||
&self,
|
&self,
|
||||||
start: &IpAddress,
|
_start: &IpAddress,
|
||||||
end: &IpAddress,
|
_end: &IpAddress,
|
||||||
) -> Result<(), ExecutorError> {
|
) -> Result<(), ExecutorError> {
|
||||||
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
|
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
|
||||||
}
|
}
|
||||||
@@ -449,3 +500,25 @@ impl DnsServer for DummyInfra {
|
|||||||
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
|
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl SwitchClient for DummyInfra {
|
||||||
|
async fn setup(&self) -> Result<(), SwitchError> {
|
||||||
|
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn find_port(
|
||||||
|
&self,
|
||||||
|
_mac_address: &MacAddress,
|
||||||
|
) -> Result<Option<PortLocation>, SwitchError> {
|
||||||
|
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn configure_port_channel(
|
||||||
|
&self,
|
||||||
|
_channel_name: &str,
|
||||||
|
_switch_ports: Vec<PortLocation>,
|
||||||
|
) -> Result<u8, SwitchError> {
|
||||||
|
unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -3,13 +3,20 @@ use std::time::Duration;
|
|||||||
use derive_new::new;
|
use derive_new::new;
|
||||||
use k8s_openapi::{
|
use k8s_openapi::{
|
||||||
ClusterResourceScope, NamespaceResourceScope,
|
ClusterResourceScope, NamespaceResourceScope,
|
||||||
api::{apps::v1::Deployment, core::v1::Pod},
|
api::{
|
||||||
|
apps::v1::Deployment,
|
||||||
|
core::v1::{Node, Pod, ServiceAccount},
|
||||||
|
},
|
||||||
|
apimachinery::pkg::version::Info,
|
||||||
};
|
};
|
||||||
use kube::{
|
use kube::{
|
||||||
Client, Config, Error, Resource,
|
Client, Config, Discovery, Error, Resource,
|
||||||
api::{Api, AttachParams, DeleteParams, ListParams, Patch, PatchParams, ResourceExt},
|
api::{
|
||||||
|
Api, AttachParams, DeleteParams, ListParams, ObjectList, Patch, PatchParams, ResourceExt,
|
||||||
|
},
|
||||||
config::{KubeConfigOptions, Kubeconfig},
|
config::{KubeConfigOptions, Kubeconfig},
|
||||||
core::ErrorResponse,
|
core::ErrorResponse,
|
||||||
|
discovery::{ApiCapabilities, Scope},
|
||||||
error::DiscoveryError,
|
error::DiscoveryError,
|
||||||
runtime::reflector::Lookup,
|
runtime::reflector::Lookup,
|
||||||
};
|
};
|
||||||
@@ -18,11 +25,12 @@ use kube::{
|
|||||||
api::{ApiResource, GroupVersionKind},
|
api::{ApiResource, GroupVersionKind},
|
||||||
runtime::wait::await_condition,
|
runtime::wait::await_condition,
|
||||||
};
|
};
|
||||||
use log::{debug, error, trace};
|
use log::{debug, error, trace, warn};
|
||||||
use serde::{Serialize, de::DeserializeOwned};
|
use serde::{Serialize, de::DeserializeOwned};
|
||||||
use serde_json::json;
|
use serde_json::json;
|
||||||
use similar::TextDiff;
|
use similar::TextDiff;
|
||||||
use tokio::{io::AsyncReadExt, time::sleep};
|
use tokio::{io::AsyncReadExt, time::sleep};
|
||||||
|
use url::Url;
|
||||||
|
|
||||||
#[derive(new, Clone)]
|
#[derive(new, Clone)]
|
||||||
pub struct K8sClient {
|
pub struct K8sClient {
|
||||||
@@ -56,6 +64,22 @@ impl K8sClient {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub async fn service_account_api(&self, namespace: &str) -> Api<ServiceAccount> {
|
||||||
|
let api: Api<ServiceAccount> = Api::namespaced(self.client.clone(), namespace);
|
||||||
|
api
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn get_apiserver_version(&self) -> Result<Info, Error> {
|
||||||
|
let client: Client = self.client.clone();
|
||||||
|
let version_info: Info = client.apiserver_version().await?;
|
||||||
|
Ok(version_info)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn discovery(&self) -> Result<Discovery, Error> {
|
||||||
|
let discovery: Discovery = Discovery::new(self.client.clone()).run().await?;
|
||||||
|
Ok(discovery)
|
||||||
|
}
|
||||||
|
|
||||||
pub async fn get_resource_json_value(
|
pub async fn get_resource_json_value(
|
||||||
&self,
|
&self,
|
||||||
name: &str,
|
name: &str,
|
||||||
@@ -68,6 +92,7 @@ impl K8sClient {
|
|||||||
} else {
|
} else {
|
||||||
Api::default_namespaced_with(self.client.clone(), &gvk)
|
Api::default_namespaced_with(self.client.clone(), &gvk)
|
||||||
};
|
};
|
||||||
|
|
||||||
resource.get(name).await
|
resource.get(name).await
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -77,10 +102,14 @@ impl K8sClient {
|
|||||||
namespace: Option<&str>,
|
namespace: Option<&str>,
|
||||||
) -> Result<Option<Deployment>, Error> {
|
) -> Result<Option<Deployment>, Error> {
|
||||||
let deps: Api<Deployment> = if let Some(ns) = namespace {
|
let deps: Api<Deployment> = if let Some(ns) = namespace {
|
||||||
|
debug!("getting namespaced deployment");
|
||||||
Api::namespaced(self.client.clone(), ns)
|
Api::namespaced(self.client.clone(), ns)
|
||||||
} else {
|
} else {
|
||||||
|
debug!("getting default namespace deployment");
|
||||||
Api::default_namespaced(self.client.clone())
|
Api::default_namespaced(self.client.clone())
|
||||||
};
|
};
|
||||||
|
|
||||||
|
debug!("getting deployment {} in ns {}", name, namespace.unwrap());
|
||||||
deps.get_opt(name).await
|
deps.get_opt(name).await
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -90,6 +119,7 @@ impl K8sClient {
|
|||||||
} else {
|
} else {
|
||||||
Api::default_namespaced(self.client.clone())
|
Api::default_namespaced(self.client.clone())
|
||||||
};
|
};
|
||||||
|
|
||||||
pods.get_opt(name).await
|
pods.get_opt(name).await
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -111,7 +141,7 @@ impl K8sClient {
|
|||||||
}
|
}
|
||||||
});
|
});
|
||||||
let pp = PatchParams::default();
|
let pp = PatchParams::default();
|
||||||
let scale = Patch::Apply(&patch);
|
let scale = Patch::Merge(&patch);
|
||||||
deployments.patch_scale(name, &pp, &scale).await?;
|
deployments.patch_scale(name, &pp, &scale).await?;
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@@ -133,9 +163,9 @@ impl K8sClient {
|
|||||||
|
|
||||||
pub async fn wait_until_deployment_ready(
|
pub async fn wait_until_deployment_ready(
|
||||||
&self,
|
&self,
|
||||||
name: String,
|
name: &str,
|
||||||
namespace: Option<&str>,
|
namespace: Option<&str>,
|
||||||
timeout: Option<u64>,
|
timeout: Option<Duration>,
|
||||||
) -> Result<(), String> {
|
) -> Result<(), String> {
|
||||||
let api: Api<Deployment>;
|
let api: Api<Deployment>;
|
||||||
|
|
||||||
@@ -145,9 +175,9 @@ impl K8sClient {
|
|||||||
api = Api::default_namespaced(self.client.clone());
|
api = Api::default_namespaced(self.client.clone());
|
||||||
}
|
}
|
||||||
|
|
||||||
let establish = await_condition(api, name.as_str(), conditions::is_deployment_completed());
|
let establish = await_condition(api, name, conditions::is_deployment_completed());
|
||||||
let t = timeout.unwrap_or(300);
|
let timeout = timeout.unwrap_or(Duration::from_secs(120));
|
||||||
let res = tokio::time::timeout(std::time::Duration::from_secs(t), establish).await;
|
let res = tokio::time::timeout(timeout, establish).await;
|
||||||
|
|
||||||
if res.is_ok() {
|
if res.is_ok() {
|
||||||
Ok(())
|
Ok(())
|
||||||
@@ -167,13 +197,15 @@ impl K8sClient {
|
|||||||
loop {
|
loop {
|
||||||
let pod = self.get_pod(pod_name, namespace).await?;
|
let pod = self.get_pod(pod_name, namespace).await?;
|
||||||
|
|
||||||
if let Some(p) = pod
|
if let Some(p) = pod {
|
||||||
&& let Some(status) = p.status
|
if let Some(status) = p.status {
|
||||||
&& let Some(phase) = status.phase
|
if let Some(phase) = status.phase {
|
||||||
&& phase.to_lowercase() == "running"
|
if phase.to_lowercase() == "running" {
|
||||||
{
|
|
||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if elapsed >= timeout_secs {
|
if elapsed >= timeout_secs {
|
||||||
return Err(Error::Discovery(DiscoveryError::MissingResource(format!(
|
return Err(Error::Discovery(DiscoveryError::MissingResource(format!(
|
||||||
@@ -341,14 +373,14 @@ impl K8sClient {
|
|||||||
Ok(current) => {
|
Ok(current) => {
|
||||||
trace!("Received current value {current:#?}");
|
trace!("Received current value {current:#?}");
|
||||||
// The resource exists, so we calculate and display a diff.
|
// The resource exists, so we calculate and display a diff.
|
||||||
println!("\nPerforming dry-run for resource: '{}'", name);
|
println!("\nPerforming dry-run for resource: '{name}'");
|
||||||
let mut current_yaml = serde_yaml::to_value(¤t).unwrap_or_else(|_| {
|
let mut current_yaml = serde_yaml::to_value(¤t).unwrap_or_else(|_| {
|
||||||
panic!("Could not serialize current value : {current:#?}")
|
panic!("Could not serialize current value : {current:#?}")
|
||||||
});
|
});
|
||||||
if current_yaml.is_mapping() && current_yaml.get("status").is_some() {
|
if current_yaml.is_mapping() && current_yaml.get("status").is_some() {
|
||||||
let map = current_yaml.as_mapping_mut().unwrap();
|
let map = current_yaml.as_mapping_mut().unwrap();
|
||||||
let removed = map.remove_entry("status");
|
let removed = map.remove_entry("status");
|
||||||
trace!("Removed status {:?}", removed);
|
trace!("Removed status {removed:?}");
|
||||||
} else {
|
} else {
|
||||||
trace!(
|
trace!(
|
||||||
"Did not find status entry for current object {}/{}",
|
"Did not find status entry for current object {}/{}",
|
||||||
@@ -377,14 +409,14 @@ impl K8sClient {
|
|||||||
similar::ChangeTag::Insert => "+",
|
similar::ChangeTag::Insert => "+",
|
||||||
similar::ChangeTag::Equal => " ",
|
similar::ChangeTag::Equal => " ",
|
||||||
};
|
};
|
||||||
print!("{}{}", sign, change);
|
print!("{sign}{change}");
|
||||||
}
|
}
|
||||||
// In a dry run, we return the new resource state that would have been applied.
|
// In a dry run, we return the new resource state that would have been applied.
|
||||||
Ok(resource.clone())
|
Ok(resource.clone())
|
||||||
}
|
}
|
||||||
Err(Error::Api(ErrorResponse { code: 404, .. })) => {
|
Err(Error::Api(ErrorResponse { code: 404, .. })) => {
|
||||||
// The resource does not exist, so the "diff" is the entire new resource.
|
// The resource does not exist, so the "diff" is the entire new resource.
|
||||||
println!("\nPerforming dry-run for new resource: '{}'", name);
|
println!("\nPerforming dry-run for new resource: '{name}'");
|
||||||
println!(
|
println!(
|
||||||
"Resource does not exist. It would be created with the following content:"
|
"Resource does not exist. It would be created with the following content:"
|
||||||
);
|
);
|
||||||
@@ -393,14 +425,14 @@ impl K8sClient {
|
|||||||
|
|
||||||
// Print each line of the new resource with a '+' prefix.
|
// Print each line of the new resource with a '+' prefix.
|
||||||
for line in new_yaml.lines() {
|
for line in new_yaml.lines() {
|
||||||
println!("+{}", line);
|
println!("+{line}");
|
||||||
}
|
}
|
||||||
// In a dry run, we return the new resource state that would have been created.
|
// In a dry run, we return the new resource state that would have been created.
|
||||||
Ok(resource.clone())
|
Ok(resource.clone())
|
||||||
}
|
}
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
// Another API error occurred.
|
// Another API error occurred.
|
||||||
error!("Failed to get resource '{}': {}", name, e);
|
error!("Failed to get resource '{name}': {e}");
|
||||||
Err(e)
|
Err(e)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -415,11 +447,24 @@ impl K8sClient {
|
|||||||
where
|
where
|
||||||
K: Resource + Clone + std::fmt::Debug + DeserializeOwned + serde::Serialize,
|
K: Resource + Clone + std::fmt::Debug + DeserializeOwned + serde::Serialize,
|
||||||
<K as Resource>::Scope: ApplyStrategy<K>,
|
<K as Resource>::Scope: ApplyStrategy<K>,
|
||||||
<K as kube::Resource>::DynamicType: Default,
|
<K as Resource>::DynamicType: Default,
|
||||||
{
|
{
|
||||||
let mut result = Vec::new();
|
let mut result = Vec::new();
|
||||||
for r in resource.iter() {
|
for r in resource.iter() {
|
||||||
result.push(self.apply(r, ns).await?);
|
let apply_result = self.apply(r, ns).await;
|
||||||
|
if apply_result.is_err() {
|
||||||
|
// NOTE : We should be careful about this one, it may leak sensitive information in
|
||||||
|
// logs
|
||||||
|
// Maybe just reducing it to debug would be enough as we already know debug logs
|
||||||
|
// are unsafe.
|
||||||
|
// But keeping it at warn makes it much easier to understand what is going on. So be it for now.
|
||||||
|
warn!(
|
||||||
|
"Failed to apply k8s resource : {}",
|
||||||
|
serde_json::to_string_pretty(r).map_err(|e| Error::SerdeError(e))?
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
result.push(apply_result?);
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(result)
|
Ok(result)
|
||||||
@@ -480,10 +525,7 @@ impl K8sClient {
|
|||||||
|
|
||||||
// 6. Apply the object to the cluster using Server-Side Apply.
|
// 6. Apply the object to the cluster using Server-Side Apply.
|
||||||
// This will create the resource if it doesn't exist, or update it if it does.
|
// This will create the resource if it doesn't exist, or update it if it does.
|
||||||
println!(
|
println!("Applying '{name}' in namespace '{namespace}'...",);
|
||||||
"Applying Argo Application '{}' in namespace '{}'...",
|
|
||||||
name, namespace
|
|
||||||
);
|
|
||||||
let patch_params = PatchParams::apply("harmony"); // Use a unique field manager name
|
let patch_params = PatchParams::apply("harmony"); // Use a unique field manager name
|
||||||
let result = api.patch(name, &patch_params, &Patch::Apply(&obj)).await?;
|
let result = api.patch(name, &patch_params, &Patch::Apply(&obj)).await?;
|
||||||
|
|
||||||
@@ -492,7 +534,120 @@ impl K8sClient {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) async fn from_kubeconfig(path: &str) -> Option<K8sClient> {
|
/// Apply a resource from a URL
|
||||||
|
///
|
||||||
|
/// It is the equivalent of `kubectl apply -f <url>`
|
||||||
|
pub async fn apply_url(&self, url: Url, ns: Option<&str>) -> Result<(), Error> {
|
||||||
|
let patch_params = PatchParams::apply("harmony");
|
||||||
|
let discovery = kube::Discovery::new(self.client.clone()).run().await?;
|
||||||
|
|
||||||
|
let yaml = reqwest::get(url)
|
||||||
|
.await
|
||||||
|
.expect("Could not get URL")
|
||||||
|
.text()
|
||||||
|
.await
|
||||||
|
.expect("Could not get content from URL");
|
||||||
|
|
||||||
|
for doc in multidoc_deserialize(&yaml).expect("failed to parse YAML from file") {
|
||||||
|
let obj: DynamicObject =
|
||||||
|
serde_yaml::from_value(doc).expect("cannot apply without valid YAML");
|
||||||
|
let namespace = obj.metadata.namespace.as_deref().or(ns);
|
||||||
|
let type_meta = obj
|
||||||
|
.types
|
||||||
|
.as_ref()
|
||||||
|
.expect("cannot apply object without valid TypeMeta");
|
||||||
|
let gvk = GroupVersionKind::try_from(type_meta)
|
||||||
|
.expect("cannot apply object without valid GroupVersionKind");
|
||||||
|
let name = obj.name_any();
|
||||||
|
|
||||||
|
if let Some((ar, caps)) = discovery.resolve_gvk(&gvk) {
|
||||||
|
let api = get_dynamic_api(ar, caps, self.client.clone(), namespace, false);
|
||||||
|
trace!(
|
||||||
|
"Applying {}: \n{}",
|
||||||
|
gvk.kind,
|
||||||
|
serde_yaml::to_string(&obj).expect("Failed to serialize YAML")
|
||||||
|
);
|
||||||
|
let data: serde_json::Value =
|
||||||
|
serde_json::to_value(&obj).expect("Failed to serialize JSON");
|
||||||
|
let _r = api.patch(&name, &patch_params, &Patch::Apply(data)).await?;
|
||||||
|
debug!("applied {} {}", gvk.kind, name);
|
||||||
|
} else {
|
||||||
|
warn!("Cannot apply document for unknown {gvk:?}");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Gets a single named resource of a specific type `K`.
|
||||||
|
///
|
||||||
|
/// This function uses the `ApplyStrategy` trait to correctly determine
|
||||||
|
/// whether to look in a specific namespace or in the entire cluster.
|
||||||
|
///
|
||||||
|
/// Returns `Ok(None)` if the resource is not found (404).
|
||||||
|
pub async fn get_resource<K>(
|
||||||
|
&self,
|
||||||
|
name: &str,
|
||||||
|
namespace: Option<&str>,
|
||||||
|
) -> Result<Option<K>, Error>
|
||||||
|
where
|
||||||
|
K: Resource + Clone + std::fmt::Debug + DeserializeOwned,
|
||||||
|
<K as Resource>::Scope: ApplyStrategy<K>,
|
||||||
|
<K as kube::Resource>::DynamicType: Default,
|
||||||
|
{
|
||||||
|
let api: Api<K> =
|
||||||
|
<<K as Resource>::Scope as ApplyStrategy<K>>::get_api(&self.client, namespace);
|
||||||
|
|
||||||
|
api.get_opt(name).await
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Lists all resources of a specific type `K`.
|
||||||
|
///
|
||||||
|
/// This function uses the `ApplyStrategy` trait to correctly determine
|
||||||
|
/// whether to list from a specific namespace or from the entire cluster.
|
||||||
|
pub async fn list_resources<K>(
|
||||||
|
&self,
|
||||||
|
namespace: Option<&str>,
|
||||||
|
list_params: Option<ListParams>,
|
||||||
|
) -> Result<ObjectList<K>, Error>
|
||||||
|
where
|
||||||
|
K: Resource + Clone + std::fmt::Debug + DeserializeOwned,
|
||||||
|
<K as Resource>::Scope: ApplyStrategy<K>,
|
||||||
|
<K as kube::Resource>::DynamicType: Default,
|
||||||
|
{
|
||||||
|
let api: Api<K> =
|
||||||
|
<<K as Resource>::Scope as ApplyStrategy<K>>::get_api(&self.client, namespace);
|
||||||
|
|
||||||
|
let list_params = list_params.unwrap_or_default();
|
||||||
|
api.list(&list_params).await
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Fetches a list of all Nodes in the cluster.
|
||||||
|
pub async fn get_nodes(
|
||||||
|
&self,
|
||||||
|
list_params: Option<ListParams>,
|
||||||
|
) -> Result<ObjectList<Node>, Error> {
|
||||||
|
self.list_resources(None, list_params).await
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn from_kubeconfig(path: &str) -> Option<K8sClient> {
|
||||||
|
Self::from_kubeconfig_with_opts(path, &KubeConfigOptions::default()).await
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn from_kubeconfig_with_context(
|
||||||
|
path: &str,
|
||||||
|
context: Option<String>,
|
||||||
|
) -> Option<K8sClient> {
|
||||||
|
let mut opts = KubeConfigOptions::default();
|
||||||
|
opts.context = context;
|
||||||
|
|
||||||
|
Self::from_kubeconfig_with_opts(path, &opts).await
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn from_kubeconfig_with_opts(
|
||||||
|
path: &str,
|
||||||
|
opts: &KubeConfigOptions,
|
||||||
|
) -> Option<K8sClient> {
|
||||||
let k = match Kubeconfig::read_from(path) {
|
let k = match Kubeconfig::read_from(path) {
|
||||||
Ok(k) => k,
|
Ok(k) => k,
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
@@ -500,17 +655,38 @@ impl K8sClient {
|
|||||||
return None;
|
return None;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
Some(K8sClient::new(
|
Some(K8sClient::new(
|
||||||
Client::try_from(
|
Client::try_from(Config::from_custom_kubeconfig(k, &opts).await.unwrap()).unwrap(),
|
||||||
Config::from_custom_kubeconfig(k, &KubeConfigOptions::default())
|
|
||||||
.await
|
|
||||||
.unwrap(),
|
|
||||||
)
|
|
||||||
.unwrap(),
|
|
||||||
))
|
))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn get_dynamic_api(
|
||||||
|
resource: ApiResource,
|
||||||
|
capabilities: ApiCapabilities,
|
||||||
|
client: Client,
|
||||||
|
ns: Option<&str>,
|
||||||
|
all: bool,
|
||||||
|
) -> Api<DynamicObject> {
|
||||||
|
if capabilities.scope == Scope::Cluster || all {
|
||||||
|
Api::all_with(client, &resource)
|
||||||
|
} else if let Some(namespace) = ns {
|
||||||
|
Api::namespaced_with(client, namespace, &resource)
|
||||||
|
} else {
|
||||||
|
Api::default_namespaced_with(client, &resource)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn multidoc_deserialize(data: &str) -> Result<Vec<serde_yaml::Value>, serde_yaml::Error> {
|
||||||
|
use serde::Deserialize;
|
||||||
|
let mut docs = vec![];
|
||||||
|
for de in serde_yaml::Deserializer::from_str(data) {
|
||||||
|
docs.push(serde_yaml::Value::deserialize(de)?);
|
||||||
|
}
|
||||||
|
Ok(docs)
|
||||||
|
}
|
||||||
|
|
||||||
pub trait ApplyStrategy<K: Resource> {
|
pub trait ApplyStrategy<K: Resource> {
|
||||||
fn get_api(client: &Client, ns: Option<&str>) -> Api<K>;
|
fn get_api(client: &Client, ns: Option<&str>) -> Api<K>;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,620 +0,0 @@
|
|||||||
use std::{process::Command, sync::Arc};
|
|
||||||
|
|
||||||
use async_trait::async_trait;
|
|
||||||
use kube::api::GroupVersionKind;
|
|
||||||
use log::{debug, info, warn};
|
|
||||||
use serde::Serialize;
|
|
||||||
use tokio::sync::OnceCell;
|
|
||||||
|
|
||||||
use crate::{
|
|
||||||
executors::ExecutorError,
|
|
||||||
interpret::InterpretStatus,
|
|
||||||
inventory::Inventory,
|
|
||||||
modules::{
|
|
||||||
k3d::K3DInstallationScore,
|
|
||||||
monitoring::kube_prometheus::crd::{
|
|
||||||
crd_alertmanager_config::CRDPrometheus,
|
|
||||||
prometheus_operator::prometheus_operator_helm_chart_score,
|
|
||||||
rhob_alertmanager_config::RHOBObservability,
|
|
||||||
},
|
|
||||||
prometheus::{
|
|
||||||
k8s_prometheus_alerting_score::K8sPrometheusCRDAlertingScore,
|
|
||||||
prometheus::PrometheusApplicationMonitoring, rhob_alerting_score::RHOBAlertingScore,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
score::Score,
|
|
||||||
topology::ingress::Ingress,
|
|
||||||
};
|
|
||||||
|
|
||||||
use super::{
|
|
||||||
DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, PreparationError,
|
|
||||||
PreparationOutcome, Topology,
|
|
||||||
k8s::K8sClient,
|
|
||||||
oberservability::monitoring::AlertReceiver,
|
|
||||||
tenant::{
|
|
||||||
TenantConfig, TenantManager,
|
|
||||||
k8s::K8sTenantManager,
|
|
||||||
network_policy::{
|
|
||||||
K3dNetworkPolicyStrategy, NetworkPolicyStrategy, NoopNetworkPolicyStrategy,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
#[derive(Clone, Debug)]
|
|
||||||
struct K8sState {
|
|
||||||
client: Arc<K8sClient>,
|
|
||||||
source: K8sSource,
|
|
||||||
message: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
|
||||||
enum K8sSource {
|
|
||||||
LocalK3d,
|
|
||||||
Kubeconfig,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Debug)]
|
|
||||||
pub struct K8sAnywhereTopology {
|
|
||||||
k8s_state: Arc<OnceCell<Option<K8sState>>>,
|
|
||||||
tenant_manager: Arc<OnceCell<K8sTenantManager>>,
|
|
||||||
config: Arc<K8sAnywhereConfig>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[async_trait]
|
|
||||||
impl K8sclient for K8sAnywhereTopology {
|
|
||||||
async fn k8s_client(&self) -> Result<Arc<K8sClient>, String> {
|
|
||||||
let state = match self.k8s_state.get() {
|
|
||||||
Some(state) => state,
|
|
||||||
None => return Err("K8s state not initialized yet".to_string()),
|
|
||||||
};
|
|
||||||
|
|
||||||
let state = match state {
|
|
||||||
Some(state) => state,
|
|
||||||
None => return Err("K8s client initialized but empty".to_string()),
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(state.client.clone())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[async_trait]
|
|
||||||
impl PrometheusApplicationMonitoring<CRDPrometheus> for K8sAnywhereTopology {
|
|
||||||
async fn install_prometheus(
|
|
||||||
&self,
|
|
||||||
sender: &CRDPrometheus,
|
|
||||||
inventory: &Inventory,
|
|
||||||
receivers: Option<Vec<Box<dyn AlertReceiver<CRDPrometheus>>>>,
|
|
||||||
) -> Result<PreparationOutcome, PreparationError> {
|
|
||||||
let po_result = self.ensure_prometheus_operator(sender).await?;
|
|
||||||
|
|
||||||
if po_result == PreparationOutcome::Noop {
|
|
||||||
debug!("Skipping Prometheus CR installation due to missing operator.");
|
|
||||||
return Ok(po_result);
|
|
||||||
}
|
|
||||||
|
|
||||||
let result = self
|
|
||||||
.get_k8s_prometheus_application_score(sender.clone(), receivers)
|
|
||||||
.await
|
|
||||||
.interpret(inventory, self)
|
|
||||||
.await;
|
|
||||||
|
|
||||||
match result {
|
|
||||||
Ok(outcome) => match outcome.status {
|
|
||||||
InterpretStatus::SUCCESS => Ok(PreparationOutcome::Success {
|
|
||||||
details: outcome.message,
|
|
||||||
}),
|
|
||||||
InterpretStatus::NOOP => Ok(PreparationOutcome::Noop),
|
|
||||||
_ => Err(PreparationError::new(outcome.message)),
|
|
||||||
},
|
|
||||||
Err(err) => Err(PreparationError::new(err.to_string())),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[async_trait]
|
|
||||||
impl PrometheusApplicationMonitoring<RHOBObservability> for K8sAnywhereTopology {
|
|
||||||
async fn install_prometheus(
|
|
||||||
&self,
|
|
||||||
sender: &RHOBObservability,
|
|
||||||
inventory: &Inventory,
|
|
||||||
receivers: Option<Vec<Box<dyn AlertReceiver<RHOBObservability>>>>,
|
|
||||||
) -> Result<PreparationOutcome, PreparationError> {
|
|
||||||
let po_result = self.ensure_cluster_observability_operator(sender).await?;
|
|
||||||
|
|
||||||
if po_result == PreparationOutcome::Noop {
|
|
||||||
debug!("Skipping Prometheus CR installation due to missing operator.");
|
|
||||||
return Ok(po_result);
|
|
||||||
}
|
|
||||||
|
|
||||||
let result = self
|
|
||||||
.get_cluster_observability_operator_prometheus_application_score(
|
|
||||||
sender.clone(),
|
|
||||||
receivers,
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
.interpret(inventory, self)
|
|
||||||
.await;
|
|
||||||
|
|
||||||
match result {
|
|
||||||
Ok(outcome) => match outcome.status {
|
|
||||||
InterpretStatus::SUCCESS => Ok(PreparationOutcome::Success {
|
|
||||||
details: outcome.message,
|
|
||||||
}),
|
|
||||||
InterpretStatus::NOOP => Ok(PreparationOutcome::Noop),
|
|
||||||
_ => Err(PreparationError::new(outcome.message)),
|
|
||||||
},
|
|
||||||
Err(err) => Err(PreparationError::new(err.to_string())),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Serialize for K8sAnywhereTopology {
|
|
||||||
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
|
|
||||||
where
|
|
||||||
S: serde::Serializer,
|
|
||||||
{
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl K8sAnywhereTopology {
|
|
||||||
pub fn from_env() -> Self {
|
|
||||||
Self {
|
|
||||||
k8s_state: Arc::new(OnceCell::new()),
|
|
||||||
tenant_manager: Arc::new(OnceCell::new()),
|
|
||||||
config: Arc::new(K8sAnywhereConfig::from_env()),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn with_config(config: K8sAnywhereConfig) -> Self {
|
|
||||||
Self {
|
|
||||||
k8s_state: Arc::new(OnceCell::new()),
|
|
||||||
tenant_manager: Arc::new(OnceCell::new()),
|
|
||||||
config: Arc::new(config),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn get_cluster_observability_operator_prometheus_application_score(
|
|
||||||
&self,
|
|
||||||
sender: RHOBObservability,
|
|
||||||
receivers: Option<Vec<Box<dyn AlertReceiver<RHOBObservability>>>>,
|
|
||||||
) -> RHOBAlertingScore {
|
|
||||||
RHOBAlertingScore {
|
|
||||||
sender,
|
|
||||||
receivers: receivers.unwrap_or_default(),
|
|
||||||
service_monitors: vec![],
|
|
||||||
prometheus_rules: vec![],
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn get_k8s_prometheus_application_score(
|
|
||||||
&self,
|
|
||||||
sender: CRDPrometheus,
|
|
||||||
receivers: Option<Vec<Box<dyn AlertReceiver<CRDPrometheus>>>>,
|
|
||||||
) -> K8sPrometheusCRDAlertingScore {
|
|
||||||
K8sPrometheusCRDAlertingScore {
|
|
||||||
sender,
|
|
||||||
receivers: receivers.unwrap_or_default(),
|
|
||||||
service_monitors: vec![],
|
|
||||||
prometheus_rules: vec![],
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn openshift_ingress_operator_available(&self) -> Result<(), PreparationError> {
|
|
||||||
let client = self.k8s_client().await?;
|
|
||||||
let gvk = GroupVersionKind {
|
|
||||||
group: "operator.openshift.io".into(),
|
|
||||||
version: "v1".into(),
|
|
||||||
kind: "IngressController".into(),
|
|
||||||
};
|
|
||||||
let ic = client
|
|
||||||
.get_resource_json_value("default", Some("openshift-ingress-operator"), &gvk)
|
|
||||||
.await?;
|
|
||||||
let ready_replicas = ic.data["status"]["availableReplicas"].as_i64().unwrap_or(0);
|
|
||||||
if ready_replicas >= 1 {
|
|
||||||
Ok(())
|
|
||||||
} else {
|
|
||||||
Err(PreparationError::new(
|
|
||||||
"openshift-ingress-operator not available".to_string(),
|
|
||||||
))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn is_helm_available(&self) -> Result<(), String> {
|
|
||||||
let version_result = Command::new("helm")
|
|
||||||
.arg("version")
|
|
||||||
.output()
|
|
||||||
.map_err(|e| format!("Failed to execute 'helm -version': {}", e))?;
|
|
||||||
|
|
||||||
if !version_result.status.success() {
|
|
||||||
return Err("Failed to run 'helm -version'".to_string());
|
|
||||||
}
|
|
||||||
|
|
||||||
let version_output = String::from_utf8_lossy(&version_result.stdout);
|
|
||||||
debug!("Helm version: {}", version_output.trim());
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn try_load_system_kubeconfig(&self) -> Option<K8sClient> {
|
|
||||||
todo!("Use kube-rs default behavior to load system kubeconfig");
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn try_load_kubeconfig(&self, path: &str) -> Option<K8sClient> {
|
|
||||||
K8sClient::from_kubeconfig(path).await
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_k3d_installation_score(&self) -> K3DInstallationScore {
|
|
||||||
K3DInstallationScore::default()
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn try_install_k3d(&self) -> Result<(), PreparationError> {
|
|
||||||
let result = self
|
|
||||||
.get_k3d_installation_score()
|
|
||||||
.interpret(&Inventory::empty(), self)
|
|
||||||
.await;
|
|
||||||
|
|
||||||
match result {
|
|
||||||
Ok(outcome) => match outcome.status {
|
|
||||||
InterpretStatus::SUCCESS => Ok(()),
|
|
||||||
InterpretStatus::NOOP => Ok(()),
|
|
||||||
_ => Err(PreparationError::new(outcome.message)),
|
|
||||||
},
|
|
||||||
Err(err) => Err(PreparationError::new(err.to_string())),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn try_get_or_install_k8s_client(&self) -> Result<Option<K8sState>, PreparationError> {
|
|
||||||
let k8s_anywhere_config = &self.config;
|
|
||||||
|
|
||||||
// TODO this deserves some refactoring, it is becoming a bit hard to figure out
|
|
||||||
// be careful when making modifications here
|
|
||||||
if k8s_anywhere_config.use_local_k3d {
|
|
||||||
debug!("Using local k3d cluster because of use_local_k3d set to true");
|
|
||||||
} else {
|
|
||||||
if let Some(kubeconfig) = &k8s_anywhere_config.kubeconfig {
|
|
||||||
debug!("Loading kubeconfig {kubeconfig}");
|
|
||||||
match self.try_load_kubeconfig(kubeconfig).await {
|
|
||||||
Some(client) => {
|
|
||||||
return Ok(Some(K8sState {
|
|
||||||
client: Arc::new(client),
|
|
||||||
source: K8sSource::Kubeconfig,
|
|
||||||
message: format!("Loaded k8s client from kubeconfig {kubeconfig}"),
|
|
||||||
}));
|
|
||||||
}
|
|
||||||
None => {
|
|
||||||
return Err(PreparationError::new(format!(
|
|
||||||
"Failed to load kubeconfig from {kubeconfig}"
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if k8s_anywhere_config.use_system_kubeconfig {
|
|
||||||
debug!("Loading system kubeconfig");
|
|
||||||
match self.try_load_system_kubeconfig().await {
|
|
||||||
Some(_client) => todo!(),
|
|
||||||
None => todo!(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
info!("No kubernetes configuration found");
|
|
||||||
}
|
|
||||||
|
|
||||||
if !k8s_anywhere_config.autoinstall {
|
|
||||||
warn!(
|
|
||||||
"Installation cancelled, K8sAnywhere could not initialize a valid Kubernetes client"
|
|
||||||
);
|
|
||||||
return Ok(None);
|
|
||||||
}
|
|
||||||
|
|
||||||
debug!("Starting K8sAnywhere installation");
|
|
||||||
self.try_install_k3d().await?;
|
|
||||||
let k3d_score = self.get_k3d_installation_score();
|
|
||||||
// I feel like having to rely on the k3d_rs crate here is a smell
|
|
||||||
// I think we should have a way to interact more deeply with scores/interpret. Maybe the
|
|
||||||
// K3DInstallationScore should expose a method to get_client ? Not too sure what would be a
|
|
||||||
// good implementation due to the stateful nature of the k3d thing. Which is why I went
|
|
||||||
// with this solution for now
|
|
||||||
let k3d = k3d_rs::K3d::new(k3d_score.installation_path, Some(k3d_score.cluster_name));
|
|
||||||
let state = match k3d.get_client().await {
|
|
||||||
Ok(client) => K8sState {
|
|
||||||
client: Arc::new(K8sClient::new(client)),
|
|
||||||
source: K8sSource::LocalK3d,
|
|
||||||
message: "K8s client ready".to_string(),
|
|
||||||
},
|
|
||||||
Err(_) => todo!(),
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(Some(state))
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn ensure_k8s_tenant_manager(&self, k8s_state: &K8sState) -> Result<(), String> {
|
|
||||||
if self.tenant_manager.get().is_some() {
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
|
|
||||||
self.tenant_manager
|
|
||||||
.get_or_try_init(async || -> Result<K8sTenantManager, String> {
|
|
||||||
let k8s_client = self.k8s_client().await?;
|
|
||||||
let network_policy_strategy: Box<dyn NetworkPolicyStrategy> = match k8s_state.source
|
|
||||||
{
|
|
||||||
K8sSource::LocalK3d => Box::new(K3dNetworkPolicyStrategy::new()),
|
|
||||||
K8sSource::Kubeconfig => Box::new(NoopNetworkPolicyStrategy::new()),
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(K8sTenantManager::new(k8s_client, network_policy_strategy))
|
|
||||||
})
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_k8s_tenant_manager(&self) -> Result<&K8sTenantManager, ExecutorError> {
|
|
||||||
match self.tenant_manager.get() {
|
|
||||||
Some(t) => Ok(t),
|
|
||||||
None => Err(ExecutorError::UnexpectedError(
|
|
||||||
"K8sTenantManager not available".to_string(),
|
|
||||||
)),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn ensure_cluster_observability_operator(
|
|
||||||
&self,
|
|
||||||
sender: &RHOBObservability,
|
|
||||||
) -> Result<PreparationOutcome, PreparationError> {
|
|
||||||
let status = Command::new("sh")
|
|
||||||
.args(["-c", "kubectl get crd -A | grep -i rhobs"])
|
|
||||||
.status()
|
|
||||||
.map_err(|e| PreparationError::new(format!("could not connect to cluster: {}", e)))?;
|
|
||||||
|
|
||||||
if !status.success() {
|
|
||||||
if let Some(Some(k8s_state)) = self.k8s_state.get() {
|
|
||||||
match k8s_state.source {
|
|
||||||
K8sSource::LocalK3d => {
|
|
||||||
warn!(
|
|
||||||
"Installing observability operator is not supported on LocalK3d source"
|
|
||||||
);
|
|
||||||
return Ok(PreparationOutcome::Noop);
|
|
||||||
debug!("installing cluster observability operator");
|
|
||||||
todo!();
|
|
||||||
let op_score =
|
|
||||||
prometheus_operator_helm_chart_score(sender.namespace.clone());
|
|
||||||
let result = op_score.interpret(&Inventory::empty(), self).await;
|
|
||||||
|
|
||||||
return match result {
|
|
||||||
Ok(outcome) => match outcome.status {
|
|
||||||
InterpretStatus::SUCCESS => Ok(PreparationOutcome::Success {
|
|
||||||
details: "installed cluster observability operator".into(),
|
|
||||||
}),
|
|
||||||
InterpretStatus::NOOP => Ok(PreparationOutcome::Noop),
|
|
||||||
_ => Err(PreparationError::new(
|
|
||||||
"failed to install cluster observability operator (unknown error)".into(),
|
|
||||||
)),
|
|
||||||
},
|
|
||||||
Err(err) => Err(PreparationError::new(err.to_string())),
|
|
||||||
};
|
|
||||||
}
|
|
||||||
K8sSource::Kubeconfig => {
|
|
||||||
debug!(
|
|
||||||
"unable to install cluster observability operator, contact cluster admin"
|
|
||||||
);
|
|
||||||
return Ok(PreparationOutcome::Noop);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
warn!(
|
|
||||||
"Unable to detect k8s_state. Skipping Cluster Observability Operator install."
|
|
||||||
);
|
|
||||||
return Ok(PreparationOutcome::Noop);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
debug!("Cluster Observability Operator is already present, skipping install");
|
|
||||||
|
|
||||||
Ok(PreparationOutcome::Success {
|
|
||||||
details: "cluster observability operator present in cluster".into(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn ensure_prometheus_operator(
|
|
||||||
&self,
|
|
||||||
sender: &CRDPrometheus,
|
|
||||||
) -> Result<PreparationOutcome, PreparationError> {
|
|
||||||
let status = Command::new("sh")
|
|
||||||
.args(["-c", "kubectl get crd -A | grep -i prometheuses"])
|
|
||||||
.status()
|
|
||||||
.map_err(|e| PreparationError::new(format!("could not connect to cluster: {}", e)))?;
|
|
||||||
|
|
||||||
if !status.success() {
|
|
||||||
if let Some(Some(k8s_state)) = self.k8s_state.get() {
|
|
||||||
match k8s_state.source {
|
|
||||||
K8sSource::LocalK3d => {
|
|
||||||
debug!("installing prometheus operator");
|
|
||||||
let op_score =
|
|
||||||
prometheus_operator_helm_chart_score(sender.namespace.clone());
|
|
||||||
let result = op_score.interpret(&Inventory::empty(), self).await;
|
|
||||||
|
|
||||||
return match result {
|
|
||||||
Ok(outcome) => match outcome.status {
|
|
||||||
InterpretStatus::SUCCESS => Ok(PreparationOutcome::Success {
|
|
||||||
details: "installed prometheus operator".into(),
|
|
||||||
}),
|
|
||||||
InterpretStatus::NOOP => Ok(PreparationOutcome::Noop),
|
|
||||||
_ => Err(PreparationError::new(
|
|
||||||
"failed to install prometheus operator (unknown error)".into(),
|
|
||||||
)),
|
|
||||||
},
|
|
||||||
Err(err) => Err(PreparationError::new(err.to_string())),
|
|
||||||
};
|
|
||||||
}
|
|
||||||
K8sSource::Kubeconfig => {
|
|
||||||
debug!("unable to install prometheus operator, contact cluster admin");
|
|
||||||
return Ok(PreparationOutcome::Noop);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
warn!("Unable to detect k8s_state. Skipping Prometheus Operator install.");
|
|
||||||
return Ok(PreparationOutcome::Noop);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
debug!("Prometheus operator is already present, skipping install");
|
|
||||||
|
|
||||||
Ok(PreparationOutcome::Success {
|
|
||||||
details: "prometheus operator present in cluster".into(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Runtime configuration for [`K8sAnywhereTopology`], normally populated from
/// environment variables (see `K8sAnywhereConfig::from_env`).
#[derive(Clone, Debug)]
pub struct K8sAnywhereConfig {
    /// The path of the KUBECONFIG file that Harmony should use to interact with the Kubernetes
    /// cluster
    ///
    /// Default : None
    pub kubeconfig: Option<String>,

    /// Whether to use the system KUBECONFIG, either the environment variable or the file in the
    /// default or configured location
    ///
    /// Default : false
    pub use_system_kubeconfig: bool,

    /// Whether to install automatically a kubernetes cluster
    ///
    /// When enabled, autoinstall will setup a K3D cluster on the localhost. https://k3d.io/stable/
    ///
    /// Default: true
    pub autoinstall: bool,

    /// Whether to use local k3d cluster.
    ///
    /// Takes precedence over other options, useful to avoid messing up a remote cluster by mistake
    ///
    /// default: true
    pub use_local_k3d: bool,

    // TODO harmony_profile should probably be managed at a more core level
    // than this topology-specific config.
    pub harmony_profile: String,
}
|
|
||||||
|
|
||||||
impl K8sAnywhereConfig {
|
|
||||||
fn from_env() -> Self {
|
|
||||||
Self {
|
|
||||||
kubeconfig: std::env::var("KUBECONFIG").ok().map(|v| v.to_string()),
|
|
||||||
use_system_kubeconfig: std::env::var("HARMONY_USE_SYSTEM_KUBECONFIG")
|
|
||||||
.map_or_else(|_| false, |v| v.parse().ok().unwrap_or(false)),
|
|
||||||
autoinstall: std::env::var("HARMONY_AUTOINSTALL")
|
|
||||||
.map_or_else(|_| true, |v| v.parse().ok().unwrap_or(false)),
|
|
||||||
// TODO harmony_profile should be managed at a more core level than this
|
|
||||||
harmony_profile: std::env::var("HARMONY_PROFILE").map_or_else(
|
|
||||||
|_| "dev".to_string(),
|
|
||||||
|v| v.parse().ok().unwrap_or("dev".to_string()),
|
|
||||||
),
|
|
||||||
use_local_k3d: std::env::var("HARMONY_USE_LOCAL_K3D")
|
|
||||||
.map_or_else(|_| true, |v| v.parse().ok().unwrap_or(true)),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[async_trait]
|
|
||||||
impl Topology for K8sAnywhereTopology {
|
|
||||||
fn name(&self) -> &str {
|
|
||||||
"K8sAnywhereTopology"
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
|
|
||||||
let k8s_state = self
|
|
||||||
.k8s_state
|
|
||||||
.get_or_try_init(|| self.try_get_or_install_k8s_client())
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
let k8s_state: &K8sState = k8s_state.as_ref().ok_or(PreparationError::new(
|
|
||||||
"no K8s client could be found or installed".to_string(),
|
|
||||||
))?;
|
|
||||||
|
|
||||||
self.ensure_k8s_tenant_manager(k8s_state)
|
|
||||||
.await
|
|
||||||
.map_err(PreparationError::new)?;
|
|
||||||
|
|
||||||
match self.is_helm_available() {
|
|
||||||
Ok(()) => Ok(PreparationOutcome::Success {
|
|
||||||
details: format!("{} + helm available", k8s_state.message.clone()),
|
|
||||||
}),
|
|
||||||
Err(e) => Err(PreparationError::new(format!("helm unavailable: {}", e))),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl MultiTargetTopology for K8sAnywhereTopology {
|
|
||||||
fn current_target(&self) -> DeploymentTarget {
|
|
||||||
if self.config.use_local_k3d {
|
|
||||||
return DeploymentTarget::LocalDev;
|
|
||||||
}
|
|
||||||
|
|
||||||
match self.config.harmony_profile.to_lowercase().as_str() {
|
|
||||||
"staging" => DeploymentTarget::Staging,
|
|
||||||
"production" => DeploymentTarget::Production,
|
|
||||||
_ => todo!("HARMONY_PROFILE must be set when use_local_k3d is false"),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl HelmCommand for K8sAnywhereTopology {}
|
|
||||||
|
|
||||||
#[async_trait]
|
|
||||||
impl TenantManager for K8sAnywhereTopology {
|
|
||||||
async fn provision_tenant(&self, config: &TenantConfig) -> Result<(), ExecutorError> {
|
|
||||||
self.get_k8s_tenant_manager()?
|
|
||||||
.provision_tenant(config)
|
|
||||||
.await
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn get_tenant_config(&self) -> Option<TenantConfig> {
|
|
||||||
self.get_k8s_tenant_manager()
|
|
||||||
.ok()?
|
|
||||||
.get_tenant_config()
|
|
||||||
.await
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[async_trait]
|
|
||||||
impl Ingress for K8sAnywhereTopology {
|
|
||||||
//TODO this is specifically for openshift/okd which violates the k8sanywhere idea
|
|
||||||
async fn get_domain(&self, service: &str) -> Result<String, PreparationError> {
|
|
||||||
let client = self.k8s_client().await?;
|
|
||||||
|
|
||||||
if let Some(Some(k8s_state)) = self.k8s_state.get() {
|
|
||||||
match k8s_state.source {
|
|
||||||
K8sSource::LocalK3d => Ok(format!("{service}.local.k3d")),
|
|
||||||
K8sSource::Kubeconfig => {
|
|
||||||
self.openshift_ingress_operator_available().await?;
|
|
||||||
|
|
||||||
let gvk = GroupVersionKind {
|
|
||||||
group: "operator.openshift.io".into(),
|
|
||||||
version: "v1".into(),
|
|
||||||
kind: "IngressController".into(),
|
|
||||||
};
|
|
||||||
let ic = client
|
|
||||||
.get_resource_json_value(
|
|
||||||
"default",
|
|
||||||
Some("openshift-ingress-operator"),
|
|
||||||
&gvk,
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
.map_err(|_| {
|
|
||||||
PreparationError::new("Failed to fetch IngressController".to_string())
|
|
||||||
})?;
|
|
||||||
|
|
||||||
match ic.data["status"]["domain"].as_str() {
|
|
||||||
Some(domain) => Ok(format!("{service}.{domain}")),
|
|
||||||
None => Err(PreparationError::new("Could not find domain".to_string())),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
Err(PreparationError::new(
|
|
||||||
"Cannot get domain: unable to detect K8s state".to_string(),
|
|
||||||
))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
1305
harmony/src/domain/topology/k8s_anywhere/k8s_anywhere.rs
Normal file
1305
harmony/src/domain/topology/k8s_anywhere/k8s_anywhere.rs
Normal file
File diff suppressed because it is too large
Load Diff
3
harmony/src/domain/topology/k8s_anywhere/mod.rs
Normal file
3
harmony/src/domain/topology/k8s_anywhere/mod.rs
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
mod k8s_anywhere;
|
||||||
|
mod postgres;
|
||||||
|
pub use k8s_anywhere::*;
|
||||||
125
harmony/src/domain/topology/k8s_anywhere/postgres.rs
Normal file
125
harmony/src/domain/topology/k8s_anywhere/postgres.rs
Normal file
@@ -0,0 +1,125 @@
|
|||||||
|
use async_trait::async_trait;
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
interpret::Outcome,
|
||||||
|
inventory::Inventory,
|
||||||
|
modules::postgresql::{
|
||||||
|
K8sPostgreSQLScore,
|
||||||
|
capability::{PostgreSQL, PostgreSQLConfig, PostgreSQLEndpoint, ReplicationCerts},
|
||||||
|
},
|
||||||
|
score::Score,
|
||||||
|
topology::{K8sAnywhereTopology, K8sclient},
|
||||||
|
};
|
||||||
|
|
||||||
|
use k8s_openapi::api::core::v1::{Secret, Service};
|
||||||
|
use log::info;
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl PostgreSQL for K8sAnywhereTopology {
|
||||||
|
async fn deploy(&self, config: &PostgreSQLConfig) -> Result<String, String> {
|
||||||
|
K8sPostgreSQLScore {
|
||||||
|
config: config.clone(),
|
||||||
|
}
|
||||||
|
.interpret(&Inventory::empty(), self)
|
||||||
|
.await
|
||||||
|
.map_err(|e| format!("Failed to deploy k8s postgresql : {e}"))?;
|
||||||
|
|
||||||
|
Ok(config.cluster_name.clone())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Extracts PostgreSQL-specific replication certs (PEM format) from a deployed primary cluster.
|
||||||
|
/// Abstracts away storage/retrieval details (e.g., secrets, files).
|
||||||
|
async fn get_replication_certs(&self, config: &PostgreSQLConfig) -> Result<ReplicationCerts, String> {
|
||||||
|
let cluster_name = &config.cluster_name;
|
||||||
|
let namespace = &config.namespace;
|
||||||
|
let k8s_client = self.k8s_client().await.map_err(|e| e.to_string())?;
|
||||||
|
|
||||||
|
let replication_secret_name = format!("{cluster_name}-replication");
|
||||||
|
let replication_secret = k8s_client
|
||||||
|
.get_resource::<Secret>(&replication_secret_name, Some(namespace))
|
||||||
|
.await
|
||||||
|
.map_err(|e| format!("Failed to get {replication_secret_name}: {e}"))?
|
||||||
|
.ok_or_else(|| format!("Replication secret '{replication_secret_name}' not found"))?;
|
||||||
|
|
||||||
|
let ca_secret_name = format!("{cluster_name}-ca");
|
||||||
|
let ca_secret = k8s_client
|
||||||
|
.get_resource::<Secret>(&ca_secret_name, Some(namespace))
|
||||||
|
.await
|
||||||
|
.map_err(|e| format!("Failed to get {ca_secret_name}: {e}"))?
|
||||||
|
.ok_or_else(|| format!("CA secret '{ca_secret_name}' not found"))?;
|
||||||
|
|
||||||
|
let replication_data = replication_secret
|
||||||
|
.data
|
||||||
|
.as_ref()
|
||||||
|
.ok_or("Replication secret has no data".to_string())?;
|
||||||
|
let ca_data = ca_secret
|
||||||
|
.data
|
||||||
|
.as_ref()
|
||||||
|
.ok_or("CA secret has no data".to_string())?;
|
||||||
|
|
||||||
|
let tls_key_bs = replication_data
|
||||||
|
.get("tls.key")
|
||||||
|
.ok_or("missing tls.key in replication secret".to_string())?;
|
||||||
|
let tls_crt_bs = replication_data
|
||||||
|
.get("tls.crt")
|
||||||
|
.ok_or("missing tls.crt in replication secret".to_string())?;
|
||||||
|
let ca_crt_bs = ca_data
|
||||||
|
.get("ca.crt")
|
||||||
|
.ok_or("missing ca.crt in CA secret".to_string())?;
|
||||||
|
|
||||||
|
let streaming_replica_key_pem = String::from_utf8_lossy(&tls_key_bs.0).to_string();
|
||||||
|
let streaming_replica_cert_pem = String::from_utf8_lossy(&tls_crt_bs.0).to_string();
|
||||||
|
let ca_cert_pem = String::from_utf8_lossy(&ca_crt_bs.0).to_string();
|
||||||
|
|
||||||
|
info!("Successfully extracted replication certs for cluster '{cluster_name}'");
|
||||||
|
|
||||||
|
Ok(ReplicationCerts {
|
||||||
|
ca_cert_pem,
|
||||||
|
streaming_replica_cert_pem,
|
||||||
|
streaming_replica_key_pem,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Gets the internal/private endpoint (e.g., k8s service FQDN:5432) for the cluster.
|
||||||
|
async fn get_endpoint(&self, config: &PostgreSQLConfig) -> Result<PostgreSQLEndpoint, String> {
|
||||||
|
let cluster_name = &config.cluster_name;
|
||||||
|
let namespace = &config.namespace;
|
||||||
|
|
||||||
|
let k8s_client = self.k8s_client().await.map_err(|e| e.to_string())?;
|
||||||
|
|
||||||
|
let service_name = format!("{cluster_name}-rw");
|
||||||
|
let service = k8s_client
|
||||||
|
.get_resource::<Service>(&service_name, Some(namespace))
|
||||||
|
.await
|
||||||
|
.map_err(|e| format!("Failed to get service '{service_name}': {e}"))?
|
||||||
|
.ok_or_else(|| {
|
||||||
|
format!("Service '{service_name}' not found for cluster '{cluster_name}")
|
||||||
|
})?;
|
||||||
|
|
||||||
|
let ns = service
|
||||||
|
.metadata
|
||||||
|
.namespace
|
||||||
|
.as_deref()
|
||||||
|
.unwrap_or("default")
|
||||||
|
.to_string();
|
||||||
|
let host = format!("{service_name}.{ns}.svc.cluster.local");
|
||||||
|
|
||||||
|
info!("Internal endpoint for '{cluster_name}': {host}:5432");
|
||||||
|
|
||||||
|
Ok(PostgreSQLEndpoint { host, port: 5432 })
|
||||||
|
}
|
||||||
|
|
||||||
|
// /// Gets the public/externally routable endpoint if configured (e.g., OKD Route:443 for TLS passthrough).
|
||||||
|
// /// Returns None if no public endpoint (internal-only cluster).
|
||||||
|
// /// UNSTABLE: This is opinionated for initial multisite use cases. Networking abstraction is complex
|
||||||
|
// /// (cf. k8s Ingress -> Gateway API evolution); may move to higher-order Networking/PostgreSQLNetworking trait.
|
||||||
|
// async fn get_public_endpoint(
|
||||||
|
// &self,
|
||||||
|
// cluster_name: &str,
|
||||||
|
// ) -> Result<Option<PostgreSQLEndpoint>, String> {
|
||||||
|
// // TODO: Implement OpenShift Route lookup targeting '{cluster_name}-rw' service on port 5432 with TLS passthrough
|
||||||
|
// // For now, return None assuming internal-only access or manual route configuration
|
||||||
|
// info!("Public endpoint lookup not implemented for '{cluster_name}', returning None");
|
||||||
|
// Ok(None)
|
||||||
|
// }
|
||||||
|
}
|
||||||
@@ -28,13 +28,7 @@ pub trait LoadBalancer: Send + Sync {
|
|||||||
&self,
|
&self,
|
||||||
service: &LoadBalancerService,
|
service: &LoadBalancerService,
|
||||||
) -> Result<(), ExecutorError> {
|
) -> Result<(), ExecutorError> {
|
||||||
debug!(
|
|
||||||
"Listing LoadBalancer services {:?}",
|
|
||||||
self.list_services().await
|
|
||||||
);
|
|
||||||
if !self.list_services().await.contains(service) {
|
|
||||||
self.add_service(service).await?;
|
self.add_service(service).await?;
|
||||||
}
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,5 +1,7 @@
|
|||||||
|
mod failover;
|
||||||
mod ha_cluster;
|
mod ha_cluster;
|
||||||
pub mod ingress;
|
pub mod ingress;
|
||||||
|
pub use failover::*;
|
||||||
use harmony_types::net::IpAddress;
|
use harmony_types::net::IpAddress;
|
||||||
mod host_binding;
|
mod host_binding;
|
||||||
mod http;
|
mod http;
|
||||||
@@ -13,7 +15,7 @@ pub use k8s_anywhere::*;
|
|||||||
pub use localhost::*;
|
pub use localhost::*;
|
||||||
pub mod k8s;
|
pub mod k8s;
|
||||||
mod load_balancer;
|
mod load_balancer;
|
||||||
mod router;
|
pub mod router;
|
||||||
mod tftp;
|
mod tftp;
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
pub use ha_cluster::*;
|
pub use ha_cluster::*;
|
||||||
|
|||||||
@@ -1,7 +1,18 @@
|
|||||||
use std::{net::Ipv4Addr, str::FromStr, sync::Arc};
|
use std::{
|
||||||
|
error::Error,
|
||||||
|
fmt::{self, Debug},
|
||||||
|
net::Ipv4Addr,
|
||||||
|
str::FromStr,
|
||||||
|
sync::Arc,
|
||||||
|
};
|
||||||
|
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use harmony_types::net::{IpAddress, MacAddress};
|
use derive_new::new;
|
||||||
|
use harmony_types::{
|
||||||
|
id::Id,
|
||||||
|
net::{IpAddress, MacAddress},
|
||||||
|
switch::PortLocation,
|
||||||
|
};
|
||||||
use serde::Serialize;
|
use serde::Serialize;
|
||||||
|
|
||||||
use crate::executors::ExecutorError;
|
use crate::executors::ExecutorError;
|
||||||
@@ -15,8 +26,8 @@ pub struct DHCPStaticEntry {
|
|||||||
pub ip: Ipv4Addr,
|
pub ip: Ipv4Addr,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl std::fmt::Display for DHCPStaticEntry {
|
impl fmt::Display for DHCPStaticEntry {
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
let mac = self
|
let mac = self
|
||||||
.mac
|
.mac
|
||||||
.iter()
|
.iter()
|
||||||
@@ -38,8 +49,8 @@ pub trait Firewall: Send + Sync {
|
|||||||
fn get_host(&self) -> LogicalHost;
|
fn get_host(&self) -> LogicalHost;
|
||||||
}
|
}
|
||||||
|
|
||||||
impl std::fmt::Debug for dyn Firewall {
|
impl Debug for dyn Firewall {
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
f.write_fmt(format_args!("Firewall {}", self.get_ip()))
|
f.write_fmt(format_args!("Firewall {}", self.get_ip()))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -61,7 +72,7 @@ pub struct PxeOptions {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
pub trait DhcpServer: Send + Sync + std::fmt::Debug {
|
pub trait DhcpServer: Send + Sync + Debug {
|
||||||
async fn add_static_mapping(&self, entry: &DHCPStaticEntry) -> Result<(), ExecutorError>;
|
async fn add_static_mapping(&self, entry: &DHCPStaticEntry) -> Result<(), ExecutorError>;
|
||||||
async fn remove_static_mapping(&self, mac: &MacAddress) -> Result<(), ExecutorError>;
|
async fn remove_static_mapping(&self, mac: &MacAddress) -> Result<(), ExecutorError>;
|
||||||
async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)>;
|
async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)>;
|
||||||
@@ -100,8 +111,8 @@ pub trait DnsServer: Send + Sync {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl std::fmt::Debug for dyn DnsServer {
|
impl Debug for dyn DnsServer {
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
f.write_fmt(format_args!("DnsServer {}", self.get_ip()))
|
f.write_fmt(format_args!("DnsServer {}", self.get_ip()))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -137,8 +148,8 @@ pub enum DnsRecordType {
|
|||||||
TXT,
|
TXT,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl std::fmt::Display for DnsRecordType {
|
impl fmt::Display for DnsRecordType {
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
match self {
|
match self {
|
||||||
DnsRecordType::A => write!(f, "A"),
|
DnsRecordType::A => write!(f, "A"),
|
||||||
DnsRecordType::AAAA => write!(f, "AAAA"),
|
DnsRecordType::AAAA => write!(f, "AAAA"),
|
||||||
@@ -172,6 +183,108 @@ impl FromStr for DnsRecordType {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
pub trait NetworkManager: Debug + Send + Sync {
|
||||||
|
async fn ensure_network_manager_installed(&self) -> Result<(), NetworkError>;
|
||||||
|
async fn configure_bond(&self, config: &HostNetworkConfig) -> Result<(), NetworkError>;
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, new)]
|
||||||
|
pub struct NetworkError {
|
||||||
|
msg: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl fmt::Display for NetworkError {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
|
f.write_str(&self.msg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Error for NetworkError {}
|
||||||
|
|
||||||
|
impl From<kube::Error> for NetworkError {
|
||||||
|
fn from(value: kube::Error) -> Self {
|
||||||
|
NetworkError::new(value.to_string())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<String> for NetworkError {
|
||||||
|
fn from(value: String) -> Self {
|
||||||
|
NetworkError::new(value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
pub trait Switch: Send + Sync {
|
||||||
|
async fn setup_switch(&self) -> Result<(), SwitchError>;
|
||||||
|
|
||||||
|
async fn get_port_for_mac_address(
|
||||||
|
&self,
|
||||||
|
mac_address: &MacAddress,
|
||||||
|
) -> Result<Option<PortLocation>, SwitchError>;
|
||||||
|
|
||||||
|
async fn configure_port_channel(&self, config: &HostNetworkConfig) -> Result<(), SwitchError>;
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, PartialEq)]
|
||||||
|
pub struct HostNetworkConfig {
|
||||||
|
pub host_id: Id,
|
||||||
|
pub switch_ports: Vec<SwitchPort>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, PartialEq)]
|
||||||
|
pub struct SwitchPort {
|
||||||
|
pub interface: NetworkInterface,
|
||||||
|
pub port: PortLocation,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, PartialEq)]
|
||||||
|
pub struct NetworkInterface {
|
||||||
|
pub name: String,
|
||||||
|
pub mac_address: MacAddress,
|
||||||
|
pub speed_mbps: Option<u32>,
|
||||||
|
pub mtu: u32,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, new)]
|
||||||
|
pub struct SwitchError {
|
||||||
|
msg: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl fmt::Display for SwitchError {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
|
f.write_str(&self.msg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Error for SwitchError {}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
pub trait SwitchClient: Debug + Send + Sync {
|
||||||
|
/// Executes essential, idempotent, one-time initial configuration steps.
|
||||||
|
///
|
||||||
|
/// This is an opiniated procedure that setups a switch to provide high availability
|
||||||
|
/// capabilities as decided by the NationTech team.
|
||||||
|
///
|
||||||
|
/// This includes tasks like enabling switchport for all interfaces
|
||||||
|
/// except the ones intended for Fabric Networking, etc.
|
||||||
|
///
|
||||||
|
/// The implementation must ensure the operation is **idempotent** (safe to run multiple times)
|
||||||
|
/// and that it doesn't break existing configurations.
|
||||||
|
async fn setup(&self) -> Result<(), SwitchError>;
|
||||||
|
|
||||||
|
async fn find_port(
|
||||||
|
&self,
|
||||||
|
mac_address: &MacAddress,
|
||||||
|
) -> Result<Option<PortLocation>, SwitchError>;
|
||||||
|
|
||||||
|
async fn configure_port_channel(
|
||||||
|
&self,
|
||||||
|
channel_name: &str,
|
||||||
|
switch_ports: Vec<PortLocation>,
|
||||||
|
) -> Result<u8, SwitchError>;
|
||||||
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod test {
|
mod test {
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|||||||
@@ -21,6 +21,7 @@ pub struct AlertingInterpret<S: AlertSender> {
|
|||||||
pub sender: S,
|
pub sender: S,
|
||||||
pub receivers: Vec<Box<dyn AlertReceiver<S>>>,
|
pub receivers: Vec<Box<dyn AlertReceiver<S>>>,
|
||||||
pub rules: Vec<Box<dyn AlertRule<S>>>,
|
pub rules: Vec<Box<dyn AlertRule<S>>>,
|
||||||
|
pub scrape_targets: Option<Vec<Box<dyn ScrapeTarget<S>>>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
@@ -30,6 +31,7 @@ impl<S: AlertSender + Installable<T>, T: Topology> Interpret<T> for AlertingInte
|
|||||||
inventory: &Inventory,
|
inventory: &Inventory,
|
||||||
topology: &T,
|
topology: &T,
|
||||||
) -> Result<Outcome, InterpretError> {
|
) -> Result<Outcome, InterpretError> {
|
||||||
|
debug!("hit sender configure for AlertingInterpret");
|
||||||
self.sender.configure(inventory, topology).await?;
|
self.sender.configure(inventory, topology).await?;
|
||||||
for receiver in self.receivers.iter() {
|
for receiver in self.receivers.iter() {
|
||||||
receiver.install(&self.sender).await?;
|
receiver.install(&self.sender).await?;
|
||||||
@@ -38,6 +40,12 @@ impl<S: AlertSender + Installable<T>, T: Topology> Interpret<T> for AlertingInte
|
|||||||
debug!("installing rule: {:#?}", rule);
|
debug!("installing rule: {:#?}", rule);
|
||||||
rule.install(&self.sender).await?;
|
rule.install(&self.sender).await?;
|
||||||
}
|
}
|
||||||
|
if let Some(targets) = &self.scrape_targets {
|
||||||
|
for target in targets.iter() {
|
||||||
|
debug!("installing scrape_target: {:#?}", target);
|
||||||
|
target.install(&self.sender).await?;
|
||||||
|
}
|
||||||
|
}
|
||||||
self.sender.ensure_installed(inventory, topology).await?;
|
self.sender.ensure_installed(inventory, topology).await?;
|
||||||
Ok(Outcome::success(format!(
|
Ok(Outcome::success(format!(
|
||||||
"successfully installed alert sender {}",
|
"successfully installed alert sender {}",
|
||||||
@@ -77,6 +85,7 @@ pub trait AlertRule<S: AlertSender>: std::fmt::Debug + Send + Sync {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
pub trait ScrapeTarget<S: AlertSender> {
|
pub trait ScrapeTarget<S: AlertSender>: std::fmt::Debug + Send + Sync {
|
||||||
async fn install(&self, sender: &S) -> Result<(), InterpretError>;
|
async fn install(&self, sender: &S) -> Result<Outcome, InterpretError>;
|
||||||
|
fn clone_box(&self) -> Box<dyn ScrapeTarget<S>>;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,11 +1,20 @@
|
|||||||
|
use async_trait::async_trait;
|
||||||
use cidr::Ipv4Cidr;
|
use cidr::Ipv4Cidr;
|
||||||
use derive_new::new;
|
use derive_new::new;
|
||||||
|
use serde::Serialize;
|
||||||
|
|
||||||
use super::{IpAddress, LogicalHost};
|
use super::{IpAddress, LogicalHost};
|
||||||
|
|
||||||
|
/// Basic network router abstraction (L3 IP routing/gateway).
|
||||||
|
/// Distinguished from TlsRouter (L4 TLS passthrough).
|
||||||
pub trait Router: Send + Sync {
|
pub trait Router: Send + Sync {
|
||||||
|
/// Gateway IP address for this subnet/router.
|
||||||
fn get_gateway(&self) -> IpAddress;
|
fn get_gateway(&self) -> IpAddress;
|
||||||
|
|
||||||
|
/// CIDR block managed by this router.
|
||||||
fn get_cidr(&self) -> Ipv4Cidr;
|
fn get_cidr(&self) -> Ipv4Cidr;
|
||||||
|
|
||||||
|
/// Logical host associated with this router.
|
||||||
fn get_host(&self) -> LogicalHost;
|
fn get_host(&self) -> LogicalHost;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -38,3 +47,81 @@ impl Router for UnmanagedRouter {
|
|||||||
todo!()
|
todo!()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Desired state config for a TLS passthrough route.
|
||||||
|
/// Forwards external TLS (port 443) → backend service:target_port (no termination at router).
|
||||||
|
/// Inspired by CNPG multisite: exposes `-rw`/`-ro` services publicly via OKD Route/HAProxy/K8s
|
||||||
|
/// Gateway etc.
|
||||||
|
///
|
||||||
|
/// # Example
|
||||||
|
/// ```
|
||||||
|
/// use harmony::topology::router::TlsRoute;
|
||||||
|
/// let postgres_rw = TlsRoute {
|
||||||
|
/// hostname: "postgres-cluster-example.public.domain.io".to_string(),
|
||||||
|
/// backend: "postgres-cluster-example-rw".to_string(), // k8s Service or HAProxy upstream
|
||||||
|
/// target_port: 5432,
|
||||||
|
/// };
|
||||||
|
/// ```
|
||||||
|
#[derive(Clone, Debug, Serialize)]
|
||||||
|
pub struct TlsRoute {
|
||||||
|
/// Public hostname clients connect to (TLS SNI, port 443 implicit).
|
||||||
|
/// Router matches this for passthrough forwarding.
|
||||||
|
pub hostname: String,
|
||||||
|
|
||||||
|
/// Backend/host identifier (k8s Service, HAProxy upstream, IP/FQDN, etc.).
|
||||||
|
pub backend: String,
|
||||||
|
|
||||||
|
/// Backend TCP port (Postgres: 5432).
|
||||||
|
pub target_port: u16,
|
||||||
|
|
||||||
|
/// The environment in which it lives.
|
||||||
|
/// TODO clarify how we handle this in higher level abstractions. The namespace name is a
|
||||||
|
/// direct mapping to k8s but that could be misleading for other implementations.
|
||||||
|
pub namespace: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl TlsRoute {
|
||||||
|
pub fn to_string_short(&self) -> String {
|
||||||
|
format!("{}-{}:{}", self.hostname, self.backend, self.target_port)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn backend_info_string(&self) -> String {
|
||||||
|
format!("{}:{}", self.backend, self.target_port)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Installs and queries TLS passthrough routes (L4 TCP/SNI forwarding, no TLS termination).
|
||||||
|
/// Agnostic to impl: OKD Route, AWS NLB+HAProxy, k3s Envoy Gateway, Apache ProxyPass.
|
||||||
|
/// Used by PostgreSQL capability to expose CNPG clusters multisite (site1 → site2 replication).
|
||||||
|
///
|
||||||
|
/// # Usage
|
||||||
|
/// ```ignore
|
||||||
|
/// use harmony::topology::router::TlsRoute;
|
||||||
|
/// // After CNPG deploy, expose RW endpoint
|
||||||
|
/// async fn route() {
|
||||||
|
/// let topology = okd_topology();
|
||||||
|
/// let route = TlsRoute { /* ... */ };
|
||||||
|
/// topology.install_route(route).await; // OKD Route, HAProxy reload, etc.
|
||||||
|
/// }
|
||||||
|
/// ```
|
||||||
|
#[async_trait]
|
||||||
|
pub trait TlsRouter: Send + Sync {
|
||||||
|
/// Provisions the route (idempotent where possible).
|
||||||
|
/// Example: OKD Route{ host, to: backend:target_port, tls: {passthrough} };
|
||||||
|
/// HAProxy frontend→backend \"postgres-upstream\".
|
||||||
|
async fn install_route(&self, config: TlsRoute) -> Result<(), String>;
|
||||||
|
|
||||||
|
/// Gets the base domain that can be used to deploy applications that will be automatically
|
||||||
|
/// routed to this cluster.
|
||||||
|
///
|
||||||
|
/// For example, if we have *.apps.nationtech.io pointing to a public load balancer, then this
|
||||||
|
/// function would return
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// Some(String::new("apps.nationtech.io"))
|
||||||
|
/// ```
|
||||||
|
async fn get_wildcard_domain(&self) -> Result<Option<String>, String>;
|
||||||
|
|
||||||
|
/// Returns the port that this router exposes externally.
|
||||||
|
async fn get_router_port(&self) -> u16;
|
||||||
|
}
|
||||||
|
|||||||
378
harmony/src/infra/brocade.rs
Normal file
378
harmony/src/infra/brocade.rs
Normal file
@@ -0,0 +1,378 @@
|
|||||||
|
use async_trait::async_trait;
|
||||||
|
use brocade::{BrocadeClient, BrocadeOptions, InterSwitchLink, InterfaceStatus, PortOperatingMode};
|
||||||
|
use harmony_types::{
|
||||||
|
net::{IpAddress, MacAddress},
|
||||||
|
switch::{PortDeclaration, PortLocation},
|
||||||
|
};
|
||||||
|
use option_ext::OptionExt;
|
||||||
|
|
||||||
|
use crate::topology::{SwitchClient, SwitchError};
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct BrocadeSwitchClient {
|
||||||
|
brocade: Box<dyn BrocadeClient + Send + Sync>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl BrocadeSwitchClient {
|
||||||
|
pub async fn init(
|
||||||
|
ip_addresses: &[IpAddress],
|
||||||
|
username: &str,
|
||||||
|
password: &str,
|
||||||
|
options: Option<BrocadeOptions>,
|
||||||
|
) -> Result<Self, brocade::Error> {
|
||||||
|
let brocade = brocade::init(ip_addresses, 22, username, password, options).await?;
|
||||||
|
Ok(Self { brocade })
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl SwitchClient for BrocadeSwitchClient {
|
||||||
|
async fn setup(&self) -> Result<(), SwitchError> {
|
||||||
|
let stack_topology = self
|
||||||
|
.brocade
|
||||||
|
.get_stack_topology()
|
||||||
|
.await
|
||||||
|
.map_err(|e| SwitchError::new(e.to_string()))?;
|
||||||
|
|
||||||
|
let interfaces = self
|
||||||
|
.brocade
|
||||||
|
.get_interfaces()
|
||||||
|
.await
|
||||||
|
.map_err(|e| SwitchError::new(e.to_string()))?;
|
||||||
|
|
||||||
|
let interfaces: Vec<(String, PortOperatingMode)> = interfaces
|
||||||
|
.into_iter()
|
||||||
|
.filter(|interface| {
|
||||||
|
interface.operating_mode.is_none() && interface.status == InterfaceStatus::Connected
|
||||||
|
})
|
||||||
|
.filter(|interface| {
|
||||||
|
!stack_topology.iter().any(|link: &InterSwitchLink| {
|
||||||
|
link.local_port == interface.port_location
|
||||||
|
|| link.remote_port.contains(&interface.port_location)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
.map(|interface| (interface.name.clone(), PortOperatingMode::Access))
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
if interfaces.is_empty() {
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
|
self.brocade
|
||||||
|
.configure_interfaces(interfaces)
|
||||||
|
.await
|
||||||
|
.map_err(|e| SwitchError::new(e.to_string()))?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn find_port(
|
||||||
|
&self,
|
||||||
|
mac_address: &MacAddress,
|
||||||
|
) -> Result<Option<PortLocation>, SwitchError> {
|
||||||
|
let table = self
|
||||||
|
.brocade
|
||||||
|
.get_mac_address_table()
|
||||||
|
.await
|
||||||
|
.map_err(|e| SwitchError::new(format!("{e}")))?;
|
||||||
|
|
||||||
|
let port = table
|
||||||
|
.iter()
|
||||||
|
.find(|entry| entry.mac_address == *mac_address)
|
||||||
|
.map(|entry| match &entry.port {
|
||||||
|
PortDeclaration::Single(port_location) => Ok(port_location.clone()),
|
||||||
|
_ => Err(SwitchError::new(
|
||||||
|
"Multiple ports found for MAC address".into(),
|
||||||
|
)),
|
||||||
|
});
|
||||||
|
|
||||||
|
match port {
|
||||||
|
Some(Ok(p)) => Ok(Some(p)),
|
||||||
|
Some(Err(e)) => Err(e),
|
||||||
|
None => Ok(None),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn configure_port_channel(
|
||||||
|
&self,
|
||||||
|
channel_name: &str,
|
||||||
|
switch_ports: Vec<PortLocation>,
|
||||||
|
) -> Result<u8, SwitchError> {
|
||||||
|
let channel_id = self
|
||||||
|
.brocade
|
||||||
|
.find_available_channel_id()
|
||||||
|
.await
|
||||||
|
.map_err(|e| SwitchError::new(format!("{e}")))?;
|
||||||
|
|
||||||
|
self.brocade
|
||||||
|
.create_port_channel(channel_id, channel_name, &switch_ports)
|
||||||
|
.await
|
||||||
|
.map_err(|e| SwitchError::new(format!("{e}")))?;
|
||||||
|
|
||||||
|
Ok(channel_id)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use std::sync::{Arc, Mutex};

    use assertor::*;
    use async_trait::async_trait;
    use brocade::{
        BrocadeClient, BrocadeInfo, Error, InterSwitchLink, InterfaceInfo, InterfaceStatus,
        InterfaceType, MacAddressEntry, PortChannelId, PortOperatingMode,
    };
    use harmony_types::switch::PortLocation;

    use crate::{infra::brocade::BrocadeSwitchClient, topology::SwitchClient};

    #[tokio::test]
    async fn setup_should_configure_ethernet_interfaces_as_access_ports() {
        // Two connected, unconfigured interfaces: both must be set to access mode.
        let first_interface = given_interface()
            .with_port_location(PortLocation(1, 0, 1))
            .build();
        let second_interface = given_interface()
            .with_port_location(PortLocation(1, 0, 4))
            .build();
        let brocade = Box::new(FakeBrocadeClient::new(
            vec![],
            vec![first_interface.clone(), second_interface.clone()],
        ));
        let client = BrocadeSwitchClient {
            brocade: brocade.clone(),
        };

        client.setup().await.unwrap();

        let configured_interfaces = brocade.configured_interfaces.lock().unwrap();
        assert_that!(*configured_interfaces).contains_exactly(vec![
            (first_interface.name.clone(), PortOperatingMode::Access),
            (second_interface.name.clone(), PortOperatingMode::Access),
        ]);
    }

    #[tokio::test]
    async fn setup_with_an_already_configured_interface_should_skip_configuration() {
        // An interface that already has an operating mode must be left alone.
        let brocade = Box::new(FakeBrocadeClient::new(
            vec![],
            vec![
                given_interface()
                    .with_operating_mode(Some(PortOperatingMode::Access))
                    .build(),
            ],
        ));
        let client = BrocadeSwitchClient {
            brocade: brocade.clone(),
        };

        client.setup().await.unwrap();

        let configured_interfaces = brocade.configured_interfaces.lock().unwrap();
        assert_that!(*configured_interfaces).is_empty();
    }

    #[tokio::test]
    async fn setup_with_a_disconnected_interface_should_skip_configuration() {
        // Interfaces that are not `Connected` (SFP absent / link down) are skipped.
        let brocade = Box::new(FakeBrocadeClient::new(
            vec![],
            vec![
                given_interface()
                    .with_status(InterfaceStatus::SfpAbsent)
                    .build(),
                given_interface()
                    .with_status(InterfaceStatus::NotConnected)
                    .build(),
            ],
        ));
        let client = BrocadeSwitchClient {
            brocade: brocade.clone(),
        };

        client.setup().await.unwrap();

        let configured_interfaces = brocade.configured_interfaces.lock().unwrap();
        assert_that!(*configured_interfaces).is_empty();
    }

    #[tokio::test]
    async fn setup_with_inter_switch_links_should_not_configure_interfaces_used_to_form_stack() {
        // Every interface below is an endpoint of a stacking link (local or
        // remote side), so none may be reconfigured.
        let brocade = Box::new(FakeBrocadeClient::new(
            vec![
                given_inter_switch_link()
                    .between(PortLocation(1, 0, 1), PortLocation(2, 0, 1))
                    .build(),
                given_inter_switch_link()
                    .between(PortLocation(2, 0, 2), PortLocation(3, 0, 1))
                    .build(),
            ],
            vec![
                given_interface()
                    .with_port_location(PortLocation(1, 0, 1))
                    .build(),
                given_interface()
                    .with_port_location(PortLocation(2, 0, 1))
                    .build(),
                given_interface()
                    .with_port_location(PortLocation(3, 0, 1))
                    .build(),
            ],
        ));
        let client = BrocadeSwitchClient {
            brocade: brocade.clone(),
        };

        client.setup().await.unwrap();

        let configured_interfaces = brocade.configured_interfaces.lock().unwrap();
        assert_that!(*configured_interfaces).is_empty();
    }

    /// In-memory `BrocadeClient` test double: serves canned topology/interface
    /// data and records what was passed to `configure_interfaces`.
    /// Methods the tests never exercise are left as `todo!()`.
    #[derive(Debug, Clone)]
    struct FakeBrocadeClient {
        stack_topology: Vec<InterSwitchLink>,
        interfaces: Vec<InterfaceInfo>,
        // Arc<Mutex<..>> so the recording survives `clone()` (the test keeps one
        // clone for inspection while the client under test owns the other).
        configured_interfaces: Arc<Mutex<Vec<(String, PortOperatingMode)>>>,
    }

    #[async_trait]
    impl BrocadeClient for FakeBrocadeClient {
        async fn version(&self) -> Result<BrocadeInfo, Error> {
            todo!()
        }

        async fn get_mac_address_table(&self) -> Result<Vec<MacAddressEntry>, Error> {
            todo!()
        }

        async fn get_stack_topology(&self) -> Result<Vec<InterSwitchLink>, Error> {
            Ok(self.stack_topology.clone())
        }

        async fn get_interfaces(&self) -> Result<Vec<InterfaceInfo>, Error> {
            Ok(self.interfaces.clone())
        }

        // Records the requested configuration instead of touching hardware.
        async fn configure_interfaces(
            &self,
            interfaces: Vec<(String, PortOperatingMode)>,
        ) -> Result<(), Error> {
            let mut configured_interfaces = self.configured_interfaces.lock().unwrap();
            *configured_interfaces = interfaces;

            Ok(())
        }

        async fn find_available_channel_id(&self) -> Result<PortChannelId, Error> {
            todo!()
        }

        async fn create_port_channel(
            &self,
            _channel_id: PortChannelId,
            _channel_name: &str,
            _ports: &[PortLocation],
        ) -> Result<(), Error> {
            todo!()
        }

        async fn clear_port_channel(&self, _channel_name: &str) -> Result<(), Error> {
            todo!()
        }
    }

    impl FakeBrocadeClient {
        fn new(stack_topology: Vec<InterSwitchLink>, interfaces: Vec<InterfaceInfo>) -> Self {
            Self {
                stack_topology,
                interfaces,
                configured_interfaces: Arc::new(Mutex::new(vec![])),
            }
        }
    }

    /// Test-data builder for `InterfaceInfo`; every field falls back to a
    /// sensible default in `build()`.
    struct InterfaceInfoBuilder {
        port_location: Option<PortLocation>,
        interface_type: Option<InterfaceType>,
        operating_mode: Option<PortOperatingMode>,
        status: Option<InterfaceStatus>,
    }

    impl InterfaceInfoBuilder {
        // Defaults: TenGigabitEthernet 1/0/1, connected, no operating mode.
        fn build(&self) -> InterfaceInfo {
            let interface_type = self
                .interface_type
                .clone()
                .unwrap_or(InterfaceType::Ethernet("TenGigabitEthernet".into()));
            let port_location = self.port_location.clone().unwrap_or(PortLocation(1, 0, 1));
            let name = format!("{interface_type} {port_location}");
            let status = self.status.clone().unwrap_or(InterfaceStatus::Connected);

            InterfaceInfo {
                name,
                port_location,
                interface_type,
                operating_mode: self.operating_mode.clone(),
                status,
            }
        }

        fn with_port_location(self, port_location: PortLocation) -> Self {
            Self {
                port_location: Some(port_location),
                ..self
            }
        }

        fn with_operating_mode(self, operating_mode: Option<PortOperatingMode>) -> Self {
            Self {
                operating_mode,
                ..self
            }
        }

        fn with_status(self, status: InterfaceStatus) -> Self {
            Self {
                status: Some(status),
                ..self
            }
        }
    }

    /// Test-data builder for `InterSwitchLink` (defaults to 1/0/1 <-> 2/0/1).
    struct InterSwitchLinkBuilder {
        link: Option<(PortLocation, PortLocation)>,
    }

    impl InterSwitchLinkBuilder {
        fn build(&self) -> InterSwitchLink {
            let link = self
                .link
                .clone()
                .unwrap_or((PortLocation(1, 0, 1), PortLocation(2, 0, 1)));

            InterSwitchLink {
                local_port: link.0,
                remote_port: Some(link.1),
            }
        }

        fn between(self, local_port: PortLocation, remote_port: PortLocation) -> Self {
            Self {
                link: Some((local_port, remote_port)),
            }
        }
    }

    fn given_interface() -> InterfaceInfoBuilder {
        InterfaceInfoBuilder {
            port_location: None,
            interface_type: None,
            operating_mode: None,
            status: None,
        }
    }

    fn given_inter_switch_link() -> InterSwitchLinkBuilder {
        InterSwitchLinkBuilder { link: None }
    }
}
|
||||||
182
harmony/src/infra/kube.rs
Normal file
182
harmony/src/infra/kube.rs
Normal file
@@ -0,0 +1,182 @@
|
|||||||
|
use k8s_openapi::Resource as K8sResource;
|
||||||
|
use kube::api::{ApiResource, DynamicObject, GroupVersionKind};
|
||||||
|
use kube::core::TypeMeta;
|
||||||
|
use serde::Serialize;
|
||||||
|
use serde::de::DeserializeOwned;
|
||||||
|
use serde_json::Value;
|
||||||
|
|
||||||
|
/// Convert a typed Kubernetes resource `K` into a `DynamicObject`.
|
||||||
|
///
|
||||||
|
/// Requirements:
|
||||||
|
/// - `K` must be a k8s_openapi resource (provides static GVK via `Resource`).
|
||||||
|
/// - `K` must have standard Kubernetes shape (metadata + payload fields).
|
||||||
|
///
|
||||||
|
/// Notes:
|
||||||
|
/// - We set `types` (apiVersion/kind) and copy `metadata`.
|
||||||
|
/// - We place the remaining top-level fields into `obj.data` as JSON.
|
||||||
|
/// - Scope is not encoded on the object itself; you still need the corresponding
|
||||||
|
/// `DynamicResource` (derived from K::group/version/kind) when constructing an Api.
|
||||||
|
///
|
||||||
|
/// Example usage:
|
||||||
|
/// let dyn_obj = kube_resource_to_dynamic(secret)?;
|
||||||
|
/// let api: Api<DynamicObject> = Api::namespaced_with(client, "ns", &dr);
|
||||||
|
/// api.patch(&dyn_obj.name_any(), &PatchParams::apply("mgr"), &Patch::Apply(dyn_obj)).await?;
|
||||||
|
pub fn kube_resource_to_dynamic<K>(res: &K) -> Result<DynamicObject, String>
|
||||||
|
where
|
||||||
|
K: K8sResource + Serialize + DeserializeOwned,
|
||||||
|
{
|
||||||
|
// Serialize the typed resource to JSON so we can split metadata and payload
|
||||||
|
let mut v = serde_json::to_value(res).map_err(|e| format!("Failed to serialize : {e}"))?;
|
||||||
|
let obj = v
|
||||||
|
.as_object_mut()
|
||||||
|
.ok_or_else(|| "expected object JSON".to_string())?;
|
||||||
|
|
||||||
|
// Extract and parse metadata into kube::core::ObjectMeta
|
||||||
|
let metadata_value = obj
|
||||||
|
.remove("metadata")
|
||||||
|
.ok_or_else(|| "missing metadata".to_string())?;
|
||||||
|
let metadata: kube::core::ObjectMeta = serde_json::from_value(metadata_value)
|
||||||
|
.map_err(|e| format!("Failed to deserialize : {e}"))?;
|
||||||
|
|
||||||
|
// Name is required for DynamicObject::new; prefer metadata.name
|
||||||
|
let name = metadata
|
||||||
|
.name
|
||||||
|
.clone()
|
||||||
|
.ok_or_else(|| "metadata.name is required".to_string())?;
|
||||||
|
|
||||||
|
// Remaining fields (spec/status/data/etc.) become the dynamic payload
|
||||||
|
let payload = Value::Object(obj.clone());
|
||||||
|
|
||||||
|
// Construct the DynamicObject
|
||||||
|
let mut dyn_obj = DynamicObject::new(
|
||||||
|
&name,
|
||||||
|
&ApiResource::from_gvk(&GroupVersionKind::gvk(K::GROUP, K::VERSION, K::KIND)),
|
||||||
|
);
|
||||||
|
dyn_obj.types = Some(TypeMeta {
|
||||||
|
api_version: api_version_for::<K>(),
|
||||||
|
kind: K::KIND.into(),
|
||||||
|
});
|
||||||
|
|
||||||
|
// Preserve namespace/labels/annotations/etc.
|
||||||
|
dyn_obj.metadata = metadata;
|
||||||
|
|
||||||
|
// Attach payload
|
||||||
|
dyn_obj.data = payload;
|
||||||
|
|
||||||
|
Ok(dyn_obj)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Helper: compute apiVersion string ("group/version" or "v1" for core).
|
||||||
|
fn api_version_for<K>() -> String
|
||||||
|
where
|
||||||
|
K: K8sResource,
|
||||||
|
{
|
||||||
|
let group = K::GROUP;
|
||||||
|
let version = K::VERSION;
|
||||||
|
if group.is_empty() {
|
||||||
|
version.to_string() // core/v1 => "v1"
|
||||||
|
} else {
|
||||||
|
format!("{}/{}", group, version)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
#[cfg(test)]
mod test {
    use super::*;
    use k8s_openapi::api::{
        apps::v1::{Deployment, DeploymentSpec},
        core::v1::{PodTemplateSpec, Secret},
    };
    use kube::api::ObjectMeta;
    use pretty_assertions::assert_eq;

    #[test]
    fn secret_to_dynamic_roundtrip() {
        // Create a sample Secret resource
        let mut secret = Secret {
            metadata: ObjectMeta {
                name: Some("my-secret".to_string()),
                ..Default::default()
            },
            type_: Some("kubernetes.io/service-account-token".to_string()),
            ..Default::default()
        };

        // Convert to DynamicResource
        let dynamic: DynamicObject =
            kube_resource_to_dynamic(&secret).expect("Failed to convert Secret to DynamicResource");

        // Serialize both the original and dynamic resources to Value
        let original_value = serde_json::to_value(&secret).expect("Failed to serialize Secret");
        let dynamic_value =
            serde_json::to_value(&dynamic).expect("Failed to serialize DynamicResource");

        // Assert that they are identical
        assert_eq!(original_value, dynamic_value);

        // Sanity check: mutating the original after conversion must not affect
        // the already-built DynamicObject.
        // NOTE(review): "false" is an odd namespace value — presumably just an
        // arbitrary marker string for the inequality check; confirm intent.
        secret.metadata.namespace = Some("false".to_string());
        let modified_value = serde_json::to_value(&secret).expect("Failed to serialize Secret");
        assert_ne!(modified_value, dynamic_value);
    }

    #[test]
    fn deployment_to_dynamic_roundtrip() {
        // Create a sample Deployment with nested structures
        let mut deployment = Deployment {
            metadata: ObjectMeta {
                name: Some("my-deployment".to_string()),
                labels: Some({
                    let mut map = std::collections::BTreeMap::new();
                    map.insert("app".to_string(), "nginx".to_string());
                    map
                }),
                ..Default::default()
            },
            spec: Some(DeploymentSpec {
                replicas: Some(3),
                selector: Default::default(),
                template: PodTemplateSpec {
                    metadata: Some(ObjectMeta {
                        labels: Some({
                            let mut map = std::collections::BTreeMap::new();
                            map.insert("app".to_string(), "nginx".to_string());
                            map
                        }),
                        ..Default::default()
                    }),
                    spec: Some(Default::default()), // PodSpec with empty containers for simplicity
                },
                ..Default::default()
            }),
            ..Default::default()
        };

        let dynamic = kube_resource_to_dynamic(&deployment).expect("Failed to convert Deployment");

        // Full serialized forms must match exactly.
        let original_value = serde_json::to_value(&deployment).unwrap();
        let dynamic_value = serde_json::to_value(&dynamic).unwrap();

        assert_eq!(original_value, dynamic_value);

        // Spot-check that nested payload fields survived under `data`.
        assert_eq!(
            dynamic.data.get("spec").unwrap().get("replicas").unwrap(),
            3
        );
        assert_eq!(
            dynamic
                .data
                .get("spec")
                .unwrap()
                .get("template")
                .unwrap()
                .get("metadata")
                .unwrap()
                .get("labels")
                .unwrap()
                .get("app")
                .unwrap()
                .as_str()
                .unwrap(),
            "nginx".to_string()
        );
    }
}
|
||||||
@@ -1,6 +1,9 @@
|
|||||||
|
pub mod brocade;
|
||||||
pub mod executors;
|
pub mod executors;
|
||||||
pub mod hp_ilo;
|
pub mod hp_ilo;
|
||||||
pub mod intel_amt;
|
pub mod intel_amt;
|
||||||
pub mod inventory;
|
pub mod inventory;
|
||||||
|
pub mod kube;
|
||||||
|
pub mod network_manager;
|
||||||
pub mod opnsense;
|
pub mod opnsense;
|
||||||
mod sqlx;
|
mod sqlx;
|
||||||
|
|||||||
264
harmony/src/infra/network_manager.rs
Normal file
264
harmony/src/infra/network_manager.rs
Normal file
@@ -0,0 +1,264 @@
|
|||||||
|
use std::{
|
||||||
|
collections::{BTreeMap, HashSet},
|
||||||
|
sync::Arc,
|
||||||
|
};
|
||||||
|
|
||||||
|
use async_trait::async_trait;
|
||||||
|
use harmony_types::id::Id;
|
||||||
|
use k8s_openapi::api::core::v1::Node;
|
||||||
|
use kube::{
|
||||||
|
ResourceExt,
|
||||||
|
api::{ObjectList, ObjectMeta},
|
||||||
|
};
|
||||||
|
use log::{debug, info};
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
modules::okd::crd::nmstate,
|
||||||
|
topology::{HostNetworkConfig, NetworkError, NetworkManager, k8s::K8sClient},
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Network manager that drives host networking on OpenShift/OKD through the
/// kubernetes-nmstate operator.
///
/// TODO document properly the non-intuitive "roll forward only" behavior of nmstate in general.
/// It is documented in nmstate official doc, but worth mentioning here:
///
/// - You create a bond, nmstate will apply it
/// - You delete the bond from nmstate, it will NOT delete it
/// - To delete it you have to update it with configuration set to null
pub struct OpenShiftNmStateNetworkManager {
    // Shared handle to the cluster API; all operations go through this client.
    k8s_client: Arc<K8sClient>,
}
|
||||||
|
|
||||||
|
impl std::fmt::Debug for OpenShiftNmStateNetworkManager {
|
||||||
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
|
f.debug_struct("OpenShiftNmStateNetworkManager").finish()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl NetworkManager for OpenShiftNmStateNetworkManager {
|
||||||
|
async fn ensure_network_manager_installed(&self) -> Result<(), NetworkError> {
|
||||||
|
debug!("Installing NMState controller...");
|
||||||
|
// TODO use operatorhub maybe?
|
||||||
|
self.k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/nmstate.io_nmstates.yaml
|
||||||
|
").unwrap(), Some("nmstate"))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
debug!("Creating NMState namespace...");
|
||||||
|
self.k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/namespace.yaml
|
||||||
|
").unwrap(), Some("nmstate"))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
debug!("Creating NMState service account...");
|
||||||
|
self.k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/service_account.yaml
|
||||||
|
").unwrap(), Some("nmstate"))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
debug!("Creating NMState role...");
|
||||||
|
self.k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/role.yaml
|
||||||
|
").unwrap(), Some("nmstate"))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
debug!("Creating NMState role binding...");
|
||||||
|
self.k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/role_binding.yaml
|
||||||
|
").unwrap(), Some("nmstate"))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
debug!("Creating NMState operator...");
|
||||||
|
self.k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/operator.yaml
|
||||||
|
").unwrap(), Some("nmstate"))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
self.k8s_client
|
||||||
|
.wait_until_deployment_ready("nmstate-operator", Some("nmstate"), None)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let nmstate = nmstate::NMState {
|
||||||
|
metadata: ObjectMeta {
|
||||||
|
name: Some("nmstate".to_string()),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
..Default::default()
|
||||||
|
};
|
||||||
|
debug!(
|
||||||
|
"Creating NMState:\n{}",
|
||||||
|
serde_yaml::to_string(&nmstate).unwrap()
|
||||||
|
);
|
||||||
|
self.k8s_client.apply(&nmstate, None).await?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn configure_bond(&self, config: &HostNetworkConfig) -> Result<(), NetworkError> {
|
||||||
|
let hostname = self.get_hostname(&config.host_id).await.map_err(|e| {
|
||||||
|
NetworkError::new(format!(
|
||||||
|
"Can't configure bond, can't get hostname for host '{}': {e}",
|
||||||
|
config.host_id
|
||||||
|
))
|
||||||
|
})?;
|
||||||
|
let bond_id = self.get_next_bond_id(&hostname).await.map_err(|e| {
|
||||||
|
NetworkError::new(format!(
|
||||||
|
"Can't configure bond, can't get an available bond id for host '{}': {e}",
|
||||||
|
config.host_id
|
||||||
|
))
|
||||||
|
})?;
|
||||||
|
let bond_config = self.create_bond_configuration(&hostname, &bond_id, config);
|
||||||
|
|
||||||
|
debug!(
|
||||||
|
"Applying NMState bond config for host {}:\n{}",
|
||||||
|
config.host_id,
|
||||||
|
serde_yaml::to_string(&bond_config).unwrap(),
|
||||||
|
);
|
||||||
|
self.k8s_client
|
||||||
|
.apply(&bond_config, None)
|
||||||
|
.await
|
||||||
|
.map_err(|e| NetworkError::new(format!("Failed to configure bond: {e}")))?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl OpenShiftNmStateNetworkManager {
    /// Creates a manager that talks to the cluster through the given client.
    pub fn new(k8s_client: Arc<K8sClient>) -> Self {
        Self { k8s_client }
    }

    /// Builds the NodeNetworkConfigurationPolicy describing an 802.3ad bond
    /// named `bond_name` on node `host`, enslaving every interface listed in
    /// `config.switch_ports`. Member interfaces get IPv4/IPv6 disabled; the
    /// bond itself runs DHCP on both stacks and copies the MAC of the first
    /// member port.
    fn create_bond_configuration(
        &self,
        host: &str,
        bond_name: &str,
        config: &HostNetworkConfig,
    ) -> nmstate::NodeNetworkConfigurationPolicy {
        info!("Configuring bond '{bond_name}' for host '{host}'...");

        // NOTE(review): bond_mtu is collected from the first port but never
        // written into the bond interface below — confirm whether the MTU
        // should be applied or the variable removed.
        let mut bond_mtu: Option<u32> = None;
        let mut copy_mac_from: Option<String> = None;
        let mut bond_ports = Vec::new();
        let mut interfaces: Vec<nmstate::Interface> = Vec::new();

        for switch_port in &config.switch_ports {
            let interface_name = switch_port.interface.name.clone();

            // Member interface: up, no IP config of its own (the bond owns IP).
            interfaces.push(nmstate::Interface {
                name: interface_name.clone(),
                description: Some(format!("Member of bond {bond_name}")),
                r#type: nmstate::InterfaceType::Ethernet,
                state: "up".to_string(),
                ipv4: Some(nmstate::IpStackSpec {
                    enabled: Some(false),
                    ..Default::default()
                }),
                ipv6: Some(nmstate::IpStackSpec {
                    enabled: Some(false),
                    ..Default::default()
                }),
                link_aggregation: None,
                ..Default::default()
            });

            bond_ports.push(interface_name.clone());

            // Use the first port's details for the bond mtu and mac address
            if bond_mtu.is_none() {
                bond_mtu = Some(switch_port.interface.mtu);
            }
            if copy_mac_from.is_none() {
                copy_mac_from = Some(interface_name);
            }
        }

        // The bond itself: LACP (802.3ad), DHCP on IPv4 and IPv6.
        interfaces.push(nmstate::Interface {
            name: bond_name.to_string(),
            description: Some(format!("HARMONY - Network bond for host {host}")),
            r#type: nmstate::InterfaceType::Bond,
            state: "up".to_string(),
            copy_mac_from,
            ipv4: Some(nmstate::IpStackSpec {
                dhcp: Some(true),
                enabled: Some(true),
                ..Default::default()
            }),
            ipv6: Some(nmstate::IpStackSpec {
                dhcp: Some(true),
                autoconf: Some(true),
                enabled: Some(true),
                ..Default::default()
            }),
            link_aggregation: Some(nmstate::BondSpec {
                mode: "802.3ad".to_string(),
                ports: bond_ports,
                ..Default::default()
            }),
            ..Default::default()
        });

        // Policy is pinned to this node via the kubernetes.io/hostname selector.
        nmstate::NodeNetworkConfigurationPolicy {
            metadata: ObjectMeta {
                name: Some(format!("{host}-bond-config")),
                ..Default::default()
            },
            spec: nmstate::NodeNetworkConfigurationPolicySpec {
                node_selector: Some(BTreeMap::from([(
                    "kubernetes.io/hostname".to_string(),
                    host.to_string(),
                )])),
                desired_state: nmstate::NetworkState {
                    interfaces,
                    ..Default::default()
                },
            },
        }
    }

    /// Resolves a Harmony host id to the node's `kubernetes.io/hostname` label
    /// by matching the id against each node's reported system UUID.
    async fn get_hostname(&self, host_id: &Id) -> Result<String, String> {
        let nodes: ObjectList<Node> = self
            .k8s_client
            .list_resources(None, None)
            .await
            .map_err(|e| format!("Failed to list nodes: {e}"))?;

        // Match on status.nodeInfo.systemUUID == host_id.
        let Some(node) = nodes.iter().find(|n| {
            n.status
                .as_ref()
                .and_then(|s| s.node_info.as_ref())
                .map(|i| i.system_uuid == host_id.to_string())
                .unwrap_or(false)
        }) else {
            return Err(format!("No node found for host '{host_id}'"));
        };

        node.labels()
            .get("kubernetes.io/hostname")
            .ok_or(format!(
                "Node '{host_id}' has no kubernetes.io/hostname label"
            ))
            .cloned()
    }

    /// Returns the lowest `bondN` name not already present in the node's
    /// current NodeNetworkState (starting at bond0).
    async fn get_next_bond_id(&self, hostname: &str) -> Result<String, String> {
        let network_state: Option<nmstate::NodeNetworkState> = self
            .k8s_client
            .get_resource(hostname, None)
            .await
            .map_err(|e| format!("Failed to list nodes: {e}"))?;

        // Fallback empty list when the node has no reported current state.
        let interfaces = vec![];
        let existing_bonds: Vec<&nmstate::Interface> = network_state
            .as_ref()
            .and_then(|network_state| network_state.status.current_state.as_ref())
            .map_or(&interfaces, |current_state| &current_state.interfaces)
            .iter()
            .filter(|i| i.r#type == nmstate::InterfaceType::Bond)
            .collect();

        // Collect the numeric suffixes of existing "bondN" interfaces.
        let used_ids: HashSet<u32> = existing_bonds
            .iter()
            .filter_map(|i| {
                i.name
                    .strip_prefix("bond")
                    .and_then(|id| id.parse::<u32>().ok())
            })
            .collect();

        // First free id; the iterator is infinite so `find` always succeeds.
        let next_id = (0..).find(|id| !used_ids.contains(id)).unwrap();
        Ok(format!("bond{next_id}"))
    }
}
|
||||||
@@ -36,7 +36,7 @@ impl HttpServer for OPNSenseFirewall {
|
|||||||
async fn serve_file_content(&self, file: &FileContent) -> Result<(), ExecutorError> {
|
async fn serve_file_content(&self, file: &FileContent) -> Result<(), ExecutorError> {
|
||||||
let path = match &file.path {
|
let path = match &file.path {
|
||||||
crate::data::FilePath::Relative(path) => {
|
crate::data::FilePath::Relative(path) => {
|
||||||
format!("{OPNSENSE_HTTP_ROOT_PATH}/{}", path)
|
format!("{OPNSENSE_HTTP_ROOT_PATH}/{}", path.to_string())
|
||||||
}
|
}
|
||||||
crate::data::FilePath::Absolute(path) => {
|
crate::data::FilePath::Absolute(path) => {
|
||||||
return Err(ExecutorError::ConfigurationError(format!(
|
return Err(ExecutorError::ConfigurationError(format!(
|
||||||
|
|||||||
@@ -26,19 +26,13 @@ impl LoadBalancer for OPNSenseFirewall {
|
|||||||
}
|
}
|
||||||
|
|
||||||
async fn add_service(&self, service: &LoadBalancerService) -> Result<(), ExecutorError> {
|
async fn add_service(&self, service: &LoadBalancerService) -> Result<(), ExecutorError> {
|
||||||
warn!(
|
|
||||||
"TODO : the current implementation does not check / cleanup / merge with existing haproxy services properly. Make sure to manually verify that the configuration is correct after executing any operation here"
|
|
||||||
);
|
|
||||||
let mut config = self.opnsense_config.write().await;
|
let mut config = self.opnsense_config.write().await;
|
||||||
|
let mut load_balancer = config.load_balancer();
|
||||||
|
|
||||||
let (frontend, backend, servers, healthcheck) =
|
let (frontend, backend, servers, healthcheck) =
|
||||||
harmony_load_balancer_service_to_haproxy_xml(service);
|
harmony_load_balancer_service_to_haproxy_xml(service);
|
||||||
let mut load_balancer = config.load_balancer();
|
|
||||||
load_balancer.add_backend(backend);
|
load_balancer.configure_service(frontend, backend, servers, healthcheck);
|
||||||
load_balancer.add_frontend(frontend);
|
|
||||||
load_balancer.add_servers(servers);
|
|
||||||
if let Some(healthcheck) = healthcheck {
|
|
||||||
load_balancer.add_healthcheck(healthcheck);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@@ -106,7 +100,7 @@ pub(crate) fn haproxy_xml_config_to_harmony_loadbalancer(
|
|||||||
.backends
|
.backends
|
||||||
.backends
|
.backends
|
||||||
.iter()
|
.iter()
|
||||||
.find(|b| b.uuid == frontend.default_backend);
|
.find(|b| Some(b.uuid.clone()) == frontend.default_backend);
|
||||||
|
|
||||||
let mut health_check = None;
|
let mut health_check = None;
|
||||||
match matching_backend {
|
match matching_backend {
|
||||||
@@ -116,8 +110,7 @@ pub(crate) fn haproxy_xml_config_to_harmony_loadbalancer(
|
|||||||
}
|
}
|
||||||
None => {
|
None => {
|
||||||
warn!(
|
warn!(
|
||||||
"HAProxy config could not find a matching backend for frontend {:?}",
|
"HAProxy config could not find a matching backend for frontend {frontend:?}"
|
||||||
frontend
|
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -152,11 +145,11 @@ pub(crate) fn get_servers_for_backend(
|
|||||||
.servers
|
.servers
|
||||||
.iter()
|
.iter()
|
||||||
.filter_map(|server| {
|
.filter_map(|server| {
|
||||||
|
let address = server.address.clone()?;
|
||||||
|
let port = server.port?;
|
||||||
|
|
||||||
if backend_servers.contains(&server.uuid.as_str()) {
|
if backend_servers.contains(&server.uuid.as_str()) {
|
||||||
return Some(BackendServer {
|
return Some(BackendServer { address, port });
|
||||||
address: server.address.clone(),
|
|
||||||
port: server.port,
|
|
||||||
});
|
|
||||||
}
|
}
|
||||||
None
|
None
|
||||||
})
|
})
|
||||||
@@ -182,13 +175,17 @@ pub(crate) fn get_health_check_for_backend(
|
|||||||
let uppercase = binding.as_str();
|
let uppercase = binding.as_str();
|
||||||
match uppercase {
|
match uppercase {
|
||||||
"TCP" => {
|
"TCP" => {
|
||||||
if let Some(checkport) = haproxy_health_check.checkport.content.as_ref()
|
if let Some(checkport) = haproxy_health_check.checkport.content.as_ref() {
|
||||||
&& !checkport.is_empty()
|
if !checkport.is_empty() {
|
||||||
{
|
|
||||||
return Some(HealthCheck::TCP(Some(checkport.parse().unwrap_or_else(
|
return Some(HealthCheck::TCP(Some(checkport.parse().unwrap_or_else(
|
||||||
|_| panic!("HAProxy check port should be a valid port number, got {checkport}"),
|
|_| {
|
||||||
|
panic!(
|
||||||
|
"HAProxy check port should be a valid port number, got {checkport}"
|
||||||
|
)
|
||||||
|
},
|
||||||
))));
|
))));
|
||||||
}
|
}
|
||||||
|
}
|
||||||
Some(HealthCheck::TCP(None))
|
Some(HealthCheck::TCP(None))
|
||||||
}
|
}
|
||||||
"HTTP" => {
|
"HTTP" => {
|
||||||
@@ -343,7 +340,7 @@ pub(crate) fn harmony_load_balancer_service_to_haproxy_xml(
|
|||||||
name: format!("frontend_{}", service.listening_port),
|
name: format!("frontend_{}", service.listening_port),
|
||||||
bind: service.listening_port.to_string(),
|
bind: service.listening_port.to_string(),
|
||||||
mode: "tcp".to_string(), // TODO do not depend on health check here
|
mode: "tcp".to_string(), // TODO do not depend on health check here
|
||||||
default_backend: backend.uuid.clone(),
|
default_backend: Some(backend.uuid.clone()),
|
||||||
..Default::default()
|
..Default::default()
|
||||||
};
|
};
|
||||||
info!("HAPRoxy frontend and backend mode currently hardcoded to tcp");
|
info!("HAPRoxy frontend and backend mode currently hardcoded to tcp");
|
||||||
@@ -357,8 +354,8 @@ fn server_to_haproxy_server(server: &BackendServer) -> HAProxyServer {
|
|||||||
uuid: Uuid::new_v4().to_string(),
|
uuid: Uuid::new_v4().to_string(),
|
||||||
name: format!("{}_{}", &server.address, &server.port),
|
name: format!("{}_{}", &server.address, &server.port),
|
||||||
enabled: 1,
|
enabled: 1,
|
||||||
address: server.address.clone(),
|
address: Some(server.address.clone()),
|
||||||
port: server.port,
|
port: Some(server.port),
|
||||||
mode: "active".to_string(),
|
mode: "active".to_string(),
|
||||||
server_type: "static".to_string(),
|
server_type: "static".to_string(),
|
||||||
..Default::default()
|
..Default::default()
|
||||||
@@ -381,8 +378,8 @@ mod tests {
|
|||||||
let mut haproxy = HAProxy::default();
|
let mut haproxy = HAProxy::default();
|
||||||
let server = HAProxyServer {
|
let server = HAProxyServer {
|
||||||
uuid: "server1".to_string(),
|
uuid: "server1".to_string(),
|
||||||
address: "192.168.1.1".to_string(),
|
address: Some("192.168.1.1".to_string()),
|
||||||
port: 80,
|
port: Some(80),
|
||||||
..Default::default()
|
..Default::default()
|
||||||
};
|
};
|
||||||
haproxy.servers.servers.push(server);
|
haproxy.servers.servers.push(server);
|
||||||
@@ -407,8 +404,8 @@ mod tests {
|
|||||||
let mut haproxy = HAProxy::default();
|
let mut haproxy = HAProxy::default();
|
||||||
let server = HAProxyServer {
|
let server = HAProxyServer {
|
||||||
uuid: "server1".to_string(),
|
uuid: "server1".to_string(),
|
||||||
address: "192.168.1.1".to_string(),
|
address: Some("192.168.1.1".to_string()),
|
||||||
port: 80,
|
port: Some(80),
|
||||||
..Default::default()
|
..Default::default()
|
||||||
};
|
};
|
||||||
haproxy.servers.servers.push(server);
|
haproxy.servers.servers.push(server);
|
||||||
@@ -427,8 +424,8 @@ mod tests {
|
|||||||
let mut haproxy = HAProxy::default();
|
let mut haproxy = HAProxy::default();
|
||||||
let server = HAProxyServer {
|
let server = HAProxyServer {
|
||||||
uuid: "server1".to_string(),
|
uuid: "server1".to_string(),
|
||||||
address: "192.168.1.1".to_string(),
|
address: Some("192.168.1.1".to_string()),
|
||||||
port: 80,
|
port: Some(80),
|
||||||
..Default::default()
|
..Default::default()
|
||||||
};
|
};
|
||||||
haproxy.servers.servers.push(server);
|
haproxy.servers.servers.push(server);
|
||||||
@@ -449,16 +446,16 @@ mod tests {
|
|||||||
let mut haproxy = HAProxy::default();
|
let mut haproxy = HAProxy::default();
|
||||||
let server = HAProxyServer {
|
let server = HAProxyServer {
|
||||||
uuid: "server1".to_string(),
|
uuid: "server1".to_string(),
|
||||||
address: "some-hostname.test.mcd".to_string(),
|
address: Some("some-hostname.test.mcd".to_string()),
|
||||||
port: 80,
|
port: Some(80),
|
||||||
..Default::default()
|
..Default::default()
|
||||||
};
|
};
|
||||||
haproxy.servers.servers.push(server);
|
haproxy.servers.servers.push(server);
|
||||||
|
|
||||||
let server = HAProxyServer {
|
let server = HAProxyServer {
|
||||||
uuid: "server2".to_string(),
|
uuid: "server2".to_string(),
|
||||||
address: "192.168.1.2".to_string(),
|
address: Some("192.168.1.2".to_string()),
|
||||||
port: 8080,
|
port: Some(8080),
|
||||||
..Default::default()
|
..Default::default()
|
||||||
};
|
};
|
||||||
haproxy.servers.servers.push(server);
|
haproxy.servers.servers.push(server);
|
||||||
|
|||||||
@@ -8,6 +8,7 @@ mod tftp;
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
pub use management::*;
|
pub use management::*;
|
||||||
|
use opnsense_config_xml::Host;
|
||||||
use tokio::sync::RwLock;
|
use tokio::sync::RwLock;
|
||||||
|
|
||||||
use crate::{executors::ExecutorError, topology::LogicalHost};
|
use crate::{executors::ExecutorError, topology::LogicalHost};
|
||||||
|
|||||||
@@ -1,4 +1,5 @@
|
|||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
|
use harmony_macros::hurl;
|
||||||
use kube::api::GroupVersionKind;
|
use kube::api::GroupVersionKind;
|
||||||
use non_blank_string_rs::NonBlankString;
|
use non_blank_string_rs::NonBlankString;
|
||||||
use serde::Serialize;
|
use serde::Serialize;
|
||||||
@@ -114,13 +115,13 @@ impl ArgoInterpret {
|
|||||||
|
|
||||||
match ic.data["status"]["domain"].as_str() {
|
match ic.data["status"]["domain"].as_str() {
|
||||||
Some(domain) => return Ok(domain.to_string()),
|
Some(domain) => return Ok(domain.to_string()),
|
||||||
None => Err(InterpretError::new("Could not find domain".to_string())),
|
None => return Err(InterpretError::new("Could not find domain".to_string())),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
false => {
|
false => {
|
||||||
todo!()
|
todo!()
|
||||||
}
|
}
|
||||||
}
|
};
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1046,7 +1047,7 @@ commitServer:
|
|||||||
install_only: false,
|
install_only: false,
|
||||||
repository: Some(HelmRepository::new(
|
repository: Some(HelmRepository::new(
|
||||||
"argo".to_string(),
|
"argo".to_string(),
|
||||||
url::Url::parse("https://argoproj.github.io/argo-helm").unwrap(),
|
hurl!("https://argoproj.github.io/argo-helm"),
|
||||||
true,
|
true,
|
||||||
)),
|
)),
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2,7 +2,11 @@ use crate::modules::application::{
|
|||||||
Application, ApplicationFeature, InstallationError, InstallationOutcome,
|
Application, ApplicationFeature, InstallationError, InstallationOutcome,
|
||||||
};
|
};
|
||||||
use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
|
use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
|
||||||
|
use crate::modules::monitoring::grafana::grafana::Grafana;
|
||||||
use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus;
|
use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus;
|
||||||
|
use crate::modules::monitoring::kube_prometheus::crd::service_monitor::{
|
||||||
|
ServiceMonitor, ServiceMonitorSpec,
|
||||||
|
};
|
||||||
use crate::topology::MultiTargetTopology;
|
use crate::topology::MultiTargetTopology;
|
||||||
use crate::topology::ingress::Ingress;
|
use crate::topology::ingress::Ingress;
|
||||||
use crate::{
|
use crate::{
|
||||||
@@ -14,7 +18,7 @@ use crate::{
|
|||||||
topology::{HelmCommand, K8sclient, Topology, tenant::TenantManager},
|
topology::{HelmCommand, K8sclient, Topology, tenant::TenantManager},
|
||||||
};
|
};
|
||||||
use crate::{
|
use crate::{
|
||||||
modules::prometheus::prometheus::PrometheusApplicationMonitoring,
|
modules::prometheus::prometheus::PrometheusMonitoring,
|
||||||
topology::oberservability::monitoring::AlertReceiver,
|
topology::oberservability::monitoring::AlertReceiver,
|
||||||
};
|
};
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
@@ -22,6 +26,7 @@ use base64::{Engine as _, engine::general_purpose};
|
|||||||
use harmony_secret::SecretManager;
|
use harmony_secret::SecretManager;
|
||||||
use harmony_secret_derive::Secret;
|
use harmony_secret_derive::Secret;
|
||||||
use harmony_types::net::Url;
|
use harmony_types::net::Url;
|
||||||
|
use kube::api::ObjectMeta;
|
||||||
use log::{debug, info};
|
use log::{debug, info};
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
@@ -40,7 +45,8 @@ impl<
|
|||||||
+ TenantManager
|
+ TenantManager
|
||||||
+ K8sclient
|
+ K8sclient
|
||||||
+ MultiTargetTopology
|
+ MultiTargetTopology
|
||||||
+ PrometheusApplicationMonitoring<CRDPrometheus>
|
+ PrometheusMonitoring<CRDPrometheus>
|
||||||
|
+ Grafana
|
||||||
+ Ingress
|
+ Ingress
|
||||||
+ std::fmt::Debug,
|
+ std::fmt::Debug,
|
||||||
> ApplicationFeature<T> for Monitoring
|
> ApplicationFeature<T> for Monitoring
|
||||||
@@ -57,10 +63,20 @@ impl<
|
|||||||
.unwrap_or_else(|| self.application.name());
|
.unwrap_or_else(|| self.application.name());
|
||||||
let domain = topology.get_domain("ntfy").await.unwrap();
|
let domain = topology.get_domain("ntfy").await.unwrap();
|
||||||
|
|
||||||
|
let app_service_monitor = ServiceMonitor {
|
||||||
|
metadata: ObjectMeta {
|
||||||
|
name: Some(self.application.name()),
|
||||||
|
namespace: Some(namespace.clone()),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
spec: ServiceMonitorSpec::default(),
|
||||||
|
};
|
||||||
|
|
||||||
let mut alerting_score = ApplicationMonitoringScore {
|
let mut alerting_score = ApplicationMonitoringScore {
|
||||||
sender: CRDPrometheus {
|
sender: CRDPrometheus {
|
||||||
namespace: namespace.clone(),
|
namespace: namespace.clone(),
|
||||||
client: topology.k8s_client().await.unwrap(),
|
client: topology.k8s_client().await.unwrap(),
|
||||||
|
service_monitor: vec![app_service_monitor],
|
||||||
},
|
},
|
||||||
application: self.application.clone(),
|
application: self.application.clone(),
|
||||||
receivers: self.alert_receiver.clone(),
|
receivers: self.alert_receiver.clone(),
|
||||||
|
|||||||
@@ -190,7 +190,7 @@ impl<
|
|||||||
info!("Deploying {} to target {target:?}", self.application.name());
|
info!("Deploying {} to target {target:?}", self.application.name());
|
||||||
|
|
||||||
let score = ArgoHelmScore {
|
let score = ArgoHelmScore {
|
||||||
namespace: self.application.name().to_string(),
|
namespace: format!("{}", self.application.name()),
|
||||||
openshift: true,
|
openshift: true,
|
||||||
argo_apps: vec![ArgoApplication::from(CDApplicationConfig {
|
argo_apps: vec![ArgoApplication::from(CDApplicationConfig {
|
||||||
// helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0
|
// helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0
|
||||||
@@ -198,8 +198,8 @@ impl<
|
|||||||
helm_chart_repo_url: "hub.nationtech.io/harmony".to_string(),
|
helm_chart_repo_url: "hub.nationtech.io/harmony".to_string(),
|
||||||
helm_chart_name: format!("{}-chart", self.application.name()),
|
helm_chart_name: format!("{}-chart", self.application.name()),
|
||||||
values_overrides: None,
|
values_overrides: None,
|
||||||
name: self.application.name().to_string(),
|
name: format!("{}", self.application.name()),
|
||||||
namespace: self.application.name().to_string(),
|
namespace: format!("{}", self.application.name()),
|
||||||
})],
|
})],
|
||||||
};
|
};
|
||||||
score
|
score
|
||||||
|
|||||||
@@ -3,6 +3,7 @@ use std::sync::Arc;
|
|||||||
use crate::modules::application::{
|
use crate::modules::application::{
|
||||||
Application, ApplicationFeature, InstallationError, InstallationOutcome,
|
Application, ApplicationFeature, InstallationError, InstallationOutcome,
|
||||||
};
|
};
|
||||||
|
use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
|
||||||
use crate::modules::monitoring::application_monitoring::rhobs_application_monitoring_score::ApplicationRHOBMonitoringScore;
|
use crate::modules::monitoring::application_monitoring::rhobs_application_monitoring_score::ApplicationRHOBMonitoringScore;
|
||||||
|
|
||||||
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;
|
use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;
|
||||||
@@ -17,7 +18,7 @@ use crate::{
|
|||||||
topology::{HelmCommand, K8sclient, Topology, tenant::TenantManager},
|
topology::{HelmCommand, K8sclient, Topology, tenant::TenantManager},
|
||||||
};
|
};
|
||||||
use crate::{
|
use crate::{
|
||||||
modules::prometheus::prometheus::PrometheusApplicationMonitoring,
|
modules::prometheus::prometheus::PrometheusMonitoring,
|
||||||
topology::oberservability::monitoring::AlertReceiver,
|
topology::oberservability::monitoring::AlertReceiver,
|
||||||
};
|
};
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
@@ -41,7 +42,7 @@ impl<
|
|||||||
+ MultiTargetTopology
|
+ MultiTargetTopology
|
||||||
+ Ingress
|
+ Ingress
|
||||||
+ std::fmt::Debug
|
+ std::fmt::Debug
|
||||||
+ PrometheusApplicationMonitoring<RHOBObservability>,
|
+ PrometheusMonitoring<RHOBObservability>,
|
||||||
> ApplicationFeature<T> for Monitoring
|
> ApplicationFeature<T> for Monitoring
|
||||||
{
|
{
|
||||||
async fn ensure_installed(
|
async fn ensure_installed(
|
||||||
|
|||||||
@@ -194,10 +194,10 @@ impl RustWebapp {
|
|||||||
Some(body_full(tar_data.into())),
|
Some(body_full(tar_data.into())),
|
||||||
);
|
);
|
||||||
|
|
||||||
while let Some(msg) = image_build_stream.next().await {
|
while let Some(mut msg) = image_build_stream.next().await {
|
||||||
trace!("Got bollard msg {msg:?}");
|
trace!("Got bollard msg {msg:?}");
|
||||||
match msg {
|
match msg {
|
||||||
Ok(msg) => {
|
Ok(mut msg) => {
|
||||||
if let Some(progress) = msg.progress_detail {
|
if let Some(progress) = msg.progress_detail {
|
||||||
info!(
|
info!(
|
||||||
"Build progress {}/{}",
|
"Build progress {}/{}",
|
||||||
@@ -511,23 +511,25 @@ ingress:
|
|||||||
fs::write(chart_dir.join("values.yaml"), values_yaml)?;
|
fs::write(chart_dir.join("values.yaml"), values_yaml)?;
|
||||||
|
|
||||||
// Create templates/_helpers.tpl
|
// Create templates/_helpers.tpl
|
||||||
let helpers_tpl = r#"
|
let helpers_tpl = format!(
|
||||||
{{/*
|
r#"
|
||||||
|
{{{{/*
|
||||||
Expand the name of the chart.
|
Expand the name of the chart.
|
||||||
*/}}
|
*/}}}}
|
||||||
{{- define "chart.name" -}}
|
{{{{- define "chart.name" -}}}}
|
||||||
{{- default .Chart.Name $.Values.nameOverride | trunc 63 | trimSuffix "-" }}
|
{{{{- default .Chart.Name $.Values.nameOverride | trunc 63 | trimSuffix "-" }}}}
|
||||||
{{- end }}
|
{{{{- end }}}}
|
||||||
|
|
||||||
{{/*
|
{{{{/*
|
||||||
Create a default fully qualified app name.
|
Create a default fully qualified app name.
|
||||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||||
*/}}
|
*/}}}}
|
||||||
{{- define "chart.fullname" -}}
|
{{{{- define "chart.fullname" -}}}}
|
||||||
{{- $name := default .Chart.Name $.Values.nameOverride }}
|
{{{{- $name := default .Chart.Name $.Values.nameOverride }}}}
|
||||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
|
{{{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}}}
|
||||||
{{- end }}
|
{{{{- end }}}}
|
||||||
"#.to_string();
|
"#
|
||||||
|
);
|
||||||
fs::write(templates_dir.join("_helpers.tpl"), helpers_tpl)?;
|
fs::write(templates_dir.join("_helpers.tpl"), helpers_tpl)?;
|
||||||
|
|
||||||
// Create templates/service.yaml
|
// Create templates/service.yaml
|
||||||
|
|||||||
209
harmony/src/modules/cert_manager/cluster_issuer.rs
Normal file
209
harmony/src/modules/cert_manager/cluster_issuer.rs
Normal file
@@ -0,0 +1,209 @@
|
|||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use async_trait::async_trait;
|
||||||
|
use harmony_types::id::Id;
|
||||||
|
use kube::{CustomResource, api::ObjectMeta};
|
||||||
|
use schemars::JsonSchema;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
data::Version,
|
||||||
|
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
||||||
|
inventory::Inventory,
|
||||||
|
score::Score,
|
||||||
|
topology::{K8sclient, Topology, k8s::K8sClient},
|
||||||
|
};
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, Serialize)]
|
||||||
|
pub struct ClusterIssuerScore {
|
||||||
|
email: String,
|
||||||
|
server: String,
|
||||||
|
issuer_name: String,
|
||||||
|
namespace: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Topology + K8sclient> Score<T> for ClusterIssuerScore {
|
||||||
|
fn name(&self) -> String {
|
||||||
|
"ClusterIssuerScore".to_string()
|
||||||
|
}
|
||||||
|
|
||||||
|
#[doc(hidden)]
|
||||||
|
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
||||||
|
Box::new(ClusterIssuerInterpret {
|
||||||
|
score: self.clone(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub struct ClusterIssuerInterpret {
|
||||||
|
score: ClusterIssuerScore,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl<T: Topology + K8sclient> Interpret<T> for ClusterIssuerInterpret {
|
||||||
|
async fn execute(
|
||||||
|
&self,
|
||||||
|
_inventory: &Inventory,
|
||||||
|
topology: &T,
|
||||||
|
) -> Result<Outcome, InterpretError> {
|
||||||
|
self.apply_cluster_issuer(topology.k8s_client().await.unwrap())
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_name(&self) -> InterpretName {
|
||||||
|
InterpretName::Custom("ClusterIssuer")
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_version(&self) -> Version {
|
||||||
|
todo!()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_status(&self) -> InterpretStatus {
|
||||||
|
todo!()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_children(&self) -> Vec<Id> {
|
||||||
|
todo!()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ClusterIssuerInterpret {
|
||||||
|
async fn validate_cert_manager(
|
||||||
|
&self,
|
||||||
|
client: &Arc<K8sClient>,
|
||||||
|
) -> Result<Outcome, InterpretError> {
|
||||||
|
let cert_manager = "cert-manager".to_string();
|
||||||
|
let operator_namespace = "openshift-operators".to_string();
|
||||||
|
match client
|
||||||
|
.get_deployment(&cert_manager, Some(&operator_namespace))
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
Ok(Some(deployment)) => {
|
||||||
|
if let Some(status) = deployment.status {
|
||||||
|
let ready_count = status.ready_replicas.unwrap_or(0);
|
||||||
|
if ready_count >= 1 {
|
||||||
|
return Ok(Outcome::success(format!(
|
||||||
|
"'{}' is ready with {} replica(s).",
|
||||||
|
&cert_manager, ready_count
|
||||||
|
)));
|
||||||
|
} else {
|
||||||
|
return Err(InterpretError::new(
|
||||||
|
"cert-manager operator not ready in cluster".to_string(),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
Err(InterpretError::new(format!(
|
||||||
|
"failed to get deployment status {} in ns {}",
|
||||||
|
&cert_manager, &operator_namespace
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(None) => Err(InterpretError::new(format!(
|
||||||
|
"Deployment '{}' not found in namespace '{}'.",
|
||||||
|
&cert_manager, &operator_namespace
|
||||||
|
))),
|
||||||
|
Err(e) => Err(InterpretError::new(format!(
|
||||||
|
"Failed to query for deployment '{}': {}",
|
||||||
|
&cert_manager, e
|
||||||
|
))),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn build_cluster_issuer(&self) -> Result<ClusterIssuer, InterpretError> {
|
||||||
|
let issuer_name = &self.score.issuer_name;
|
||||||
|
let email = &self.score.email;
|
||||||
|
let server = &self.score.server;
|
||||||
|
let namespace = &self.score.namespace;
|
||||||
|
let cluster_issuer = ClusterIssuer {
|
||||||
|
metadata: ObjectMeta {
|
||||||
|
name: Some(issuer_name.to_string()),
|
||||||
|
namespace: Some(namespace.to_string()),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
spec: ClusterIssuerSpec {
|
||||||
|
acme: AcmeSpec {
|
||||||
|
email: email.to_string(),
|
||||||
|
private_key_secret_ref: PrivateKeySecretRef {
|
||||||
|
name: issuer_name.to_string(),
|
||||||
|
},
|
||||||
|
server: server.to_string(),
|
||||||
|
solvers: vec![SolverSpec {
|
||||||
|
http01: Some(Http01Solver {
|
||||||
|
ingress: Http01Ingress {
|
||||||
|
class: "nginx".to_string(),
|
||||||
|
},
|
||||||
|
}),
|
||||||
|
}],
|
||||||
|
},
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(cluster_issuer)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn apply_cluster_issuer(
|
||||||
|
&self,
|
||||||
|
client: Arc<K8sClient>,
|
||||||
|
) -> Result<Outcome, InterpretError> {
|
||||||
|
let namespace = self.score.namespace.clone();
|
||||||
|
self.validate_cert_manager(&client).await?;
|
||||||
|
let cluster_issuer = self.build_cluster_issuer().unwrap();
|
||||||
|
client
|
||||||
|
.apply_yaml(
|
||||||
|
&serde_yaml::to_value(cluster_issuer).unwrap(),
|
||||||
|
Some(&namespace),
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
Ok(Outcome::success(format!(
|
||||||
|
"successfully deployed cluster operator: {} in namespace: {}",
|
||||||
|
self.score.issuer_name, self.score.namespace
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
|
||||||
|
#[kube(
|
||||||
|
group = "cert-manager.io",
|
||||||
|
version = "v1",
|
||||||
|
kind = "ClusterIssuer",
|
||||||
|
plural = "clusterissuers"
|
||||||
|
)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct ClusterIssuerSpec {
|
||||||
|
pub acme: AcmeSpec,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct AcmeSpec {
|
||||||
|
pub email: String,
|
||||||
|
pub private_key_secret_ref: PrivateKeySecretRef,
|
||||||
|
pub server: String,
|
||||||
|
pub solvers: Vec<SolverSpec>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct PrivateKeySecretRef {
|
||||||
|
pub name: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct SolverSpec {
|
||||||
|
pub http01: Option<Http01Solver>,
|
||||||
|
// Other solver types (e.g., dns01) would go here as Options
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct Http01Solver {
|
||||||
|
pub ingress: Http01Ingress,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct Http01Ingress {
|
||||||
|
pub class: String,
|
||||||
|
}
|
||||||
@@ -1,5 +1,6 @@
|
|||||||
use std::{collections::HashMap, str::FromStr};
|
use std::{collections::HashMap, str::FromStr};
|
||||||
|
|
||||||
|
use harmony_macros::hurl;
|
||||||
use non_blank_string_rs::NonBlankString;
|
use non_blank_string_rs::NonBlankString;
|
||||||
use serde::Serialize;
|
use serde::Serialize;
|
||||||
use url::Url;
|
use url::Url;
|
||||||
@@ -33,7 +34,7 @@ impl<T: Topology + HelmCommand> Score<T> for CertManagerHelmScore {
|
|||||||
install_only: true,
|
install_only: true,
|
||||||
repository: Some(HelmRepository::new(
|
repository: Some(HelmRepository::new(
|
||||||
"jetstack".to_string(),
|
"jetstack".to_string(),
|
||||||
Url::parse("https://charts.jetstack.io").unwrap(),
|
hurl!("https://charts.jetstack.io"),
|
||||||
true,
|
true,
|
||||||
)),
|
)),
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,2 +1,3 @@
|
|||||||
|
pub mod cluster_issuer;
|
||||||
mod helm;
|
mod helm;
|
||||||
pub use helm::*;
|
pub use helm::*;
|
||||||
|
|||||||
@@ -5,6 +5,7 @@ use crate::score::Score;
|
|||||||
use crate::topology::{HelmCommand, Topology};
|
use crate::topology::{HelmCommand, Topology};
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use harmony_types::id::Id;
|
use harmony_types::id::Id;
|
||||||
|
use harmony_types::net::Url;
|
||||||
use helm_wrapper_rs;
|
use helm_wrapper_rs;
|
||||||
use helm_wrapper_rs::blocking::{DefaultHelmExecutor, HelmExecutor};
|
use helm_wrapper_rs::blocking::{DefaultHelmExecutor, HelmExecutor};
|
||||||
use log::{debug, info, warn};
|
use log::{debug, info, warn};
|
||||||
@@ -15,7 +16,6 @@ use std::path::Path;
|
|||||||
use std::process::{Command, Output, Stdio};
|
use std::process::{Command, Output, Stdio};
|
||||||
use std::str::FromStr;
|
use std::str::FromStr;
|
||||||
use temp_file::TempFile;
|
use temp_file::TempFile;
|
||||||
use url::Url;
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize)]
|
#[derive(Debug, Clone, Serialize)]
|
||||||
pub struct HelmRepository {
|
pub struct HelmRepository {
|
||||||
@@ -78,7 +78,8 @@ impl HelmChartInterpret {
|
|||||||
repo.name, repo.url, repo.force_update
|
repo.name, repo.url, repo.force_update
|
||||||
);
|
);
|
||||||
|
|
||||||
let mut add_args = vec!["repo", "add", &repo.name, repo.url.as_str()];
|
let repo_url = repo.url.to_string();
|
||||||
|
let mut add_args = vec!["repo", "add", &repo.name, &repo_url];
|
||||||
if repo.force_update {
|
if repo.force_update {
|
||||||
add_args.push("--force-update");
|
add_args.push("--force-update");
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,363 +0,0 @@
|
|||||||
use async_trait::async_trait;
|
|
||||||
use log::debug;
|
|
||||||
use serde::Serialize;
|
|
||||||
use std::collections::HashMap;
|
|
||||||
use std::io::ErrorKind;
|
|
||||||
use std::path::PathBuf;
|
|
||||||
use std::process::{Command, Output};
|
|
||||||
use temp_dir::{self, TempDir};
|
|
||||||
use temp_file::TempFile;
|
|
||||||
|
|
||||||
use crate::data::Version;
|
|
||||||
use crate::interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome};
|
|
||||||
use crate::inventory::Inventory;
|
|
||||||
use crate::score::Score;
|
|
||||||
use crate::topology::{HelmCommand, K8sclient, Topology};
|
|
||||||
use harmony_types::id::Id;
|
|
||||||
|
|
||||||
#[derive(Clone)]
|
|
||||||
pub struct HelmCommandExecutor {
|
|
||||||
pub env: HashMap<String, String>,
|
|
||||||
pub path: Option<PathBuf>,
|
|
||||||
pub args: Vec<String>,
|
|
||||||
pub api_versions: Option<Vec<String>>,
|
|
||||||
pub kube_version: String,
|
|
||||||
pub debug: Option<bool>,
|
|
||||||
pub globals: HelmGlobals,
|
|
||||||
pub chart: HelmChart,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone)]
|
|
||||||
pub struct HelmGlobals {
|
|
||||||
pub chart_home: Option<PathBuf>,
|
|
||||||
pub config_home: Option<PathBuf>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize)]
|
|
||||||
pub struct HelmChart {
|
|
||||||
pub name: String,
|
|
||||||
pub version: Option<String>,
|
|
||||||
pub repo: Option<String>,
|
|
||||||
pub release_name: Option<String>,
|
|
||||||
pub namespace: Option<String>,
|
|
||||||
pub additional_values_files: Vec<PathBuf>,
|
|
||||||
pub values_file: Option<PathBuf>,
|
|
||||||
pub values_inline: Option<String>,
|
|
||||||
pub include_crds: Option<bool>,
|
|
||||||
pub skip_hooks: Option<bool>,
|
|
||||||
pub api_versions: Option<Vec<String>>,
|
|
||||||
pub kube_version: Option<String>,
|
|
||||||
pub name_template: String,
|
|
||||||
pub skip_tests: Option<bool>,
|
|
||||||
pub debug: Option<bool>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl HelmCommandExecutor {
|
|
||||||
pub fn generate(mut self) -> Result<String, std::io::Error> {
|
|
||||||
if self.globals.chart_home.is_none() {
|
|
||||||
self.globals.chart_home = Some(PathBuf::from("charts"));
|
|
||||||
}
|
|
||||||
|
|
||||||
if self
|
|
||||||
.clone()
|
|
||||||
.chart
|
|
||||||
.clone()
|
|
||||||
.chart_exists_locally(self.clone().globals.chart_home.unwrap())
|
|
||||||
.is_none()
|
|
||||||
{
|
|
||||||
if self.chart.repo.is_none() {
|
|
||||||
return Err(std::io::Error::other(
|
|
||||||
"Chart doesn't exist locally and no repo specified",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
self.clone().run_command(
|
|
||||||
self.chart
|
|
||||||
.clone()
|
|
||||||
.pull_command(self.globals.chart_home.clone().unwrap()),
|
|
||||||
)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
let out = self.clone().run_command(
|
|
||||||
self.chart
|
|
||||||
.clone()
|
|
||||||
.helm_args(self.globals.chart_home.clone().unwrap()),
|
|
||||||
)?;
|
|
||||||
|
|
||||||
// TODO: don't use unwrap here
|
|
||||||
let s = String::from_utf8(out.stdout).unwrap();
|
|
||||||
debug!("helm stderr: {}", String::from_utf8(out.stderr).unwrap());
|
|
||||||
debug!("helm status: {}", out.status);
|
|
||||||
debug!("helm output: {s}");
|
|
||||||
|
|
||||||
let clean = s.split_once("---").unwrap().1;
|
|
||||||
|
|
||||||
Ok(clean.to_string())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn version(self) -> Result<String, std::io::Error> {
|
|
||||||
let out = self.run_command(vec![
|
|
||||||
"version".to_string(),
|
|
||||||
"-c".to_string(),
|
|
||||||
"--short".to_string(),
|
|
||||||
])?;
|
|
||||||
|
|
||||||
// TODO: don't use unwrap
|
|
||||||
Ok(String::from_utf8(out.stdout).unwrap())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn run_command(mut self, mut args: Vec<String>) -> Result<Output, std::io::Error> {
|
|
||||||
if let Some(d) = self.debug
|
|
||||||
&& d
|
|
||||||
{
|
|
||||||
args.push("--debug".to_string());
|
|
||||||
}
|
|
||||||
|
|
||||||
let path = if let Some(p) = self.path {
|
|
||||||
p
|
|
||||||
} else {
|
|
||||||
PathBuf::from("helm")
|
|
||||||
};
|
|
||||||
|
|
||||||
let config_home = match self.globals.config_home {
|
|
||||||
Some(p) => p,
|
|
||||||
None => PathBuf::from(TempDir::new()?.path()),
|
|
||||||
};
|
|
||||||
|
|
||||||
if let Some(yaml_str) = self.chart.values_inline {
|
|
||||||
let tf: TempFile = temp_file::with_contents(yaml_str.as_bytes());
|
|
||||||
self.chart
|
|
||||||
.additional_values_files
|
|
||||||
.push(PathBuf::from(tf.path()));
|
|
||||||
};
|
|
||||||
|
|
||||||
self.env.insert(
|
|
||||||
"HELM_CONFIG_HOME".to_string(),
|
|
||||||
config_home.to_str().unwrap().to_string(),
|
|
||||||
);
|
|
||||||
self.env.insert(
|
|
||||||
"HELM_CACHE_HOME".to_string(),
|
|
||||||
config_home.to_str().unwrap().to_string(),
|
|
||||||
);
|
|
||||||
self.env.insert(
|
|
||||||
"HELM_DATA_HOME".to_string(),
|
|
||||||
config_home.to_str().unwrap().to_string(),
|
|
||||||
);
|
|
||||||
|
|
||||||
Command::new(path).envs(self.env).args(args).output()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl HelmChart {
|
|
||||||
pub fn chart_exists_locally(self, chart_home: PathBuf) -> Option<PathBuf> {
|
|
||||||
let chart_path =
|
|
||||||
PathBuf::from(chart_home.to_str().unwrap().to_string() + "/" + &self.name.to_string());
|
|
||||||
|
|
||||||
if chart_path.exists() {
|
|
||||||
Some(chart_path)
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn pull_command(self, chart_home: PathBuf) -> Vec<String> {
|
|
||||||
let mut args = vec![
|
|
||||||
"pull".to_string(),
|
|
||||||
"--untar".to_string(),
|
|
||||||
"--untardir".to_string(),
|
|
||||||
chart_home.to_str().unwrap().to_string(),
|
|
||||||
];
|
|
||||||
|
|
||||||
match self.repo {
|
|
||||||
Some(r) => {
|
|
||||||
if r.starts_with("oci://") {
|
|
||||||
args.push(
|
|
||||||
r.trim_end_matches("/").to_string() + "/" + self.name.clone().as_str(),
|
|
||||||
);
|
|
||||||
} else {
|
|
||||||
args.push("--repo".to_string());
|
|
||||||
args.push(r.to_string());
|
|
||||||
|
|
||||||
args.push(self.name);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
None => args.push(self.name),
|
|
||||||
};
|
|
||||||
|
|
||||||
if let Some(v) = self.version {
|
|
||||||
args.push("--version".to_string());
|
|
||||||
args.push(v.to_string());
|
|
||||||
}
|
|
||||||
|
|
||||||
args
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn helm_args(self, chart_home: PathBuf) -> Vec<String> {
|
|
||||||
let mut args: Vec<String> = vec!["template".to_string()];
|
|
||||||
|
|
||||||
match self.release_name {
|
|
||||||
Some(rn) => args.push(rn.to_string()),
|
|
||||||
None => args.push("--generate-name".to_string()),
|
|
||||||
}
|
|
||||||
|
|
||||||
args.push(
|
|
||||||
PathBuf::from(chart_home.to_str().unwrap().to_string() + "/" + self.name.as_str())
|
|
||||||
.to_str()
|
|
||||||
.unwrap()
|
|
||||||
.to_string(),
|
|
||||||
);
|
|
||||||
|
|
||||||
if let Some(n) = self.namespace {
|
|
||||||
args.push("--namespace".to_string());
|
|
||||||
args.push(n.to_string());
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(f) = self.values_file {
|
|
||||||
args.push("-f".to_string());
|
|
||||||
args.push(f.to_str().unwrap().to_string());
|
|
||||||
}
|
|
||||||
|
|
||||||
for f in self.additional_values_files {
|
|
||||||
args.push("-f".to_string());
|
|
||||||
args.push(f.to_str().unwrap().to_string());
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(vv) = self.api_versions {
|
|
||||||
for v in vv {
|
|
||||||
args.push("--api-versions".to_string());
|
|
||||||
args.push(v);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(kv) = self.kube_version {
|
|
||||||
args.push("--kube-version".to_string());
|
|
||||||
args.push(kv);
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(crd) = self.include_crds
|
|
||||||
&& crd
|
|
||||||
{
|
|
||||||
args.push("--include-crds".to_string());
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(st) = self.skip_tests
|
|
||||||
&& st
|
|
||||||
{
|
|
||||||
args.push("--skip-tests".to_string());
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(sh) = self.skip_hooks
|
|
||||||
&& sh
|
|
||||||
{
|
|
||||||
args.push("--no-hooks".to_string());
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(d) = self.debug
|
|
||||||
&& d
|
|
||||||
{
|
|
||||||
args.push("--debug".to_string());
|
|
||||||
}
|
|
||||||
|
|
||||||
args
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Score that renders a Helm chart through the `helm` CLI
/// (v2 of the Helm chart score API).
#[derive(Debug, Clone, Serialize)]
pub struct HelmChartScoreV2 {
    // Chart description: name, repository, values and templating options.
    pub chart: HelmChart,
}
|
|
||||||
|
|
||||||
impl<T: Topology + K8sclient + HelmCommand> Score<T> for HelmChartScoreV2 {
|
|
||||||
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
|
||||||
Box::new(HelmChartInterpretV2 {
|
|
||||||
score: self.clone(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
fn name(&self) -> String {
|
|
||||||
format!(
|
|
||||||
"{} {} HelmChartScoreV2",
|
|
||||||
self.chart
|
|
||||||
.release_name
|
|
||||||
.clone()
|
|
||||||
.unwrap_or("Unknown".to_string()),
|
|
||||||
self.chart.name
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Interpret backing [`HelmChartScoreV2`]: runs `helm template` and
/// (eventually) applies the rendered manifests to the cluster.
#[derive(Debug, Serialize)]
pub struct HelmChartInterpretV2 {
    pub score: HelmChartScoreV2,
}
// No inherent methods yet; kept as an anchor for upcoming helpers.
impl HelmChartInterpretV2 {}
|
|
||||||
|
|
||||||
#[async_trait]
impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for HelmChartInterpretV2 {
    /// Renders the chart with a locally-configured helm executor.
    ///
    /// NOTE(review): despite the success message, nothing is applied to the
    /// cluster yet — see the TODO below.
    async fn execute(
        &self,
        _inventory: &Inventory,
        _topology: &T,
    ) -> Result<Outcome, InterpretError> {
        // Resolving the namespace from the active cluster is not implemented:
        // `todo!` panics when the score does not carry a namespace.
        let _ns = self
            .score
            .chart
            .namespace
            .as_ref()
            .unwrap_or_else(|| todo!("Get namespace from active kubernetes cluster"));

        // NOTE(review): kube_version is hard-coded here — confirm "v1.33.0"
        // is the intended pinned default for template rendering.
        let helm_executor = HelmCommandExecutor {
            env: HashMap::new(),
            path: None,
            args: vec![],
            api_versions: None,
            kube_version: "v1.33.0".to_string(),
            debug: Some(false),
            globals: HelmGlobals {
                chart_home: None,
                config_home: None,
            },
            chart: self.score.chart.clone(),
        };

        // let mut helm_options = Vec::new();
        // if self.score.create_namespace {
        //     helm_options.push(NonBlankString::from_str("--create-namespace").unwrap());
        // }

        // Render the chart; any executor error becomes an InterpretError.
        let res = helm_executor.generate();

        let _output = match res {
            Ok(output) => output,
            Err(err) => return Err(InterpretError::new(err.to_string())),
        };

        // TODO: implement actually applying the YAML from the templating in the generate function to a k8s cluster, having trouble passing in straight YAML into the k8s client

        // let k8s_resource = k8s_openapi::serde_json::from_str(output.as_str()).unwrap();

        // let client = topology
        //     .k8s_client()
        //     .await
        //     .expect("Environment should provide enough information to instanciate a client")
        //     .apply_namespaced(&vec![output], Some(ns.to_string().as_str()));
        // match client.apply_yaml(output) {
        //     Ok(_) => return Ok(Outcome::success("Helm chart deployed".to_string())),
        //     Err(e) => return Err(InterpretError::new(e)),
        // }

        Ok(Outcome::success("Helm chart deployed".to_string()))
    }

    fn get_name(&self) -> InterpretName {
        InterpretName::HelmCommand
    }
    fn get_version(&self) -> Version {
        todo!()
    }
    fn get_status(&self) -> InterpretStatus {
        todo!()
    }
    fn get_children(&self) -> Vec<Id> {
        todo!()
    }
}
|
|
||||||
@@ -1,2 +1 @@
|
|||||||
pub mod chart;
|
pub mod chart;
|
||||||
pub mod command;
|
|
||||||
|
|||||||
@@ -63,7 +63,7 @@ impl<T: Topology + HttpServer> Interpret<T> for StaticFilesHttpInterpret {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for f in self.score.files.iter() {
|
for f in self.score.files.iter() {
|
||||||
http_server.serve_file_content(f).await?
|
http_server.serve_file_content(&f).await?
|
||||||
}
|
}
|
||||||
|
|
||||||
http_server.commit_config().await?;
|
http_server.commit_config().await?;
|
||||||
|
|||||||
@@ -74,7 +74,11 @@ impl<T: Topology> Interpret<T> for DiscoverHostForRoleInterpret {
|
|||||||
|
|
||||||
match ans {
|
match ans {
|
||||||
Ok(choice) => {
|
Ok(choice) => {
|
||||||
info!("Selected {} as the bootstrap node.", choice.summary());
|
info!(
|
||||||
|
"Selected {} as the {:?} node.",
|
||||||
|
choice.summary(),
|
||||||
|
self.score.role
|
||||||
|
);
|
||||||
host_repo
|
host_repo
|
||||||
.save_role_mapping(&self.score.role, &choice)
|
.save_role_mapping(&self.score.role, &choice)
|
||||||
.await?;
|
.await?;
|
||||||
@@ -90,10 +94,7 @@ impl<T: Topology> Interpret<T> for DiscoverHostForRoleInterpret {
|
|||||||
"Failed to select node for role {:?} : {}",
|
"Failed to select node for role {:?} : {}",
|
||||||
self.score.role, e
|
self.score.role, e
|
||||||
);
|
);
|
||||||
return Err(InterpretError::new(format!(
|
return Err(InterpretError::new(format!("Could not select host : {e}")));
|
||||||
"Could not select host : {}",
|
|
||||||
e
|
|
||||||
)));
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -0,0 +1,157 @@
|
|||||||
|
use std::collections::BTreeMap;
|
||||||
|
|
||||||
|
use k8s_openapi::{
|
||||||
|
api::core::v1::{Affinity, Toleration},
|
||||||
|
apimachinery::pkg::apis::meta::v1::ObjectMeta,
|
||||||
|
};
|
||||||
|
use kube::CustomResource;
|
||||||
|
use schemars::JsonSchema;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use serde_json::Value;
|
||||||
|
|
||||||
|
#[derive(CustomResource, Deserialize, Serialize, Clone, Debug)]
|
||||||
|
#[kube(
|
||||||
|
group = "operators.coreos.com",
|
||||||
|
version = "v1alpha1",
|
||||||
|
kind = "CatalogSource",
|
||||||
|
plural = "catalogsources",
|
||||||
|
namespaced = true,
|
||||||
|
schema = "disabled"
|
||||||
|
)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct CatalogSourceSpec {
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub address: Option<String>,
|
||||||
|
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub config_map: Option<String>,
|
||||||
|
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub description: Option<String>,
|
||||||
|
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub display_name: Option<String>,
|
||||||
|
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub grpc_pod_config: Option<GrpcPodConfig>,
|
||||||
|
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub icon: Option<Icon>,
|
||||||
|
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub image: Option<String>,
|
||||||
|
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub priority: Option<i64>,
|
||||||
|
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub publisher: Option<String>,
|
||||||
|
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub run_as_root: Option<bool>,
|
||||||
|
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub secrets: Option<Vec<String>>,
|
||||||
|
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub source_type: Option<String>,
|
||||||
|
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub update_strategy: Option<UpdateStrategy>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize, Serialize, Clone, Debug)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct GrpcPodConfig {
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub affinity: Option<Affinity>,
|
||||||
|
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub extract_content: Option<ExtractContent>,
|
||||||
|
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub memory_target: Option<Value>,
|
||||||
|
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub node_selector: Option<BTreeMap<String, String>>,
|
||||||
|
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub priority_class_name: Option<String>,
|
||||||
|
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub security_context_config: Option<String>,
|
||||||
|
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub tolerations: Option<Vec<Toleration>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct ExtractContent {
|
||||||
|
pub cache_dir: String,
|
||||||
|
pub catalog_dir: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct Icon {
|
||||||
|
pub base64data: String,
|
||||||
|
pub mediatype: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct UpdateStrategy {
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub registry_poll: Option<RegistryPoll>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct RegistryPoll {
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub interval: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for CatalogSource {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self {
|
||||||
|
metadata: ObjectMeta::default(),
|
||||||
|
spec: CatalogSourceSpec {
|
||||||
|
address: None,
|
||||||
|
config_map: None,
|
||||||
|
description: None,
|
||||||
|
display_name: None,
|
||||||
|
grpc_pod_config: None,
|
||||||
|
icon: None,
|
||||||
|
image: None,
|
||||||
|
priority: None,
|
||||||
|
publisher: None,
|
||||||
|
run_as_root: None,
|
||||||
|
secrets: None,
|
||||||
|
source_type: None,
|
||||||
|
update_strategy: None,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for CatalogSourceSpec {
    /// Spec with every optional field unset; callers override what they need
    /// via struct-update syntax (`..CatalogSourceSpec::default()`).
    fn default() -> Self {
        Self {
            address: None,
            config_map: None,
            description: None,
            display_name: None,
            grpc_pod_config: None,
            icon: None,
            image: None,
            priority: None,
            publisher: None,
            run_as_root: None,
            secrets: None,
            source_type: None,
            update_strategy: None,
        }
    }
}
|
||||||
4
harmony/src/modules/k8s/apps/crd/mod.rs
Normal file
4
harmony/src/modules/k8s/apps/crd/mod.rs
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
mod catalogsources_operators_coreos_com;
|
||||||
|
pub use catalogsources_operators_coreos_com::*;
|
||||||
|
mod subscriptions_operators_coreos_com;
|
||||||
|
pub use subscriptions_operators_coreos_com::*;
|
||||||
@@ -0,0 +1,68 @@
|
|||||||
|
use k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta;
|
||||||
|
use kube::CustomResource;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
#[derive(CustomResource, Deserialize, Serialize, Clone, Debug)]
|
||||||
|
#[kube(
|
||||||
|
group = "operators.coreos.com",
|
||||||
|
version = "v1alpha1",
|
||||||
|
kind = "Subscription",
|
||||||
|
plural = "subscriptions",
|
||||||
|
namespaced = true,
|
||||||
|
schema = "disabled"
|
||||||
|
)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct SubscriptionSpec {
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub channel: Option<String>,
|
||||||
|
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub config: Option<SubscriptionConfig>,
|
||||||
|
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub install_plan_approval: Option<String>,
|
||||||
|
|
||||||
|
pub name: String,
|
||||||
|
|
||||||
|
pub source: String,
|
||||||
|
|
||||||
|
pub source_namespace: String,
|
||||||
|
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub starting_csv: Option<String>,
|
||||||
|
}
|
||||||
|
#[derive(Deserialize, Serialize, Clone, Debug)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct SubscriptionConfig {
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub env: Option<Vec<k8s_openapi::api::core::v1::EnvVar>>,
|
||||||
|
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub node_selector: Option<std::collections::BTreeMap<String, String>>,
|
||||||
|
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub tolerations: Option<Vec<k8s_openapi::api::core::v1::Toleration>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for Subscription {
|
||||||
|
fn default() -> Self {
|
||||||
|
Subscription {
|
||||||
|
metadata: ObjectMeta::default(),
|
||||||
|
spec: SubscriptionSpec::default(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for SubscriptionSpec {
    /// Empty spec: the required string fields (`name`, `source`,
    /// `source_namespace`) are empty and must be filled in before the
    /// resource is applied; all optional fields are unset.
    fn default() -> SubscriptionSpec {
        SubscriptionSpec {
            name: String::new(),
            source: String::new(),
            source_namespace: String::new(),
            channel: None,
            config: None,
            install_plan_approval: None,
            starting_csv: None,
        }
    }
}
|
||||||
3
harmony/src/modules/k8s/apps/mod.rs
Normal file
3
harmony/src/modules/k8s/apps/mod.rs
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
mod operatorhub;
|
||||||
|
pub use operatorhub::*;
|
||||||
|
pub mod crd;
|
||||||
107
harmony/src/modules/k8s/apps/operatorhub.rs
Normal file
107
harmony/src/modules/k8s/apps/operatorhub.rs
Normal file
@@ -0,0 +1,107 @@
|
|||||||
|
// Write operatorhub catalog score
|
||||||
|
// for now this will only support on OKD with the default catalog and operatorhub setup and does not verify OLM state or anything else. Very opinionated and bare-bones to start
|
||||||
|
|
||||||
|
use k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta;
|
||||||
|
use serde::Serialize;
|
||||||
|
|
||||||
|
use crate::interpret::Interpret;
|
||||||
|
use crate::modules::k8s::apps::crd::{
|
||||||
|
CatalogSource, CatalogSourceSpec, RegistryPoll, UpdateStrategy,
|
||||||
|
};
|
||||||
|
use crate::modules::k8s::resource::K8sResourceScore;
|
||||||
|
use crate::score::Score;
|
||||||
|
use crate::topology::{K8sclient, Topology};
|
||||||
|
|
||||||
|
/// Installs the CatalogSource in a cluster which already has the required services and CRDs installed.
|
||||||
|
///
|
||||||
|
/// ```rust
|
||||||
|
/// use harmony::modules::k8s::apps::OperatorHubCatalogSourceScore;
|
||||||
|
///
|
||||||
|
/// let score = OperatorHubCatalogSourceScore::default();
|
||||||
|
/// ```
|
||||||
|
///
|
||||||
|
/// Required services:
|
||||||
|
/// - catalog-operator
|
||||||
|
/// - olm-operator
|
||||||
|
///
|
||||||
|
/// They are installed by default with OKD/Openshift
|
||||||
|
///
|
||||||
|
/// **Warning** : this initial implementation does not manage the dependencies. They must already
|
||||||
|
/// exist in the cluster.
|
||||||
|
#[derive(Debug, Clone, Serialize)]
|
||||||
|
pub struct OperatorHubCatalogSourceScore {
|
||||||
|
pub name: String,
|
||||||
|
pub namespace: String,
|
||||||
|
pub image: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl OperatorHubCatalogSourceScore {
|
||||||
|
pub fn new(name: &str, namespace: &str, image: &str) -> Self {
|
||||||
|
Self {
|
||||||
|
name: name.to_string(),
|
||||||
|
namespace: namespace.to_string(),
|
||||||
|
image: image.to_string(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for OperatorHubCatalogSourceScore {
    /// This default implementation will create this k8s resource :
    ///
    /// ```yaml
    /// apiVersion: operators.coreos.com/v1alpha1
    /// kind: CatalogSource
    /// metadata:
    ///   name: operatorhubio-catalog
    ///   namespace: openshift-marketplace
    /// spec:
    ///   sourceType: grpc
    ///   image: quay.io/operatorhubio/catalog:latest
    ///   displayName: Operatorhub Operators
    ///   publisher: OperatorHub.io
    ///   updateStrategy:
    ///     registryPoll:
    ///       interval: 60m
    /// ```
    fn default() -> Self {
        OperatorHubCatalogSourceScore {
            // Upstream operatorhub.io community catalog, installed in the
            // OKD/OpenShift marketplace namespace.
            name: "operatorhubio-catalog".to_string(),
            namespace: "openshift-marketplace".to_string(),
            image: "quay.io/operatorhubio/catalog:latest".to_string(),
        }
    }
}
|
||||||
|
|
||||||
|
impl<T: Topology + K8sclient> Score<T> for OperatorHubCatalogSourceScore {
|
||||||
|
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
||||||
|
let metadata = ObjectMeta {
|
||||||
|
name: Some(self.name.clone()),
|
||||||
|
namespace: Some(self.namespace.clone()),
|
||||||
|
..ObjectMeta::default()
|
||||||
|
};
|
||||||
|
|
||||||
|
let spec = CatalogSourceSpec {
|
||||||
|
source_type: Some("grpc".to_string()),
|
||||||
|
image: Some(self.image.clone()),
|
||||||
|
display_name: Some("Operatorhub Operators".to_string()),
|
||||||
|
publisher: Some("OperatorHub.io".to_string()),
|
||||||
|
update_strategy: Some(UpdateStrategy {
|
||||||
|
registry_poll: Some(RegistryPoll {
|
||||||
|
interval: Some("60m".to_string()),
|
||||||
|
}),
|
||||||
|
}),
|
||||||
|
..CatalogSourceSpec::default()
|
||||||
|
};
|
||||||
|
|
||||||
|
let catalog_source = CatalogSource {
|
||||||
|
metadata,
|
||||||
|
spec: spec,
|
||||||
|
};
|
||||||
|
|
||||||
|
K8sResourceScore::single(catalog_source, Some(self.namespace.clone())).create_interpret()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn name(&self) -> String {
|
||||||
|
format!("OperatorHubCatalogSourceScore({})", self.name)
|
||||||
|
}
|
||||||
|
}
|
||||||
19
harmony/src/modules/k8s/failover.rs
Normal file
19
harmony/src/modules/k8s/failover.rs
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use async_trait::async_trait;
|
||||||
|
use log::warn;
|
||||||
|
|
||||||
|
use crate::topology::{FailoverTopology, K8sclient, k8s::K8sClient};
|
||||||
|
|
||||||
|
#[async_trait]
impl<T: K8sclient> K8sclient for FailoverTopology<T> {
    // TODO figure out how to structure this properly. This gives access only to the primary k8s
    // client, which will work in many cases but is clearly not good enough for all uses cases
    // where k8s_client can be used. Logging a warning for now.
    /// Returns the primary topology's kubernetes client; the failover side is
    /// never consulted, and a warning is logged to make that visible.
    async fn k8s_client(&self) -> Result<Arc<K8sClient>, String> {
        warn!(
            "Failover topology k8s_client capability currently defers to the primary only. Make sure to check this is OK for you"
        );
        self.primary.k8s_client().await
    }
}
|
||||||
@@ -1,4 +1,6 @@
|
|||||||
|
pub mod apps;
|
||||||
pub mod deployment;
|
pub mod deployment;
|
||||||
|
mod failover;
|
||||||
pub mod ingress;
|
pub mod ingress;
|
||||||
pub mod namespace;
|
pub mod namespace;
|
||||||
pub mod resource;
|
pub mod resource;
|
||||||
|
|||||||
@@ -38,13 +38,15 @@ impl<
|
|||||||
+ 'static
|
+ 'static
|
||||||
+ Send
|
+ Send
|
||||||
+ Clone,
|
+ Clone,
|
||||||
T: Topology,
|
T: Topology + K8sclient,
|
||||||
> Score<T> for K8sResourceScore<K>
|
> Score<T> for K8sResourceScore<K>
|
||||||
where
|
where
|
||||||
<K as kube::Resource>::DynamicType: Default,
|
<K as kube::Resource>::DynamicType: Default,
|
||||||
{
|
{
|
||||||
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
||||||
todo!()
|
Box::new(K8sResourceInterpret {
|
||||||
|
score: self.clone(),
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
fn name(&self) -> String {
|
fn name(&self) -> String {
|
||||||
@@ -77,7 +79,33 @@ where
|
|||||||
_inventory: &Inventory,
|
_inventory: &Inventory,
|
||||||
topology: &T,
|
topology: &T,
|
||||||
) -> Result<Outcome, InterpretError> {
|
) -> Result<Outcome, InterpretError> {
|
||||||
info!("Applying {} resources", self.score.resource.len());
|
// TODO improve this log
|
||||||
|
let resource_names: Vec<String> = self
|
||||||
|
.score
|
||||||
|
.resource
|
||||||
|
.iter()
|
||||||
|
.map(|r| {
|
||||||
|
format!(
|
||||||
|
"{}{}",
|
||||||
|
r.meta()
|
||||||
|
.name
|
||||||
|
.as_ref()
|
||||||
|
.map(|n| format!("{n}"))
|
||||||
|
.unwrap_or_default(),
|
||||||
|
r.meta()
|
||||||
|
.namespace
|
||||||
|
.as_ref()
|
||||||
|
.map(|ns| format!("@{}", ns))
|
||||||
|
.unwrap_or_default()
|
||||||
|
)
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
info!(
|
||||||
|
"Applying {} resources : {}",
|
||||||
|
resource_names.len(),
|
||||||
|
resource_names.join(", ")
|
||||||
|
);
|
||||||
topology
|
topology
|
||||||
.k8s_client()
|
.k8s_client()
|
||||||
.await
|
.await
|
||||||
|
|||||||
@@ -11,8 +11,10 @@ pub mod k8s;
|
|||||||
pub mod lamp;
|
pub mod lamp;
|
||||||
pub mod load_balancer;
|
pub mod load_balancer;
|
||||||
pub mod monitoring;
|
pub mod monitoring;
|
||||||
|
pub mod network;
|
||||||
pub mod okd;
|
pub mod okd;
|
||||||
pub mod opnsense;
|
pub mod opnsense;
|
||||||
|
pub mod postgresql;
|
||||||
pub mod prometheus;
|
pub mod prometheus;
|
||||||
pub mod storage;
|
pub mod storage;
|
||||||
pub mod tenant;
|
pub mod tenant;
|
||||||
|
|||||||
@@ -1,21 +1,23 @@
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use async_trait::async_trait;
|
use log::debug;
|
||||||
use serde::Serialize;
|
use serde::Serialize;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
data::Version,
|
interpret::Interpret,
|
||||||
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
|
|
||||||
inventory::Inventory,
|
|
||||||
modules::{
|
modules::{
|
||||||
application::Application,
|
application::Application,
|
||||||
monitoring::kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus,
|
monitoring::{
|
||||||
prometheus::prometheus::PrometheusApplicationMonitoring,
|
grafana::grafana::Grafana, kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus,
|
||||||
|
},
|
||||||
|
prometheus::prometheus::PrometheusMonitoring,
|
||||||
},
|
},
|
||||||
score::Score,
|
score::Score,
|
||||||
topology::{PreparationOutcome, Topology, oberservability::monitoring::AlertReceiver},
|
topology::{
|
||||||
|
K8sclient, Topology,
|
||||||
|
oberservability::monitoring::{AlertReceiver, AlertingInterpret, ScrapeTarget},
|
||||||
|
},
|
||||||
};
|
};
|
||||||
use harmony_types::id::Id;
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize)]
|
#[derive(Debug, Clone, Serialize)]
|
||||||
pub struct ApplicationMonitoringScore {
|
pub struct ApplicationMonitoringScore {
|
||||||
@@ -24,12 +26,16 @@ pub struct ApplicationMonitoringScore {
|
|||||||
pub receivers: Vec<Box<dyn AlertReceiver<CRDPrometheus>>>,
|
pub receivers: Vec<Box<dyn AlertReceiver<CRDPrometheus>>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T: Topology + PrometheusApplicationMonitoring<CRDPrometheus>> Score<T>
|
impl<T: Topology + PrometheusMonitoring<CRDPrometheus> + K8sclient + Grafana> Score<T>
|
||||||
for ApplicationMonitoringScore
|
for ApplicationMonitoringScore
|
||||||
{
|
{
|
||||||
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
||||||
Box::new(ApplicationMonitoringInterpret {
|
debug!("creating alerting interpret");
|
||||||
score: self.clone(),
|
Box::new(AlertingInterpret {
|
||||||
|
sender: self.sender.clone(),
|
||||||
|
receivers: self.receivers.clone(),
|
||||||
|
rules: vec![],
|
||||||
|
scrape_targets: None,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -40,55 +46,3 @@ impl<T: Topology + PrometheusApplicationMonitoring<CRDPrometheus>> Score<T>
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct ApplicationMonitoringInterpret {
|
|
||||||
score: ApplicationMonitoringScore,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[async_trait]
|
|
||||||
impl<T: Topology + PrometheusApplicationMonitoring<CRDPrometheus>> Interpret<T>
|
|
||||||
for ApplicationMonitoringInterpret
|
|
||||||
{
|
|
||||||
async fn execute(
|
|
||||||
&self,
|
|
||||||
inventory: &Inventory,
|
|
||||||
topology: &T,
|
|
||||||
) -> Result<Outcome, InterpretError> {
|
|
||||||
let result = topology
|
|
||||||
.install_prometheus(
|
|
||||||
&self.score.sender,
|
|
||||||
inventory,
|
|
||||||
Some(self.score.receivers.clone()),
|
|
||||||
)
|
|
||||||
.await;
|
|
||||||
|
|
||||||
match result {
|
|
||||||
Ok(outcome) => match outcome {
|
|
||||||
PreparationOutcome::Success { details: _ } => {
|
|
||||||
Ok(Outcome::success("Prometheus installed".into()))
|
|
||||||
}
|
|
||||||
PreparationOutcome::Noop => {
|
|
||||||
Ok(Outcome::noop("Prometheus installation skipped".into()))
|
|
||||||
}
|
|
||||||
},
|
|
||||||
Err(err) => Err(InterpretError::from(err)),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_name(&self) -> InterpretName {
|
|
||||||
InterpretName::ApplicationMonitoring
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_version(&self) -> Version {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_status(&self) -> InterpretStatus {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_children(&self) -> Vec<Id> {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -9,8 +9,10 @@ use crate::{
|
|||||||
inventory::Inventory,
|
inventory::Inventory,
|
||||||
modules::{
|
modules::{
|
||||||
application::Application,
|
application::Application,
|
||||||
monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability,
|
monitoring::kube_prometheus::crd::{
|
||||||
prometheus::prometheus::PrometheusApplicationMonitoring,
|
crd_alertmanager_config::CRDPrometheus, rhob_alertmanager_config::RHOBObservability,
|
||||||
|
},
|
||||||
|
prometheus::prometheus::PrometheusMonitoring,
|
||||||
},
|
},
|
||||||
score::Score,
|
score::Score,
|
||||||
topology::{PreparationOutcome, Topology, oberservability::monitoring::AlertReceiver},
|
topology::{PreparationOutcome, Topology, oberservability::monitoring::AlertReceiver},
|
||||||
@@ -24,7 +26,7 @@ pub struct ApplicationRHOBMonitoringScore {
|
|||||||
pub receivers: Vec<Box<dyn AlertReceiver<RHOBObservability>>>,
|
pub receivers: Vec<Box<dyn AlertReceiver<RHOBObservability>>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T: Topology + PrometheusApplicationMonitoring<RHOBObservability>> Score<T>
|
impl<T: Topology + PrometheusMonitoring<RHOBObservability>> Score<T>
|
||||||
for ApplicationRHOBMonitoringScore
|
for ApplicationRHOBMonitoringScore
|
||||||
{
|
{
|
||||||
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
||||||
@@ -47,7 +49,7 @@ pub struct ApplicationRHOBMonitoringInterpret {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl<T: Topology + PrometheusApplicationMonitoring<RHOBObservability>> Interpret<T>
|
impl<T: Topology + PrometheusMonitoring<RHOBObservability>> Interpret<T>
|
||||||
for ApplicationRHOBMonitoringInterpret
|
for ApplicationRHOBMonitoringInterpret
|
||||||
{
|
{
|
||||||
async fn execute(
|
async fn execute(
|
||||||
|
|||||||
17
harmony/src/modules/monitoring/grafana/grafana.rs
Normal file
17
harmony/src/modules/monitoring/grafana/grafana.rs
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
use async_trait::async_trait;
|
||||||
|
use k8s_openapi::Resource;
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
inventory::Inventory,
|
||||||
|
topology::{PreparationError, PreparationOutcome},
|
||||||
|
};
|
||||||
|
|
||||||
|
#[async_trait]
pub trait Grafana {
    /// Ensures the Grafana operator is present in the topology, reporting
    /// whether anything had to change.
    ///
    /// # Errors
    /// Returns a [`PreparationError`] when the operator cannot be installed.
    async fn ensure_grafana_operator(
        &self,
        inventory: &Inventory,
    ) -> Result<PreparationOutcome, PreparationError>;

    /// Installs Grafana itself — presumably an instance managed by the
    /// operator above; exact semantics are defined by implementors.
    async fn install_grafana(&self) -> Result<PreparationOutcome, PreparationError>;
}
|
||||||
@@ -1,27 +1,28 @@
|
|||||||
|
use harmony_macros::hurl;
|
||||||
use non_blank_string_rs::NonBlankString;
|
use non_blank_string_rs::NonBlankString;
|
||||||
use std::str::FromStr;
|
use std::{collections::HashMap, str::FromStr};
|
||||||
|
|
||||||
use crate::modules::helm::chart::HelmChartScore;
|
use crate::modules::helm::chart::{HelmChartScore, HelmRepository};
|
||||||
|
|
||||||
pub fn grafana_helm_chart_score(ns: &str) -> HelmChartScore {
|
|
||||||
let values = r#"
|
|
||||||
rbac:
|
|
||||||
namespaced: true
|
|
||||||
sidecar:
|
|
||||||
dashboards:
|
|
||||||
enabled: true
|
|
||||||
"#
|
|
||||||
.to_string();
|
|
||||||
|
|
||||||
|
pub fn grafana_helm_chart_score(ns: &str, namespace_scope: bool) -> HelmChartScore {
|
||||||
|
let mut values_overrides = HashMap::new();
|
||||||
|
values_overrides.insert(
|
||||||
|
NonBlankString::from_str("namespaceScope").unwrap(),
|
||||||
|
namespace_scope.to_string(),
|
||||||
|
);
|
||||||
HelmChartScore {
|
HelmChartScore {
|
||||||
namespace: Some(NonBlankString::from_str(ns).unwrap()),
|
namespace: Some(NonBlankString::from_str(ns).unwrap()),
|
||||||
release_name: NonBlankString::from_str("grafana").unwrap(),
|
release_name: NonBlankString::from_str("grafana-operator").unwrap(),
|
||||||
chart_name: NonBlankString::from_str("oci://ghcr.io/grafana/helm-charts/grafana").unwrap(),
|
chart_name: NonBlankString::from_str("grafana/grafana-operator").unwrap(),
|
||||||
chart_version: None,
|
chart_version: None,
|
||||||
values_overrides: None,
|
values_overrides: Some(values_overrides),
|
||||||
values_yaml: Some(values.to_string()),
|
values_yaml: None,
|
||||||
create_namespace: true,
|
create_namespace: true,
|
||||||
install_only: true,
|
install_only: true,
|
||||||
repository: None,
|
repository: Some(HelmRepository::new(
|
||||||
|
"grafana".to_string(),
|
||||||
|
hurl!("https://grafana.github.io/helm-charts"),
|
||||||
|
true,
|
||||||
|
)),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1 +1,2 @@
|
|||||||
|
pub mod grafana;
|
||||||
pub mod helm;
|
pub mod helm;
|
||||||
|
|||||||
@@ -1,12 +1,25 @@
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use async_trait::async_trait;
|
||||||
use kube::CustomResource;
|
use kube::CustomResource;
|
||||||
use schemars::JsonSchema;
|
use schemars::JsonSchema;
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use crate::topology::{
|
use crate::{
|
||||||
|
interpret::{InterpretError, Outcome},
|
||||||
|
inventory::Inventory,
|
||||||
|
modules::{
|
||||||
|
monitoring::{
|
||||||
|
grafana::grafana::Grafana, kube_prometheus::crd::service_monitor::ServiceMonitor,
|
||||||
|
},
|
||||||
|
prometheus::prometheus::PrometheusMonitoring,
|
||||||
|
},
|
||||||
|
topology::{
|
||||||
|
K8sclient, Topology,
|
||||||
|
installable::Installable,
|
||||||
k8s::K8sClient,
|
k8s::K8sClient,
|
||||||
oberservability::monitoring::{AlertReceiver, AlertSender},
|
oberservability::monitoring::{AlertReceiver, AlertSender, ScrapeTarget},
|
||||||
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
||||||
@@ -26,6 +39,7 @@ pub struct AlertmanagerConfigSpec {
|
|||||||
pub struct CRDPrometheus {
|
pub struct CRDPrometheus {
|
||||||
pub namespace: String,
|
pub namespace: String,
|
||||||
pub client: Arc<K8sClient>,
|
pub client: Arc<K8sClient>,
|
||||||
|
pub service_monitor: Vec<ServiceMonitor>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl AlertSender for CRDPrometheus {
|
impl AlertSender for CRDPrometheus {
|
||||||
@@ -40,6 +54,12 @@ impl Clone for Box<dyn AlertReceiver<CRDPrometheus>> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl Clone for Box<dyn ScrapeTarget<CRDPrometheus>> {
|
||||||
|
fn clone(&self) -> Self {
|
||||||
|
self.clone_box()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl Serialize for Box<dyn AlertReceiver<CRDPrometheus>> {
|
impl Serialize for Box<dyn AlertReceiver<CRDPrometheus>> {
|
||||||
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
|
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
|
||||||
where
|
where
|
||||||
@@ -48,3 +68,24 @@ impl Serialize for Box<dyn AlertReceiver<CRDPrometheus>> {
|
|||||||
todo!()
|
todo!()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl<T: Topology + K8sclient + PrometheusMonitoring<CRDPrometheus> + Grafana> Installable<T>
|
||||||
|
for CRDPrometheus
|
||||||
|
{
|
||||||
|
async fn configure(&self, inventory: &Inventory, topology: &T) -> Result<(), InterpretError> {
|
||||||
|
topology.ensure_grafana_operator(inventory).await?;
|
||||||
|
topology.ensure_prometheus_operator(self, inventory).await?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn ensure_installed(
|
||||||
|
&self,
|
||||||
|
inventory: &Inventory,
|
||||||
|
topology: &T,
|
||||||
|
) -> Result<(), InterpretError> {
|
||||||
|
topology.install_grafana().await?;
|
||||||
|
topology.install_prometheus(&self, inventory, None).await?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -103,9 +103,34 @@ pub struct GrafanaDashboardSpec {
|
|||||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
pub resync_period: Option<String>,
|
pub resync_period: Option<String>,
|
||||||
|
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub datasources: Option<Vec<GrafanaDashboardDatasource>>,
|
||||||
|
|
||||||
pub instance_selector: LabelSelector,
|
pub instance_selector: LabelSelector,
|
||||||
|
|
||||||
pub json: String,
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub json: Option<String>,
|
||||||
|
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub grafana_com: Option<GrafanaCom>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct GrafanaDashboardDatasource {
|
||||||
|
pub input_name: String,
|
||||||
|
pub datasource_name: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
// ------------------------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct GrafanaCom {
|
||||||
|
pub id: u32,
|
||||||
|
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub revision: Option<u32>,
|
||||||
}
|
}
|
||||||
|
|
||||||
// ------------------------------------------------------------------------------------------------
|
// ------------------------------------------------------------------------------------------------
|
||||||
@@ -126,20 +151,79 @@ pub struct GrafanaDatasourceSpec {
|
|||||||
pub allow_cross_namespace_import: Option<bool>,
|
pub allow_cross_namespace_import: Option<bool>,
|
||||||
|
|
||||||
pub datasource: GrafanaDatasourceConfig,
|
pub datasource: GrafanaDatasourceConfig,
|
||||||
|
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub values_from: Option<Vec<GrafanaValueFrom>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct GrafanaValueFrom {
|
||||||
|
pub target_path: String,
|
||||||
|
pub value_from: GrafanaValueSource,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct GrafanaValueSource {
|
||||||
|
pub secret_key_ref: GrafanaSecretKeyRef,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct GrafanaSecretKeyRef {
|
||||||
|
pub name: String,
|
||||||
|
pub key: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
||||||
#[serde(rename_all = "camelCase")]
|
#[serde(rename_all = "camelCase")]
|
||||||
pub struct GrafanaDatasourceConfig {
|
pub struct GrafanaDatasourceConfig {
|
||||||
pub access: String,
|
pub access: String,
|
||||||
pub database: Option<String>,
|
|
||||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
pub json_data: Option<BTreeMap<String, String>>,
|
pub database: Option<String>,
|
||||||
pub name: String,
|
pub name: String,
|
||||||
pub r#type: String,
|
pub r#type: String,
|
||||||
pub url: String,
|
pub url: String,
|
||||||
|
/// Represents jsonData in the GrafanaDatasource spec
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub json_data: Option<GrafanaDatasourceJsonData>,
|
||||||
|
|
||||||
|
/// Represents secureJsonData (secrets)
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub secure_json_data: Option<GrafanaDatasourceSecureJsonData>,
|
||||||
|
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub is_default: Option<bool>,
|
||||||
|
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub editable: Option<bool>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct GrafanaDatasourceJsonData {
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub time_interval: Option<String>,
|
||||||
|
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub http_header_name1: Option<String>,
|
||||||
|
|
||||||
|
/// Disable TLS skip verification (false = verify)
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub tls_skip_verify: Option<bool>,
|
||||||
|
|
||||||
|
/// Auth type - set to "forward" for OpenShift OAuth identity
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub oauth_pass_thru: Option<bool>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct GrafanaDatasourceSecureJsonData {
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub http_header_value1: Option<String>,
|
||||||
|
}
|
||||||
// ------------------------------------------------------------------------------------------------
|
// ------------------------------------------------------------------------------------------------
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, Default)]
|
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, Default)]
|
||||||
|
|||||||
@@ -0,0 +1,187 @@
|
|||||||
|
use std::net::IpAddr;
|
||||||
|
|
||||||
|
use async_trait::async_trait;
|
||||||
|
use kube::CustomResource;
|
||||||
|
use schemars::JsonSchema;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
modules::monitoring::kube_prometheus::crd::{
|
||||||
|
crd_alertmanager_config::CRDPrometheus, crd_prometheuses::LabelSelector,
|
||||||
|
},
|
||||||
|
topology::oberservability::monitoring::ScrapeTarget,
|
||||||
|
};
|
||||||
|
|
||||||
|
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
||||||
|
#[kube(
|
||||||
|
group = "monitoring.coreos.com",
|
||||||
|
version = "v1alpha1",
|
||||||
|
kind = "ScrapeConfig",
|
||||||
|
plural = "scrapeconfigs",
|
||||||
|
namespaced
|
||||||
|
)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct ScrapeConfigSpec {
|
||||||
|
/// List of static configurations.
|
||||||
|
pub static_configs: Option<Vec<StaticConfig>>,
|
||||||
|
|
||||||
|
/// Kubernetes service discovery.
|
||||||
|
pub kubernetes_sd_configs: Option<Vec<KubernetesSDConfig>>,
|
||||||
|
|
||||||
|
/// HTTP-based service discovery.
|
||||||
|
pub http_sd_configs: Option<Vec<HttpSDConfig>>,
|
||||||
|
|
||||||
|
/// File-based service discovery.
|
||||||
|
pub file_sd_configs: Option<Vec<FileSDConfig>>,
|
||||||
|
|
||||||
|
/// DNS-based service discovery.
|
||||||
|
pub dns_sd_configs: Option<Vec<DnsSDConfig>>,
|
||||||
|
|
||||||
|
/// Consul service discovery.
|
||||||
|
pub consul_sd_configs: Option<Vec<ConsulSDConfig>>,
|
||||||
|
|
||||||
|
/// Relabeling configuration applied to discovered targets.
|
||||||
|
pub relabel_configs: Option<Vec<RelabelConfig>>,
|
||||||
|
|
||||||
|
/// Metric relabeling configuration applied to scraped samples.
|
||||||
|
pub metric_relabel_configs: Option<Vec<RelabelConfig>>,
|
||||||
|
|
||||||
|
/// Path to scrape metrics from (defaults to `/metrics`).
|
||||||
|
pub metrics_path: Option<String>,
|
||||||
|
|
||||||
|
/// Interval at which Prometheus scrapes targets (e.g., "30s").
|
||||||
|
pub scrape_interval: Option<String>,
|
||||||
|
|
||||||
|
/// Timeout for scraping (e.g., "10s").
|
||||||
|
pub scrape_timeout: Option<String>,
|
||||||
|
|
||||||
|
/// Optional job name override.
|
||||||
|
pub job_name: Option<String>,
|
||||||
|
|
||||||
|
/// Optional scheme (http or https).
|
||||||
|
pub scheme: Option<String>,
|
||||||
|
|
||||||
|
/// Authorization paramaters for snmp walk
|
||||||
|
pub params: Option<Params>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Static configuration section of a ScrapeConfig.
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct StaticConfig {
|
||||||
|
pub targets: Vec<String>,
|
||||||
|
|
||||||
|
pub labels: Option<LabelSelector>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Relabeling configuration for target or metric relabeling.
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct RelabelConfig {
|
||||||
|
pub source_labels: Option<Vec<String>>,
|
||||||
|
pub separator: Option<String>,
|
||||||
|
pub target_label: Option<String>,
|
||||||
|
pub regex: Option<String>,
|
||||||
|
pub modulus: Option<u64>,
|
||||||
|
pub replacement: Option<String>,
|
||||||
|
pub action: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Kubernetes service discovery configuration.
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct KubernetesSDConfig {
|
||||||
|
///"pod", "service", "endpoints"pub role: String,
|
||||||
|
pub namespaces: Option<NamespaceSelector>,
|
||||||
|
pub selectors: Option<Vec<LabelSelector>>,
|
||||||
|
pub api_server: Option<String>,
|
||||||
|
pub bearer_token_file: Option<String>,
|
||||||
|
pub tls_config: Option<TLSConfig>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Namespace selector for Kubernetes service discovery.
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct NamespaceSelector {
|
||||||
|
pub any: Option<bool>,
|
||||||
|
pub match_names: Option<Vec<String>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// HTTP-based service discovery configuration.
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct HttpSDConfig {
|
||||||
|
pub url: String,
|
||||||
|
pub refresh_interval: Option<String>,
|
||||||
|
pub basic_auth: Option<BasicAuth>,
|
||||||
|
pub authorization: Option<Authorization>,
|
||||||
|
pub tls_config: Option<TLSConfig>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// File-based service discovery configuration.
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct FileSDConfig {
|
||||||
|
pub files: Vec<String>,
|
||||||
|
pub refresh_interval: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// DNS-based service discovery configuration.
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct DnsSDConfig {
|
||||||
|
pub names: Vec<String>,
|
||||||
|
pub refresh_interval: Option<String>,
|
||||||
|
pub type_: Option<String>, // SRV, A, AAAA
|
||||||
|
pub port: Option<u16>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Consul service discovery configuration.
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct ConsulSDConfig {
|
||||||
|
pub server: String,
|
||||||
|
pub services: Option<Vec<String>>,
|
||||||
|
pub scheme: Option<String>,
|
||||||
|
pub datacenter: Option<String>,
|
||||||
|
pub tag_separator: Option<String>,
|
||||||
|
pub refresh_interval: Option<String>,
|
||||||
|
pub tls_config: Option<TLSConfig>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Basic authentication credentials.
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct BasicAuth {
|
||||||
|
pub username: String,
|
||||||
|
pub password: Option<String>,
|
||||||
|
pub password_file: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Bearer token or other auth mechanisms.
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct Authorization {
|
||||||
|
pub credentials: Option<String>,
|
||||||
|
pub credentials_file: Option<String>,
|
||||||
|
pub type_: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// TLS configuration for secure scraping.
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct TLSConfig {
|
||||||
|
pub ca_file: Option<String>,
|
||||||
|
pub cert_file: Option<String>,
|
||||||
|
pub key_file: Option<String>,
|
||||||
|
pub server_name: Option<String>,
|
||||||
|
pub insecure_skip_verify: Option<bool>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Authorization parameters for SNMP walk.
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct Params {
|
||||||
|
pub auth: Option<Vec<String>>,
|
||||||
|
pub module: Option<Vec<String>>,
|
||||||
|
}
|
||||||
@@ -4,6 +4,7 @@ pub mod crd_default_rules;
|
|||||||
pub mod crd_grafana;
|
pub mod crd_grafana;
|
||||||
pub mod crd_prometheus_rules;
|
pub mod crd_prometheus_rules;
|
||||||
pub mod crd_prometheuses;
|
pub mod crd_prometheuses;
|
||||||
|
pub mod crd_scrape_config;
|
||||||
pub mod grafana_default_dashboard;
|
pub mod grafana_default_dashboard;
|
||||||
pub mod grafana_operator;
|
pub mod grafana_operator;
|
||||||
pub mod prometheus_operator;
|
pub mod prometheus_operator;
|
||||||
|
|||||||
@@ -1,8 +1,12 @@
|
|||||||
|
use std::collections::BTreeMap;
|
||||||
|
|
||||||
use kube::CustomResource;
|
use kube::CustomResource;
|
||||||
use schemars::JsonSchema;
|
use schemars::JsonSchema;
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::LabelSelector;
|
use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::{
|
||||||
|
LabelSelector, PrometheusSpec,
|
||||||
|
};
|
||||||
|
|
||||||
/// MonitoringStack CRD for monitoring.rhobs/v1alpha1
|
/// MonitoringStack CRD for monitoring.rhobs/v1alpha1
|
||||||
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
#[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
|
||||||
|
|||||||
@@ -31,6 +31,7 @@ impl<T: Topology + HelmCommand + TenantManager> Score<T> for HelmPrometheusAlert
|
|||||||
sender: KubePrometheus { config },
|
sender: KubePrometheus { config },
|
||||||
receivers: self.receivers.clone(),
|
receivers: self.receivers.clone(),
|
||||||
rules: self.rules.clone(),
|
rules: self.rules.clone(),
|
||||||
|
scrape_targets: None,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
fn name(&self) -> String {
|
fn name(&self) -> String {
|
||||||
|
|||||||
@@ -6,3 +6,4 @@ pub mod kube_prometheus;
|
|||||||
pub mod ntfy;
|
pub mod ntfy;
|
||||||
pub mod okd;
|
pub mod okd;
|
||||||
pub mod prometheus;
|
pub mod prometheus;
|
||||||
|
pub mod scrape_target;
|
||||||
|
|||||||
@@ -100,11 +100,7 @@ impl<T: Topology + HelmCommand + K8sclient + MultiTargetTopology> Interpret<T> f
|
|||||||
|
|
||||||
info!("deploying ntfy...");
|
info!("deploying ntfy...");
|
||||||
client
|
client
|
||||||
.wait_until_deployment_ready(
|
.wait_until_deployment_ready("ntfy", Some(self.score.namespace.as_str()), None)
|
||||||
"ntfy".to_string(),
|
|
||||||
Some(self.score.namespace.as_str()),
|
|
||||||
None,
|
|
||||||
)
|
|
||||||
.await?;
|
.await?;
|
||||||
info!("ntfy deployed");
|
info!("ntfy deployed");
|
||||||
|
|
||||||
|
|||||||
@@ -114,7 +114,7 @@ impl Prometheus {
|
|||||||
};
|
};
|
||||||
|
|
||||||
if let Some(ns) = namespace.as_deref() {
|
if let Some(ns) = namespace.as_deref() {
|
||||||
grafana_helm_chart_score(ns)
|
grafana_helm_chart_score(ns, false)
|
||||||
.interpret(inventory, topology)
|
.interpret(inventory, topology)
|
||||||
.await
|
.await
|
||||||
} else {
|
} else {
|
||||||
|
|||||||
1
harmony/src/modules/monitoring/scrape_target/mod.rs
Normal file
1
harmony/src/modules/monitoring/scrape_target/mod.rs
Normal file
@@ -0,0 +1 @@
|
|||||||
|
pub mod server;
|
||||||
80
harmony/src/modules/monitoring/scrape_target/server.rs
Normal file
80
harmony/src/modules/monitoring/scrape_target/server.rs
Normal file
@@ -0,0 +1,80 @@
|
|||||||
|
use std::net::IpAddr;
|
||||||
|
|
||||||
|
use async_trait::async_trait;
|
||||||
|
use kube::api::ObjectMeta;
|
||||||
|
use serde::Serialize;
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
interpret::{InterpretError, Outcome},
|
||||||
|
modules::monitoring::kube_prometheus::crd::{
|
||||||
|
crd_alertmanager_config::CRDPrometheus,
|
||||||
|
crd_scrape_config::{Params, RelabelConfig, ScrapeConfig, ScrapeConfigSpec, StaticConfig},
|
||||||
|
},
|
||||||
|
topology::oberservability::monitoring::ScrapeTarget,
|
||||||
|
};
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Serialize)]
|
||||||
|
pub struct Server {
|
||||||
|
pub name: String,
|
||||||
|
pub ip: IpAddr,
|
||||||
|
pub auth: String,
|
||||||
|
pub module: String,
|
||||||
|
pub domain: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl ScrapeTarget<CRDPrometheus> for Server {
|
||||||
|
async fn install(&self, sender: &CRDPrometheus) -> Result<Outcome, InterpretError> {
|
||||||
|
let scrape_config_spec = ScrapeConfigSpec {
|
||||||
|
static_configs: Some(vec![StaticConfig {
|
||||||
|
targets: vec![self.ip.to_string()],
|
||||||
|
labels: None,
|
||||||
|
}]),
|
||||||
|
scrape_interval: Some("2m".to_string()),
|
||||||
|
kubernetes_sd_configs: None,
|
||||||
|
http_sd_configs: None,
|
||||||
|
file_sd_configs: None,
|
||||||
|
dns_sd_configs: None,
|
||||||
|
params: Some(Params {
|
||||||
|
auth: Some(vec![self.auth.clone()]),
|
||||||
|
module: Some(vec![self.module.clone()]),
|
||||||
|
}),
|
||||||
|
consul_sd_configs: None,
|
||||||
|
relabel_configs: Some(vec![RelabelConfig {
|
||||||
|
action: None,
|
||||||
|
source_labels: Some(vec!["__address__".to_string()]),
|
||||||
|
separator: None,
|
||||||
|
target_label: Some("__param_target".to_string()),
|
||||||
|
regex: None,
|
||||||
|
replacement: Some(format!("snmp.{}:31080", self.domain.clone())),
|
||||||
|
modulus: None,
|
||||||
|
}]),
|
||||||
|
metric_relabel_configs: None,
|
||||||
|
metrics_path: Some("/snmp".to_string()),
|
||||||
|
scrape_timeout: Some("2m".to_string()),
|
||||||
|
job_name: Some(format!("snmp_exporter/cloud/{}", self.name.clone())),
|
||||||
|
scheme: None,
|
||||||
|
};
|
||||||
|
|
||||||
|
let scrape_config = ScrapeConfig {
|
||||||
|
metadata: ObjectMeta {
|
||||||
|
name: Some(self.name.clone()),
|
||||||
|
namespace: Some(sender.namespace.clone()),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
spec: scrape_config_spec,
|
||||||
|
};
|
||||||
|
sender
|
||||||
|
.client
|
||||||
|
.apply(&scrape_config, Some(&sender.namespace.clone()))
|
||||||
|
.await?;
|
||||||
|
Ok(Outcome::success(format!(
|
||||||
|
"installed scrape target {}",
|
||||||
|
self.name.clone()
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn clone_box(&self) -> Box<dyn ScrapeTarget<CRDPrometheus>> {
|
||||||
|
Box::new(self.clone())
|
||||||
|
}
|
||||||
|
}
|
||||||
18
harmony/src/modules/network/failover.rs
Normal file
18
harmony/src/modules/network/failover.rs
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
use async_trait::async_trait;
|
||||||
|
use log::warn;
|
||||||
|
|
||||||
|
use crate::topology::{FailoverTopology, TlsRoute, TlsRouter};
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl<T: TlsRouter> TlsRouter for FailoverTopology<T> {
|
||||||
|
async fn get_wildcard_domain(&self) -> Result<Option<String>, String> {todo!()}
|
||||||
|
|
||||||
|
/// Returns the port that this router exposes externally.
|
||||||
|
async fn get_router_port(&self) -> u16 {todo!()}
|
||||||
|
async fn install_route(&self, config: TlsRoute) -> Result<(), String> {
|
||||||
|
warn!(
|
||||||
|
"Failover topology TlsRouter capability currently defers to the primary only. Make sure to check this is OK for you. The Replica Topology WILL NOT be affected here"
|
||||||
|
);
|
||||||
|
self.primary.install_route(config).await
|
||||||
|
}
|
||||||
|
}
|
||||||
3
harmony/src/modules/network/mod.rs
Normal file
3
harmony/src/modules/network/mod.rs
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
mod failover;
|
||||||
|
mod tls_router;
|
||||||
|
pub use tls_router::*;
|
||||||
91
harmony/src/modules/network/tls_router.rs
Normal file
91
harmony/src/modules/network/tls_router.rs
Normal file
@@ -0,0 +1,91 @@
|
|||||||
|
use async_trait::async_trait;
|
||||||
|
use harmony_types::id::Id;
|
||||||
|
use serde::Serialize;
|
||||||
|
|
||||||
|
use crate::data::Version;
|
||||||
|
use crate::domain::topology::router::{TlsRoute, TlsRouter};
|
||||||
|
use crate::interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome};
|
||||||
|
use crate::inventory::Inventory;
|
||||||
|
use crate::score::Score;
|
||||||
|
use crate::topology::{K8sclient, Topology};
|
||||||
|
|
||||||
|
/// Score for provisioning a TLS passthrough route.
|
||||||
|
/// Exposes backend services via TLS passthrough (L4 TCP/SNI forwarding).
|
||||||
|
/// Agnostic to underlying router impl (OKD Route, HAProxy, Envoy, etc.).
|
||||||
|
///
|
||||||
|
/// TlsPassthroughScore relies on the TlsRouter Capability for its entire functionnality,
|
||||||
|
/// the implementation depends entirely on how the Topology implements it.
|
||||||
|
///
|
||||||
|
/// # Usage
|
||||||
|
/// ```
|
||||||
|
/// use harmony::modules::network::TlsPassthroughScore;
|
||||||
|
/// use harmony::topology::router::TlsRoute;
|
||||||
|
/// let score = TlsPassthroughScore {
|
||||||
|
/// route: TlsRoute {
|
||||||
|
/// backend: "postgres-cluster-rw".to_string(),
|
||||||
|
/// hostname: "postgres-rw.example.com".to_string(),
|
||||||
|
/// target_port: 5432,
|
||||||
|
/// },
|
||||||
|
/// };
|
||||||
|
/// ```
|
||||||
|
///
|
||||||
|
/// # Hint
|
||||||
|
///
|
||||||
|
/// **This TlsPassthroughScore should be used whenever possible.** It is effectively
|
||||||
|
/// an abstraction over the concept of tls passthrough, and it will allow much more flexible
|
||||||
|
/// usage over multiple types of Topology than using a lower level module such as
|
||||||
|
/// OKDTlsPassthroughScore.
|
||||||
|
///
|
||||||
|
/// On the other hand, some implementation specific options might not be available or practical
|
||||||
|
/// to use through this high level TlsPassthroughScore.
|
||||||
|
#[derive(Debug, Clone, Serialize)]
|
||||||
|
pub struct TlsPassthroughScore {
|
||||||
|
pub route: TlsRoute,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Topology + K8sclient + TlsRouter + Send + Sync> Score<T> for TlsPassthroughScore {
|
||||||
|
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
|
||||||
|
Box::new(TlsPassthroughInterpret {
|
||||||
|
tls_route: self.route.clone(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn name(&self) -> String {
|
||||||
|
format!(
|
||||||
|
"TlsRouterScore({}:{} → {})",
|
||||||
|
self.route.backend, self.route.target_port, self.route.hostname
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Custom interpret: provisions the TLS passthrough route on the topology.
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
struct TlsPassthroughInterpret {
|
||||||
|
tls_route: TlsRoute,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl<T: Topology + K8sclient + TlsRouter + Send + Sync> Interpret<T> for TlsPassthroughInterpret {
|
||||||
|
fn get_name(&self) -> InterpretName {
|
||||||
|
InterpretName::Custom("TlsRouterInterpret")
|
||||||
|
}
|
||||||
|
fn get_version(&self) -> Version {
|
||||||
|
todo!()
|
||||||
|
}
|
||||||
|
fn get_status(&self) -> InterpretStatus {
|
||||||
|
todo!()
|
||||||
|
}
|
||||||
|
fn get_children(&self) -> Vec<Id> {
|
||||||
|
todo!()
|
||||||
|
}
|
||||||
|
async fn execute(&self, _inventory: &Inventory, topo: &T) -> Result<Outcome, InterpretError> {
|
||||||
|
topo.install_route(self.tls_route.clone())
|
||||||
|
.await
|
||||||
|
.map_err(|e| InterpretError::new(e.to_string()))?;
|
||||||
|
|
||||||
|
Ok(Outcome::success(format!(
|
||||||
|
"TLS route installed: {} → {}:{}",
|
||||||
|
self.tls_route.hostname, self.tls_route.backend, self.tls_route.target_port
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -52,12 +52,6 @@ pub struct OKDSetup02BootstrapInterpret {
|
|||||||
status: InterpretStatus,
|
status: InterpretStatus,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Default for OKDSetup02BootstrapInterpret {
|
|
||||||
fn default() -> Self {
|
|
||||||
Self::new()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl OKDSetup02BootstrapInterpret {
|
impl OKDSetup02BootstrapInterpret {
|
||||||
pub fn new() -> Self {
|
pub fn new() -> Self {
|
||||||
let version = Version::from("1.0.0").unwrap();
|
let version = Version::from("1.0.0").unwrap();
|
||||||
@@ -104,9 +98,9 @@ impl OKDSetup02BootstrapInterpret {
|
|||||||
InterpretError::new(format!("Failed to create okd installation directory : {e}"))
|
InterpretError::new(format!("Failed to create okd installation directory : {e}"))
|
||||||
})?;
|
})?;
|
||||||
if !exit_status.success() {
|
if !exit_status.success() {
|
||||||
return Err(InterpretError::new(
|
return Err(InterpretError::new(format!(
|
||||||
"Failed to create okd installation directory".to_string(),
|
"Failed to create okd installation directory"
|
||||||
));
|
)));
|
||||||
} else {
|
} else {
|
||||||
info!(
|
info!(
|
||||||
"Created OKD installation directory {}",
|
"Created OKD installation directory {}",
|
||||||
|
|||||||
@@ -28,7 +28,7 @@ pub struct OKDSetup03ControlPlaneScore {}
|
|||||||
|
|
||||||
impl Score<HAClusterTopology> for OKDSetup03ControlPlaneScore {
|
impl Score<HAClusterTopology> for OKDSetup03ControlPlaneScore {
|
||||||
fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
|
fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
|
||||||
Box::new(OKDSetup03ControlPlaneInterpret::new(self.clone()))
|
Box::new(OKDSetup03ControlPlaneInterpret::new())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn name(&self) -> String {
|
fn name(&self) -> String {
|
||||||
@@ -38,17 +38,15 @@ impl Score<HAClusterTopology> for OKDSetup03ControlPlaneScore {
|
|||||||
|
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, Clone)]
|
||||||
pub struct OKDSetup03ControlPlaneInterpret {
|
pub struct OKDSetup03ControlPlaneInterpret {
|
||||||
score: OKDSetup03ControlPlaneScore,
|
|
||||||
version: Version,
|
version: Version,
|
||||||
status: InterpretStatus,
|
status: InterpretStatus,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl OKDSetup03ControlPlaneInterpret {
|
impl OKDSetup03ControlPlaneInterpret {
|
||||||
pub fn new(score: OKDSetup03ControlPlaneScore) -> Self {
|
pub fn new() -> Self {
|
||||||
let version = Version::from("1.0.0").unwrap();
|
let version = Version::from("1.0.0").unwrap();
|
||||||
Self {
|
Self {
|
||||||
version,
|
version,
|
||||||
score,
|
|
||||||
status: InterpretStatus::QUEUED,
|
status: InterpretStatus::QUEUED,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -159,7 +157,7 @@ impl OKDSetup03ControlPlaneInterpret {
|
|||||||
}
|
}
|
||||||
.to_string();
|
.to_string();
|
||||||
|
|
||||||
debug!("[ControlPlane] iPXE content template:\n{}", content);
|
debug!("[ControlPlane] iPXE content template:\n{content}");
|
||||||
|
|
||||||
// Create and apply an iPXE boot file for each node.
|
// Create and apply an iPXE boot file for each node.
|
||||||
for node in nodes {
|
for node in nodes {
|
||||||
@@ -189,16 +187,13 @@ impl OKDSetup03ControlPlaneInterpret {
|
|||||||
/// Prompts the user to reboot the target control plane nodes.
|
/// Prompts the user to reboot the target control plane nodes.
|
||||||
async fn reboot_targets(&self, nodes: &Vec<PhysicalHost>) -> Result<(), InterpretError> {
|
async fn reboot_targets(&self, nodes: &Vec<PhysicalHost>) -> Result<(), InterpretError> {
|
||||||
let node_ids: Vec<String> = nodes.iter().map(|n| n.id.to_string()).collect();
|
let node_ids: Vec<String> = nodes.iter().map(|n| n.id.to_string()).collect();
|
||||||
info!(
|
info!("[ControlPlane] Requesting reboot for control plane nodes: {node_ids:?}",);
|
||||||
"[ControlPlane] Requesting reboot for control plane nodes: {:?}",
|
|
||||||
node_ids
|
|
||||||
);
|
|
||||||
|
|
||||||
let confirmation = inquire::Confirm::new(
|
let confirmation = inquire::Confirm::new(
|
||||||
&format!("Please reboot the {} control plane nodes ({}) to apply their PXE configuration. Press enter when ready.", nodes.len(), node_ids.join(", ")),
|
&format!("Please reboot the {} control plane nodes ({}) to apply their PXE configuration. Press enter when ready.", nodes.len(), node_ids.join(", ")),
|
||||||
)
|
)
|
||||||
.prompt()
|
.prompt()
|
||||||
.map_err(|e| InterpretError::new(format!("User prompt failed: {}", e)))?;
|
.map_err(|e| InterpretError::new(format!("User prompt failed: {e}")))?;
|
||||||
|
|
||||||
if !confirmation {
|
if !confirmation {
|
||||||
return Err(InterpretError::new(
|
return Err(InterpretError::new(
|
||||||
@@ -208,19 +203,6 @@ impl OKDSetup03ControlPlaneInterpret {
|
|||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Placeholder for automating network bonding configuration.
|
|
||||||
async fn persist_network_bond(&self) -> Result<(), InterpretError> {
|
|
||||||
// Generate MC or NNCP from inventory NIC data; apply via ignition or post-join.
|
|
||||||
info!("[ControlPlane] Ensuring persistent bonding via MachineConfig/NNCP");
|
|
||||||
inquire::Confirm::new(
|
|
||||||
"Network configuration for control plane nodes is not automated yet. Configure it manually if needed.",
|
|
||||||
)
|
|
||||||
.prompt()
|
|
||||||
.map_err(|e| InterpretError::new(format!("User prompt failed: {}", e)))?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
@@ -259,9 +241,6 @@ impl Interpret<HAClusterTopology> for OKDSetup03ControlPlaneInterpret {
|
|||||||
// 4. Reboot the nodes to start the OS installation.
|
// 4. Reboot the nodes to start the OS installation.
|
||||||
self.reboot_targets(&nodes).await?;
|
self.reboot_targets(&nodes).await?;
|
||||||
|
|
||||||
// 5. Placeholder for post-boot network configuration (e.g., bonding).
|
|
||||||
self.persist_network_bond().await?;
|
|
||||||
|
|
||||||
// TODO: Implement a step to wait for the control plane nodes to join the cluster
|
// TODO: Implement a step to wait for the control plane nodes to join the cluster
|
||||||
// and for the cluster operators to become available. This would be similar to
|
// and for the cluster operators to become available. This would be similar to
|
||||||
// the `wait-for bootstrap-complete` command.
|
// the `wait-for bootstrap-complete` command.
|
||||||
|
|||||||
@@ -77,6 +77,8 @@ impl OKDBootstrapLoadBalancerScore {
|
|||||||
address: topology.bootstrap_host.ip.to_string(),
|
address: topology.bootstrap_host.ip.to_string(),
|
||||||
port,
|
port,
|
||||||
});
|
});
|
||||||
|
|
||||||
|
backend.dedup();
|
||||||
backend
|
backend
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user