Compare commits: a12d12aa4f ... snapshot-l (69 commits)
Cargo.lock (generated): 83 lines changed

@@ -690,6 +690,41 @@ dependencies = [
  "tokio",
 ]
 
+[[package]]
+name = "brocade-snmp-server"
+version = "0.1.0"
+dependencies = [
+ "base64 0.22.1",
+ "brocade",
+ "env_logger",
+ "harmony",
+ "harmony_cli",
+ "harmony_macros",
+ "harmony_secret",
+ "harmony_types",
+ "log",
+ "serde",
+ "tokio",
+ "url",
+]
+
+[[package]]
+name = "brocade-switch"
+version = "0.1.0"
+dependencies = [
+ "async-trait",
+ "brocade",
+ "env_logger",
+ "harmony",
+ "harmony_cli",
+ "harmony_macros",
+ "harmony_types",
+ "log",
+ "serde",
+ "tokio",
+ "url",
+]
+
 [[package]]
 name = "brotli"
 version = "8.0.2"
@@ -1835,6 +1870,21 @@ dependencies = [
  "url",
 ]
 
+[[package]]
+name = "example-operatorhub-catalogsource"
+version = "0.1.0"
+dependencies = [
+ "cidr",
+ "env_logger",
+ "harmony",
+ "harmony_cli",
+ "harmony_macros",
+ "harmony_types",
+ "log",
+ "tokio",
+ "url",
+]
+
 [[package]]
 name = "example-opnsense"
 version = "0.1.0"
@@ -1853,6 +1903,25 @@ dependencies = [
  "url",
 ]
 
+[[package]]
+name = "example-opnsense-node-exporter"
+version = "0.1.0"
+dependencies = [
+ "async-trait",
+ "cidr",
+ "env_logger",
+ "harmony",
+ "harmony_cli",
+ "harmony_macros",
+ "harmony_secret",
+ "harmony_secret_derive",
+ "harmony_types",
+ "log",
+ "serde",
+ "tokio",
+ "url",
+]
+
 [[package]]
 name = "example-pxe"
 version = "0.1.0"
@@ -2479,6 +2548,19 @@ dependencies = [
  "tokio",
 ]
 
+[[package]]
+name = "harmony_inventory_builder"
+version = "0.1.0"
+dependencies = [
+ "cidr",
+ "harmony",
+ "harmony_cli",
+ "harmony_macros",
+ "harmony_types",
+ "tokio",
+ "url",
+]
+
 [[package]]
 name = "harmony_macros"
 version = "0.1.0"
@@ -2544,6 +2626,7 @@ dependencies = [
 name = "harmony_types"
 version = "0.1.0"
 dependencies = [
+ "log",
  "rand 0.9.2",
  "serde",
  "url",
adr/015-higher-order-topologies.md (new file, 114 lines)
# Architecture Decision Record: Higher-Order Topologies

**Initial Author:** Jean-Gabriel Gill-Couture

**Initial Date:** 2025-12-08

**Last Updated Date:** 2025-12-08

## Status

Implemented

## Context

Harmony models infrastructure as **Topologies** (deployment targets like `K8sAnywhereTopology`, `LinuxHostTopology`) implementing **Capabilities** (technology traits like `PostgreSQL`, `Docker`).

**Higher-Order Topologies** (e.g., `FailoverTopology<T>`) compose and orchestrate capabilities *across* multiple underlying topologies (e.g., a primary and a replica `T`).

A naive design requires a manual `impl Capability for HigherOrderTopology<T>` *per T, per capability*, causing:
- **Impl explosion**: N topologies × M capabilities = N×M boilerplate impls.
- **ISP violation**: Topologies are forced to implement unrelated capabilities.
- **Maintenance hell**: A new topology needs impls for *all* orchestrated capabilities; a new capability needs impls for *all* topologies and higher-order types.
- **Barrier to extension**: Users can't easily add topologies without leaving todos or panics behind.

This makes scaling Harmony impractical as the ecosystem grows.

## Decision

Use **blanket trait impls** on higher-order topologies to *automatically* derive orchestration:

````rust
/// Higher-Order Topology: Orchestrates capabilities across sub-topologies.
pub struct FailoverTopology<T> {
    /// Primary sub-topology.
    primary: T,
    /// Replica sub-topology.
    replica: T,
}

/// Automatically provides PostgreSQL failover for *any* `T: PostgreSQL`.
/// Delegates to primary for queries; orchestrates deploy across both.
#[async_trait]
impl<T: PostgreSQL> PostgreSQL for FailoverTopology<T> {
    async fn deploy(&self, config: &PostgreSQLConfig) -> Result<String, String> {
        // Deploy primary; extract certs/endpoint;
        // deploy replica with pg_basebackup + TLS passthrough.
        // (Full impl logged/elaborated.)
    }

    // Delegate queries to primary.
    async fn get_replication_certs(&self, cluster_name: &str) -> Result<ReplicationCerts, String> {
        self.primary.get_replication_certs(cluster_name).await
    }
    // ...
}

/// Similarly for other capabilities.
#[async_trait]
impl<T: Docker> Docker for FailoverTopology<T> {
    // Failover Docker orchestration.
}
````

**Key properties:**
- **Auto-derivation**: `Failover<K8sAnywhere>` gets `PostgreSQL` iff `K8sAnywhere: PostgreSQL`.
- **No boilerplate**: One blanket impl per capability *per higher-order type*.

## Rationale

- **Composition via generics**: The Rust trait solver auto-selects impls; zero runtime cost.
- **Compile-time safety**: A missing `T: Capability` bound is a compile error (no panics).
- **Scalable**: O(capabilities) impls per higher-order type; a new `T` works automatically.
- **ISP-respecting**: Capabilities only surface if the sub-topology provides them.
- **Centralized logic**: Orchestration (e.g., cert propagation) lives in one place.

**Example usage:**

````rust
// ✅ Works: K8sAnywhere: PostgreSQL → Failover provides failover PG
let pg_failover: FailoverTopology<K8sAnywhereTopology> = ...;
pg_failover.deploy_pg(config).await;

// ✅ Works: LinuxHost: Docker → Failover provides failover Docker
let docker_failover: FailoverTopology<LinuxHostTopology> = ...;
docker_failover.deploy_docker(...).await;

// ❌ Compile fail: K8sAnywhere !: Docker
let invalid: FailoverTopology<K8sAnywhereTopology>;
invalid.deploy_docker(...); // `T: Docker` bound unsatisfied
````

## Consequences

**Pros:**
- **Extensible**: A new topology `AWSTopology: PostgreSQL` instantly yields `Failover<AWSTopology>: PostgreSQL`.
- **Lean**: No useless impls (e.g., no `K8sAnywhere: Docker`).
- **Observable**: Logs trace every step.

**Cons:**
- **Monomorphization**: Generics generate code per `T` (mitigated: few `T`s in practice).
- **Delegation opacity**: Relies on rustdoc/logs to reveal internals.

## Alternatives considered

| Approach | Pros | Cons |
|----------|------|------|
| **Manual per-T impls**<br>`impl PG for Failover<K8s> {..}`<br>`impl PG for Failover<Linux> {..}` | Explicit control | N×M explosion; violates ISP; hard to extend. |
| **Dynamic trait objects**<br>`Box<dyn AnyCapability>` | Runtime flexibility | Perf hit; type erasure; error-prone dispatch. |
| **Mega-topology trait**<br>All-in-one `OrchestratedTopology` | Simple wiring | Monolithic; poor composition. |
| **Registry dispatch**<br>Runtime capability lookup | Decoupled | Complex; no compile safety; perf/debug overhead. |

**Selected**: Blanket impls leverage Rust generics for safe, zero-cost composition.

## Additional Notes

- The pattern applies to `MultisiteTopology<T>`, `ShardedTopology<T>`, etc. (sketched below).
- `FailoverTopology` in `failover.rs` is the first implementation.
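As a companion to the note above, here is a minimal sketch of how the same blanket-impl pattern could extend to `MultisiteTopology<T>`. The `PostgreSQL` trait and types are the ones from the example file below; the struct's fields and its fan-out strategy are illustrative assumptions, not the shipped implementation.

````rust
use async_trait::async_trait;

/// Hypothetical higher-order topology: fan a capability out across N sites.
/// Field names and orchestration strategy are illustrative assumptions.
pub struct MultisiteTopology<T> {
    /// One sub-topology per site.
    pub sites: Vec<T>,
}

/// Same blanket-impl trick as `FailoverTopology`: any `T: PostgreSQL`
/// automatically makes `MultisiteTopology<T>: PostgreSQL`.
#[async_trait]
impl<T: PostgreSQL + Send + Sync> PostgreSQL for MultisiteTopology<T> {
    async fn deploy(&self, config: &PostgreSQLConfig) -> Result<String, String> {
        let mut results = Vec::new();
        // Deploy site by site so each site can observe the previous one's state.
        for site in &self.sites {
            results.push(site.deploy(config).await?);
        }
        Ok(format!("Multisite PG deployed: {}", results.join(" | ")))
    }

    async fn get_replication_certs(&self, cluster_name: &str) -> Result<ReplicationCerts, String> {
        // Delegate to the first site, mirroring FailoverTopology's primary delegation.
        self.sites
            .first()
            .ok_or_else(|| "MultisiteTopology has no sites".to_string())?
            .get_replication_certs(cluster_name)
            .await
    }
}
````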
adr/015-higher-order-topologies/example.rs (new file, 153 lines)
//! Example of Higher-Order Topologies in Harmony.
//! Demonstrates how `FailoverTopology<T>` automatically provides failover for *any* capability
//! supported by a sub-topology `T` via blanket trait impls.
//!
//! Key insight: No manual impls per T or capability -- scales effortlessly.
//! Users can:
//! - Write new `Topology` (impl capabilities on a struct).
//! - Compose with `FailoverTopology` (gets capabilities if T has them).
//! - Compile fails if capability missing (safety).

use async_trait::async_trait;
use tokio;

/// Capability trait: Deploy and manage PostgreSQL.
#[async_trait]
pub trait PostgreSQL {
    async fn deploy(&self, config: &PostgreSQLConfig) -> Result<String, String>;
    async fn get_replication_certs(&self, cluster_name: &str) -> Result<ReplicationCerts, String>;
}

/// Capability trait: Deploy Docker.
#[async_trait]
pub trait Docker {
    async fn deploy_docker(&self) -> Result<String, String>;
}

/// Configuration for PostgreSQL deployments.
#[derive(Clone)]
pub struct PostgreSQLConfig;

/// Replication certificates.
#[derive(Clone)]
pub struct ReplicationCerts;

/// Concrete topology: Kubernetes Anywhere (supports PostgreSQL).
#[derive(Clone)]
pub struct K8sAnywhereTopology;

#[async_trait]
impl PostgreSQL for K8sAnywhereTopology {
    async fn deploy(&self, _config: &PostgreSQLConfig) -> Result<String, String> {
        // Real impl: Use k8s helm chart, operator, etc.
        Ok("K8sAnywhere PostgreSQL deployed".to_string())
    }

    async fn get_replication_certs(&self, _cluster_name: &str) -> Result<ReplicationCerts, String> {
        Ok(ReplicationCerts)
    }
}

/// Concrete topology: Linux Host (supports Docker).
#[derive(Clone)]
pub struct LinuxHostTopology;

#[async_trait]
impl Docker for LinuxHostTopology {
    async fn deploy_docker(&self) -> Result<String, String> {
        // Real impl: Install/configure Docker on host.
        Ok("LinuxHost Docker deployed".to_string())
    }
}

/// Higher-Order Topology: Composes multiple sub-topologies (primary + replica).
/// Automatically derives *all* capabilities of `T` with failover orchestration.
///
/// - If `T: PostgreSQL`, then `FailoverTopology<T>: PostgreSQL` (blanket impl).
/// - Same for `Docker`, etc. No boilerplate!
/// - Compile-time safe: Missing `T: Capability` → error.
#[derive(Clone)]
pub struct FailoverTopology<T> {
    /// Primary sub-topology.
    pub primary: T,
    /// Replica sub-topology.
    pub replica: T,
}

/// Blanket impl: Failover PostgreSQL if T provides PostgreSQL.
/// Delegates reads to primary; deploys to both.
#[async_trait]
impl<T: PostgreSQL + Send + Sync + Clone> PostgreSQL for FailoverTopology<T> {
    async fn deploy(&self, config: &PostgreSQLConfig) -> Result<String, String> {
        // Orchestrate: Deploy primary first, then replica (e.g., via pg_basebackup).
        let primary_result = self.primary.deploy(config).await?;
        let replica_result = self.replica.deploy(config).await?;
        Ok(format!("Failover PG deployed: {} | {}", primary_result, replica_result))
    }

    async fn get_replication_certs(&self, cluster_name: &str) -> Result<ReplicationCerts, String> {
        // Delegate to primary (replica follows).
        self.primary.get_replication_certs(cluster_name).await
    }
}

/// Blanket impl: Failover Docker if T provides Docker.
#[async_trait]
impl<T: Docker + Send + Sync + Clone> Docker for FailoverTopology<T> {
    async fn deploy_docker(&self) -> Result<String, String> {
        // Orchestrate across primary + replica.
        let primary_result = self.primary.deploy_docker().await?;
        let replica_result = self.replica.deploy_docker().await?;
        Ok(format!("Failover Docker deployed: {} | {}", primary_result, replica_result))
    }
}

#[tokio::main]
async fn main() {
    let config = PostgreSQLConfig;

    println!("=== ✅ PostgreSQL Failover (K8sAnywhere supports PG) ===");
    let pg_failover = FailoverTopology {
        primary: K8sAnywhereTopology,
        replica: K8sAnywhereTopology,
    };
    let result = pg_failover.deploy(&config).await.unwrap();
    println!("Result: {}", result);

    println!("\n=== ✅ Docker Failover (LinuxHost supports Docker) ===");
    let docker_failover = FailoverTopology {
        primary: LinuxHostTopology,
        replica: LinuxHostTopology,
    };
    let result = docker_failover.deploy_docker().await.unwrap();
    println!("Result: {}", result);

    println!("\n=== ❌ Would fail to compile (K8sAnywhere !: Docker) ===");
    // let invalid = FailoverTopology {
    //     primary: K8sAnywhereTopology,
    //     replica: K8sAnywhereTopology,
    // };
    // invalid.deploy_docker().await.unwrap(); // Error: `K8sAnywhereTopology: Docker` not satisfied!
    // Very clear error message:
    // error[E0599]: the method `deploy_docker` exists for struct `FailoverTopology<K8sAnywhereTopology>`, but its trait bounds were not satisfied
    //   --> src/main.rs:90:9
    //    |
    // 4  | pub struct FailoverTopology<T> {
    //    | ------------------------------ method `deploy_docker` not found for this struct because it doesn't satisfy `FailoverTopology<K8sAnywhereTopology>: Docker`
    // ...
    // 37 | struct K8sAnywhereTopology;
    //    | -------------------------- doesn't satisfy `K8sAnywhereTopology: Docker`
    // ...
    // 90 | invalid.deploy_docker(); // `T: Docker` bound unsatisfied
    //    |         ^^^^^^^^^^^^^ method cannot be called on `FailoverTopology<K8sAnywhereTopology>` due to unsatisfied trait bounds
    //    |
    // note: trait bound `K8sAnywhereTopology: Docker` was not satisfied
    //   --> src/main.rs:61:9
    //    |
    // 61 | impl<T: Docker + Send + Sync> Docker for FailoverTopology<T> {
    //    |         ^^^^^^                ------ -------------------
    //    |         |
    //    |         unsatisfied trait bound introduced here
    // note: the trait `Docker` must be implemented
}
New file (90 lines)

# Architecture Decision Record: Global Orchestration Mesh & The Harmony Agent

**Status:** Proposed

**Date:** 2025-12-19

## Context

Harmony is designed to enable a truly decentralized infrastructure where independent clusters—owned by different organizations or running on diverse hardware—can collaborate reliably. This vision combines the decentralization of Web3 with the performance and capabilities of Web2.

Currently, Harmony operates as a stateless CLI tool, invoked manually or via CI runners. While effective for deployment, this model presents a critical limitation: **a CLI cannot react to real-time events.**

To achieve automated failover and dynamic workload management, we need a system that is "always on." Relying on manual intervention or scheduled CI jobs to recover from a cluster failure creates unacceptable latency and prevents us from scaling to thousands of nodes.

Furthermore, we face a challenge in serving diverse workloads:
* **Financial workloads** require absolute consistency (CP: Consistency/Partition Tolerance).
* **AI/Inference workloads** require maximum availability (AP: Availability/Partition Tolerance).

There are many more use cases, but those are the two extremes.

We need a unified architecture that automates cluster coordination and supports both consistency models without requiring a complete re-architecture in the future.

## Decision

We propose a fundamental architectural evolution. It has been clear since the start of the project that Harmony would eventually need to transition from a purely ephemeral CLI tool to a system that includes a persistent **Harmony Agent**. This Agent will connect to a **Global Orchestration Mesh** based on a strongly consistent protocol.

The proposal consists of four key pillars:

### 1. The Harmony Agent (New Component)
We will develop a long-running process (Daemon/Agent) to be deployed alongside workloads.
* **Shift from CLI:** Unlike the CLI, which applies configuration and exits, the Agent maintains a persistent connection to the mesh.
* **Responsibility:** It actively monitors cluster health, participates in consensus, and executes lifecycle commands (start/stop/fence) instantly when the mesh dictates a state change (a sketch of such a loop follows).
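As a purely illustrative sketch of the reactive shape this implies (every name below is hypothetical, not a Harmony API; a channel stands in for the mesh connection, and the one-second heartbeat is an assumption):

```rust
// Hypothetical sketch of a persistent agent loop reacting to mesh events.
use std::time::Duration;
use tokio::sync::mpsc;

#[derive(Debug)]
enum MeshEvent {
    PromoteToPrimary,
    FenceWorkloads,
}

async fn agent_loop(mut events: mpsc::Receiver<MeshEvent>) {
    loop {
        tokio::select! {
            // React instantly to state changes decided by the mesh,
            // instead of waiting for the next CI run.
            Some(event) = events.recv() => match event {
                MeshEvent::PromoteToPrimary => println!("promoting local workloads"),
                MeshEvent::FenceWorkloads => println!("fencing local workloads"),
            },
            // Periodically report local health so peers can reach consensus.
            _ = tokio::time::sleep(Duration::from_secs(1)) => {
                println!("publishing health heartbeat");
            }
        }
    }
}

#[tokio::main]
async fn main() {
    let (tx, rx) = mpsc::channel(16);
    tx.send(MeshEvent::PromoteToPrimary).await.unwrap();
    agent_loop(rx).await;
}
```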
### 2. The Technology: NATS JetStream
We will utilize **NATS JetStream** as the underlying transport and consensus layer for the Agent and the Mesh.
* **Why not raw Raft?** Implementing a raw Raft library requires building and maintaining the transport layer, log compaction, snapshotting, and peer discovery manually. NATS JetStream provides a battle-tested distributed log and Key-Value store (based on Raft) out of the box, along with a high-performance pub/sub system for event propagation.
* **Role:** It will act as the "source of truth" for the cluster state.

### 3. Strong Consistency at the Mesh Layer
The mesh will operate with **Strong Consistency** by default.
* All critical cluster state changes (topology updates, lease acquisitions, leadership elections) will require consensus among the Agents.
* This ensures that in the event of a network partition, we have a mathematical guarantee of which side holds the valid state, preventing data corruption. (A lease-acquisition sketch follows this list.)
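To illustrate how JetStream's consistency primitives map onto lease acquisition, here is a minimal sketch using the `async-nats` crate (bucket, key, URL, and agent names are placeholders, and the exact API surface should be verified against whatever crate version is adopted). A KV `create` succeeds only if the key does not yet exist, which is precisely the atomic primitive a lease needs:

```rust
// Minimal lease-acquisition sketch over NATS JetStream KV (async-nats crate).
// All names are placeholders; treat the API details as assumptions to verify.
use async_nats::jetstream::{self, kv};

async fn try_acquire_lease(agent_id: &str) -> Result<bool, Box<dyn std::error::Error>> {
    let client = async_nats::connect("nats://localhost:4222").await?;
    let js = jetstream::new(client);

    // A replicated KV bucket: writes go through JetStream's Raft-based consensus.
    let store = js
        .create_key_value(kv::Config {
            bucket: "harmony-leases".to_string(),
            ..Default::default()
        })
        .await?;

    // `create` only succeeds if the key does not exist yet: whoever wins this
    // write holds the lease; every other agent observes a conflict.
    match store.create("primary-lease", agent_id.as_bytes().to_vec().into()).await {
        Ok(_revision) => Ok(true),   // we hold the lease
        Err(_conflict) => Ok(false), // another agent holds it
    }
}
```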
### 4. Public UX: The `FailoverStrategy` Abstraction
To keep the user experience stable and simple, we will expose the complexity of the mesh through a high-level configuration API, tentatively called `FailoverStrategy`.

The user defines the *intent* in their config, and the Harmony Agent automates the *execution*:

* **`FailoverStrategy::AbsoluteConsistency`**:
  * *Use Case:* Banking, transactional DBs.
  * *Behavior:* If the mesh detects a partition, the Agent on the minority side immediately halts workloads. No split-brain is ever allowed.
* **`FailoverStrategy::SplitBrainAllowed`**:
  * *Use Case:* LLM inference, stateless web servers.
  * *Behavior:* If a partition occurs, the Agent keeps workloads running to maximize uptime. State is reconciled when connectivity returns.
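One possible Rust shape for this configuration surface (the variant names come from this ADR; the enum layout and the partition handler are illustrative assumptions):

```rust
/// Illustrative sketch of the tentative `FailoverStrategy` API surface.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum FailoverStrategy {
    /// CP: halt minority-side workloads on partition; never allow split-brain.
    AbsoluteConsistency,
    /// AP: keep serving through a partition; reconcile state afterwards.
    SplitBrainAllowed,
}

/// What the Agent does when the mesh reports a partition and tells us
/// whether we are on the majority side. Hypothetical, for illustration.
pub fn on_partition(strategy: FailoverStrategy, on_majority_side: bool) -> &'static str {
    match (strategy, on_majority_side) {
        (FailoverStrategy::AbsoluteConsistency, false) => "halt workloads (fence minority)",
        (FailoverStrategy::AbsoluteConsistency, true) => "keep serving (quorum held)",
        (FailoverStrategy::SplitBrainAllowed, _) => "keep serving; reconcile on reconnect",
    }
}
```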
## Rationale

**The Necessity of an Agent**
You cannot automate what you do not monitor. Moving to an Agent-based model is the only way to achieve sub-second reaction times to infrastructure failures. It transforms Harmony from a deployment tool into a self-healing platform.

**Scaling & Decentralization**
To allow independent clusters to collaborate, they need a shared language. A strongly consistent mesh allows Cluster A (Organization X) and Cluster B (Organization Y) to agree on workload placement without a central authority.

**Why Strong Consistency First?**
It is technically feasible to relax a strongly consistent system to allow for "split brain" behavior (AP) when the user requests it. However, it is nearly impossible to take an eventually consistent system and force it to be strongly consistent (CP) later. By starting with strict constraints, we cover the hardest use cases (finance) immediately.

**Future Topologies**
While our immediate need is `FailoverTopology` (multi-site), this architecture supports any future topology logic:
* **`CostTopology`**: Agents negotiate to route workloads to the cluster with the cheapest spot instances.
* **`HorizontalTopology`**: Spreads a single workload across 100 clusters for massive scale.
* **`GeoTopology`**: Ensures data stays within specific legal jurisdictions.

The mesh provides the *capability* (consensus and messaging); the topology provides the *logic*.

## Consequences

**Positive**
* **Automation:** Eliminates manual failover, enabling massive scale.
* **Reliability:** Guarantees data safety for critical workloads by default.
* **Flexibility:** A single codebase serves both high-frequency trading and AI inference.
* **Stability:** The public API remains abstract, allowing us to optimize the mesh internals without breaking user code.

**Negative**
* **Deployment Complexity:** Users must now deploy and maintain a running service (the Agent) rather than just downloading a binary.
* **Engineering Complexity:** Integrating NATS JetStream and handling distributed state machines is significantly more complex than the current CLI logic.

## Implementation Plan (Short Term)

1. **Agent Bootstrap:** Create the initial scaffold for the Harmony Agent (daemon).
2. **Mesh Integration:** Prototype NATS JetStream embedding within the Agent.
3. **Strategy Implementation:** Add `FailoverStrategy` to the configuration schema and implement the logic in the Agent to read and act on it.
4. **Migration:** Transition the current manual failover scripts into event-driven logic handled by the Agent.
@@ -1,6 +1,6 @@
 use std::net::{IpAddr, Ipv4Addr};
 
-use brocade::BrocadeOptions;
+use brocade::{BrocadeOptions, ssh};
 use harmony_secret::{Secret, SecretManager};
 use harmony_types::switch::PortLocation;
 use serde::{Deserialize, Serialize};
@@ -16,23 +16,28 @@ async fn main() {
     env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init();
 
     // let ip = IpAddr::V4(Ipv4Addr::new(10, 0, 0, 250)); // old brocade @ ianlet
-    let ip = IpAddr::V4(Ipv4Addr::new(192, 168, 55, 101)); // brocade @ sto1
+    let ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); // brocade @ sto1
     // let ip = IpAddr::V4(Ipv4Addr::new(192, 168, 4, 11)); // brocade @ st
     let switch_addresses = vec![ip];
 
-    let config = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
-        .await
-        .unwrap();
+    // let config = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
+    //     .await
+    //     .unwrap();
 
     let brocade = brocade::init(
         &switch_addresses,
-        22,
-        &config.username,
-        &config.password,
-        Some(BrocadeOptions {
+        // &config.username,
+        // &config.password,
+        "admin",
+        "password",
+        BrocadeOptions {
             dry_run: true,
+            ssh: ssh::SshOptions {
+                port: 2222,
+                ..Default::default()
+            },
             ..Default::default()
-        }),
+        },
     )
     .await
     .expect("Brocade client failed to connect");
@@ -54,6 +59,7 @@ async fn main() {
     }
 
     println!("--------------");
+    todo!();
     let channel_name = "1";
     brocade.clear_port_channel(channel_name).await.unwrap();
 
@@ -1,7 +1,8 @@
 use super::BrocadeClient;
 use crate::{
     BrocadeInfo, Error, ExecutionMode, InterSwitchLink, InterfaceInfo, MacAddressEntry,
-    PortChannelId, PortOperatingMode, parse_brocade_mac_address, shell::BrocadeShell,
+    PortChannelId, PortOperatingMode, SecurityLevel, parse_brocade_mac_address,
+    shell::BrocadeShell,
 };
 
 use async_trait::async_trait;
@@ -140,7 +141,7 @@ impl BrocadeClient for FastIronClient {
 
     async fn configure_interfaces(
         &self,
-        _interfaces: Vec<(String, PortOperatingMode)>,
+        _interfaces: &Vec<(String, PortOperatingMode)>,
     ) -> Result<(), Error> {
         todo!()
     }
@@ -209,4 +210,20 @@ impl BrocadeClient for FastIronClient {
         info!("[Brocade] Port-channel '{channel_name}' cleared.");
         Ok(())
     }
+
+    async fn enable_snmp(&self, user_name: &str, auth: &str, des: &str) -> Result<(), Error> {
+        let commands = vec![
+            "configure terminal".into(),
+            "snmp-server view ALL 1 included".into(),
+            "snmp-server group public v3 priv read ALL".into(),
+            format!(
+                "snmp-server user {user_name} groupname public auth md5 auth-password {auth} priv des priv-password {des}"
+            ),
+            "exit".into(),
+        ];
+        self.shell
+            .run_commands(commands, ExecutionMode::Regular)
+            .await?;
+        Ok(())
+    }
 }
@@ -14,11 +14,12 @@ use async_trait::async_trait;
 use harmony_types::net::MacAddress;
 use harmony_types::switch::{PortDeclaration, PortLocation};
 use regex::Regex;
+use serde::Serialize;
 
 mod fast_iron;
 mod network_operating_system;
 mod shell;
-mod ssh;
+pub mod ssh;
 
 #[derive(Default, Clone, Debug)]
 pub struct BrocadeOptions {
@@ -31,6 +32,7 @@ pub struct BrocadeOptions {
 pub struct TimeoutConfig {
     pub shell_ready: Duration,
     pub command_execution: Duration,
+    pub command_output: Duration,
     pub cleanup: Duration,
     pub message_wait: Duration,
 }
@@ -40,6 +42,7 @@ impl Default for TimeoutConfig {
         Self {
             shell_ready: Duration::from_secs(10),
             command_execution: Duration::from_secs(60), // Commands like `deploy` (for a LAG) can take a while
+            command_output: Duration::from_secs(5), // Delay before logging "waiting for command output"
             cleanup: Duration::from_secs(10),
             message_wait: Duration::from_millis(500),
         }
@@ -116,7 +119,7 @@ impl fmt::Display for InterfaceType {
 }
 
 /// Defines the primary configuration mode of a switch interface, representing mutually exclusive roles.
-#[derive(Debug, PartialEq, Eq, Clone)]
+#[derive(Debug, PartialEq, Eq, Clone, Serialize)]
 pub enum PortOperatingMode {
     /// The interface is explicitly configured for Brocade fabric roles (ISL or Trunk enabled).
     Fabric,
@@ -139,12 +142,11 @@ pub enum InterfaceStatus {
 
 pub async fn init(
     ip_addresses: &[IpAddr],
-    port: u16,
     username: &str,
     password: &str,
-    options: Option<BrocadeOptions>,
+    options: BrocadeOptions,
 ) -> Result<Box<dyn BrocadeClient + Send + Sync>, Error> {
-    let shell = BrocadeShell::init(ip_addresses, port, username, password, options).await?;
+    let shell = BrocadeShell::init(ip_addresses, username, password, options).await?;
 
     let version_info = shell
         .with_session(ExecutionMode::Regular, |session| {
@@ -206,7 +208,7 @@ pub trait BrocadeClient: std::fmt::Debug {
     /// Configures a set of interfaces to be operated with a specified mode (access ports, ISL, etc.).
     async fn configure_interfaces(
         &self,
-        interfaces: Vec<(String, PortOperatingMode)>,
+        interfaces: &Vec<(String, PortOperatingMode)>,
     ) -> Result<(), Error>;
 
     /// Scans the existing configuration to find the next available (unused)
@@ -235,6 +237,15 @@ pub trait BrocadeClient: std::fmt::Debug {
         ports: &[PortLocation],
     ) -> Result<(), Error>;
 
+    /// Enables the Simple Network Management Protocol (SNMP) server on the switch.
+    ///
+    /// # Parameters
+    ///
+    /// * `user_name`: The user name for the SNMP server
+    /// * `auth`: The password used by the authentication process to verify the identity of a device
+    /// * `des`: The Data Encryption Standard algorithm key
+    async fn enable_snmp(&self, user_name: &str, auth: &str, des: &str) -> Result<(), Error>;
+
     /// Removes all configuration associated with the specified Port-Channel name.
     ///
     /// This operation should be idempotent; attempting to clear a non-existent
@@ -298,6 +309,11 @@ fn parse_brocade_mac_address(value: &str) -> Result<MacAddress, String> {
     Ok(MacAddress(bytes))
 }
 
+#[derive(Debug)]
+pub enum SecurityLevel {
+    AuthPriv(String),
+}
+
 #[derive(Debug)]
 pub enum Error {
     NetworkError(String),
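For orientation, a minimal sketch of how a caller might combine the updated `init` signature above with the new `enable_snmp` trait method (the address and all credentials are placeholders, not values from this change set):

```rust
// Sketch only: address and credentials are placeholders.
use std::net::{IpAddr, Ipv4Addr};

use brocade::BrocadeOptions;

async fn enable_monitoring() -> Result<(), brocade::Error> {
    let switch_addresses = vec![IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1))];

    // `init` now takes BrocadeOptions directly; the SSH port lives in options.ssh.port.
    let brocade = brocade::init(
        &switch_addresses,
        "admin",
        "password",
        BrocadeOptions::default(),
    )
    .await?;

    // New in this change set: configure an SNMPv3 user on the switch.
    brocade.enable_snmp("monitor-user", "auth-password", "des-key").await
}
```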
@@ -3,11 +3,12 @@ use std::str::FromStr;
 use async_trait::async_trait;
 use harmony_types::switch::{PortDeclaration, PortLocation};
 use log::{debug, info};
+use regex::Regex;
 
 use crate::{
     BrocadeClient, BrocadeInfo, Error, ExecutionMode, InterSwitchLink, InterfaceInfo,
     InterfaceStatus, InterfaceType, MacAddressEntry, PortChannelId, PortOperatingMode,
-    parse_brocade_mac_address, shell::BrocadeShell,
+    SecurityLevel, parse_brocade_mac_address, shell::BrocadeShell,
 };
 
 #[derive(Debug)]
@@ -103,13 +104,37 @@ impl NetworkOperatingSystemClient {
         };
 
         Some(Ok(InterfaceInfo {
-            name: format!("{} {}", interface_type, port_location),
+            name: format!("{interface_type} {port_location}"),
             port_location,
             interface_type,
             operating_mode,
             status,
         }))
     }
+
+    fn map_configure_interfaces_error(&self, err: Error) -> Error {
+        debug!("[Brocade] {err}");
+
+        if let Error::CommandError(message) = &err {
+            if message.contains("switchport")
+                && message.contains("Cannot configure aggregator member")
+            {
+                let re = Regex::new(r"\(conf-if-([a-zA-Z]+)-([\d/]+)\)#").unwrap();
+
+                if let Some(caps) = re.captures(message) {
+                    let interface_type = &caps[1];
+                    let port_location = &caps[2];
+                    let interface = format!("{interface_type} {port_location}");
+
+                    return Error::CommandError(format!(
+                        "Cannot configure interface '{interface}', it is a member of a port-channel (LAG)"
+                    ));
+                }
+            }
+        }
+
+        err
+    }
 }
 
 #[async_trait]
@@ -162,7 +187,7 @@ impl BrocadeClient for NetworkOperatingSystemClient {
 
     async fn configure_interfaces(
         &self,
-        interfaces: Vec<(String, PortOperatingMode)>,
+        interfaces: &Vec<(String, PortOperatingMode)>,
     ) -> Result<(), Error> {
         info!("[Brocade] Configuring {} interface(s)...", interfaces.len());
 
@@ -179,9 +204,12 @@ impl BrocadeClient for NetworkOperatingSystemClient {
             PortOperatingMode::Trunk => {
                 commands.push("switchport".into());
                 commands.push("switchport mode trunk".into());
-                commands.push("no spanning-tree shutdown".into());
+                commands.push("switchport trunk allowed vlan all".into());
+                commands.push("no switchport trunk tag native-vlan".into());
+                commands.push("spanning-tree shutdown".into());
                 commands.push("no fabric isl enable".into());
                 commands.push("no fabric trunk enable".into());
+                commands.push("no shutdown".into());
             }
             PortOperatingMode::Access => {
                 commands.push("switchport".into());
@@ -197,11 +225,10 @@ impl BrocadeClient for NetworkOperatingSystemClient {
             commands.push("exit".into());
         }
 
-        commands.push("write memory".into());
-
         self.shell
             .run_commands(commands, ExecutionMode::Regular)
-            .await?;
+            .await
+            .map_err(|err| self.map_configure_interfaces_error(err))?;
 
         info!("[Brocade] Interfaces configured.");
 
@@ -213,7 +240,7 @@ impl BrocadeClient for NetworkOperatingSystemClient {
 
         let output = self
             .shell
-            .run_command("show port-channel", ExecutionMode::Regular)
+            .run_command("show port-channel summary", ExecutionMode::Regular)
             .await?;
 
         let used_ids: Vec<u8> = output
@@ -248,7 +275,12 @@ impl BrocadeClient for NetworkOperatingSystemClient {
         ports: &[PortLocation],
     ) -> Result<(), Error> {
         info!(
-            "[Brocade] Configuring port-channel '{channel_name} {channel_id}' with ports: {ports:?}"
+            "[Brocade] Configuring port-channel '{channel_id} {channel_name}' with ports: {}",
+            ports
+                .iter()
+                .map(|p| format!("{p}"))
+                .collect::<Vec<String>>()
+                .join(", ")
         );
 
         let interfaces = self.get_interfaces().await?;
@@ -276,8 +308,6 @@ impl BrocadeClient for NetworkOperatingSystemClient {
             commands.push("exit".into());
         }
 
-        commands.push("write memory".into());
-
         self.shell
             .run_commands(commands, ExecutionMode::Regular)
             .await?;
@@ -294,7 +324,6 @@ impl BrocadeClient for NetworkOperatingSystemClient {
             "configure terminal".into(),
             format!("no interface port-channel {}", channel_name),
             "exit".into(),
-            "write memory".into(),
         ];
 
         self.shell
@@ -304,4 +333,20 @@ impl BrocadeClient for NetworkOperatingSystemClient {
         info!("[Brocade] Port-channel '{channel_name}' cleared.");
         Ok(())
     }
+
+    async fn enable_snmp(&self, user_name: &str, auth: &str, des: &str) -> Result<(), Error> {
+        let commands = vec![
+            "configure terminal".into(),
+            "snmp-server view ALL 1 included".into(),
+            "snmp-server group public v3 priv read ALL".into(),
+            format!(
+                "snmp-server user {user_name} groupname public auth md5 auth-password {auth} priv des priv-password {des}"
+            ),
+            "exit".into(),
+        ];
+        self.shell
+            .run_commands(commands, ExecutionMode::Regular)
+            .await?;
+        Ok(())
+    }
 }
@@ -16,7 +16,6 @@ use tokio::time::timeout;
 #[derive(Debug)]
 pub struct BrocadeShell {
     ip: IpAddr,
-    port: u16,
     username: String,
     password: String,
     options: BrocadeOptions,
@@ -27,33 +26,31 @@ pub struct BrocadeShell {
 impl BrocadeShell {
     pub async fn init(
         ip_addresses: &[IpAddr],
-        port: u16,
         username: &str,
         password: &str,
-        options: Option<BrocadeOptions>,
+        options: BrocadeOptions,
     ) -> Result<Self, Error> {
         let ip = ip_addresses
             .first()
             .ok_or_else(|| Error::ConfigurationError("No IP addresses provided".to_string()))?;
 
-        let base_options = options.unwrap_or_default();
-        let options = ssh::try_init_client(username, password, ip, base_options).await?;
+        let brocade_ssh_client_options =
+            ssh::try_init_client(username, password, ip, options).await?;
 
         Ok(Self {
             ip: *ip,
-            port,
             username: username.to_string(),
             password: password.to_string(),
             before_all_commands: vec![],
             after_all_commands: vec![],
-            options,
+            options: brocade_ssh_client_options,
         })
     }
 
     pub async fn open_session(&self, mode: ExecutionMode) -> Result<BrocadeSession, Error> {
         BrocadeSession::open(
             self.ip,
-            self.port,
+            self.options.ssh.port,
             &self.username,
             &self.password,
             self.options.clone(),
@@ -211,7 +208,7 @@ impl BrocadeSession {
         let mut output = Vec::new();
         let start = Instant::now();
         let read_timeout = Duration::from_millis(500);
-        let log_interval = Duration::from_secs(3);
+        let log_interval = Duration::from_secs(5);
         let mut last_log = Instant::now();
 
         loop {
@@ -221,7 +218,9 @@ impl BrocadeSession {
                 ));
             }
 
-            if start.elapsed() > Duration::from_secs(5) && last_log.elapsed() > log_interval {
+            if start.elapsed() > self.options.timeouts.command_output
+                && last_log.elapsed() > log_interval
+            {
                 info!("[Brocade] Waiting for command output...");
                 last_log = Instant::now();
             }
@@ -276,7 +275,7 @@ impl BrocadeSession {
         let output_lower = output.to_lowercase();
         if ERROR_PATTERNS.iter().any(|&p| output_lower.contains(p)) {
             return Err(Error::CommandError(format!(
-                "Command '{command}' failed: {}",
+                "Command error: {}",
                 output.trim()
             )));
         }
@@ -2,6 +2,7 @@ use std::borrow::Cow;
 use std::sync::Arc;
 
 use async_trait::async_trait;
+use log::debug;
 use russh::client::Handler;
 use russh::kex::DH_G1_SHA1;
 use russh::kex::ECDH_SHA2_NISTP256;
@@ -10,29 +11,43 @@ use russh_keys::key::SSH_RSA;
 use super::BrocadeOptions;
 use super::Error;
 
-#[derive(Default, Clone, Debug)]
+#[derive(Clone, Debug)]
 pub struct SshOptions {
     pub preferred_algorithms: russh::Preferred,
+    pub port: u16,
+}
+
+impl Default for SshOptions {
+    fn default() -> Self {
+        Self {
+            preferred_algorithms: Default::default(),
+            port: 22,
+        }
+    }
 }
 
 impl SshOptions {
-    fn ecdhsa_sha2_nistp256() -> Self {
+    fn ecdhsa_sha2_nistp256(port: u16) -> Self {
         Self {
             preferred_algorithms: russh::Preferred {
                 kex: Cow::Borrowed(&[ECDH_SHA2_NISTP256]),
                 key: Cow::Borrowed(&[SSH_RSA]),
                 ..Default::default()
             },
+            port,
+            ..Default::default()
         }
     }
 
-    fn legacy() -> Self {
+    fn legacy(port: u16) -> Self {
         Self {
             preferred_algorithms: russh::Preferred {
                 kex: Cow::Borrowed(&[DH_G1_SHA1]),
                 key: Cow::Borrowed(&[SSH_RSA]),
                 ..Default::default()
            },
+            port,
+            ..Default::default()
         }
     }
 }
@@ -57,18 +72,21 @@ pub async fn try_init_client(
     ip: &std::net::IpAddr,
     base_options: BrocadeOptions,
 ) -> Result<BrocadeOptions, Error> {
+    let mut default = SshOptions::default();
+    default.port = base_options.ssh.port;
     let ssh_options = vec![
-        SshOptions::default(),
-        SshOptions::ecdhsa_sha2_nistp256(),
-        SshOptions::legacy(),
+        default,
+        SshOptions::ecdhsa_sha2_nistp256(base_options.ssh.port),
+        SshOptions::legacy(base_options.ssh.port),
     ];
 
     for ssh in ssh_options {
         let opts = BrocadeOptions {
-            ssh,
+            ssh: ssh.clone(),
             ..base_options.clone()
         };
-        let client = create_client(*ip, 22, username, password, &opts).await;
+        debug!("Creating client {ip}:{} {username}", ssh.port);
+        let client = create_client(*ip, ssh.port, username, password, &opts).await;
 
         match client {
             Ok(_) => {
BIN data/pxe/okd/http_files/harmony_inventory_agent (stored with Git LFS); binary file not shown.
docs/doc-clone-and-restore-coreos.md (new file, 133 lines)
## Working procedure to clone and restore a CoreOS disk from an OKD cluster

### **Step 1 - Take a backup**
```
sudo dd if=/dev/old of=/dev/backup status=progress
```

### **Step 2 - Clone the beginning of the old disk to the new one**
```
sudo dd if=/dev/old of=/dev/new status=progress count=1000 bs=1M
```

### **Step 3 - Verify and modify disk partitions**
List the disk partitions:
```
sgdisk -p /dev/new
```
If the new disk is smaller than the old disk and there is free space on the old disk's XFS partition, modify the partitions of the new disk:
```
gdisk /dev/new
```
Inside gdisk, use these commands:
```
v -> verify table
p -> print table
d -> select partition to delete
n -> recreate partition with the same partition number as the deleted one
```
For the end sector, either specify the new end or just press Enter for the maximum available.
When asked about the partition type, enter the same type code (the old one is shown).
```
p -> to verify
w -> to write
```
Make an XFS file system on the new partition <new4>:
```
sudo mkfs.xfs -f /dev/new4
```

### **Step 4 - Copy the old PARTUUID**

**Careful here.**
Get the old PARTUUID:
```
sgdisk -i <partition_number> /dev/old_disk # Note the "Partition unique GUID"
```
Get the labels:
```
sgdisk -p /dev/old_disk # Shows partition names in the table

blkid /dev/old_disk* # Shows PARTUUIDs and labels for all partitions
```
Set the PARTUUID on the new disk:
```
sgdisk -u <partition_number>:<old_partuuid> /dev/sdc
```
Set the partition name:
```
sgdisk -c <partition_number>:"<old_name>" /dev/sdc
```
Verify all:
```
lsblk -o NAME,SIZE,PARTUUID,PARTLABEL /dev/old_disk
```

### **Step 5 - Mount disks and copy files from the old to the new disk**

Mount the file systems before copying:

```
mkdir -p /mnt/new
mkdir -p /mnt/old
mount /dev/old4 /mnt/old
mount /dev/new4 /mnt/new
```
Copy (with the -n flag, rsync runs as a dry run):
```
rsync -aAXHvn --numeric-ids /source/ /destination/
```

```
rsync -aAXHv --numeric-ids /source/ /destination/
```

### **Step 6 - Set the correct UUID for new partition 4**
To set the UUID with xfs_admin, you must unmount first.

Unmount the devices:
```
umount /mnt/new
umount /mnt/old
```

To set the correct UUID for partition 4, read it from the old partition, then apply it:
```
blkid /dev/old4
```
```
xfs_admin -U <old_uuid> /dev/new_partition
```
To set labels, get the existing one:
```
sgdisk -i 4 /dev/sda | grep "Partition name"
```
Set it:
```
sgdisk -c 4:"<label_name>" /dev/sdc

or

(check existing with xfs_admin -l /dev/old_partition)
Use xfs_admin -L <label> /dev/new_partition
```

### **Step 7 - Verify**

Verify everything:
```
sgdisk -p /dev/sda # Old disk
sgdisk -p /dev/sdc # New disk
```
```
lsblk -o NAME,SIZE,PARTUUID,PARTLABEL /dev/sda
lsblk -o NAME,SIZE,PARTUUID,PARTLABEL /dev/sdc
```
```
blkid /dev/sda* | grep UUID=
blkid /dev/sdc* | grep UUID=
```
docs/doc-remove-worker-flag.md (new file, 56 lines)
## **Remove the Worker flag from OKD control planes**

### **Context**
On OKD user-provisioned infrastructure (UPI), the control plane nodes can carry the label node-role.kubernetes.io/worker, which allows non-critical workloads to be scheduled on the control planes.

### **Observed Symptoms**
- After adding HAProxy servers to the backend, each backend appears down
- Traffic is redirected to the control planes instead of the workers
- The router-default pods are incorrectly placed on the control planes rather than on the workers
- Pods are being scheduled on the control planes, causing cluster instability

```
ss -tlnp | grep 80
```
- shows an haproxy process listening at 0.0.0.0:80 on the control planes
- same problem for port 443
- In the rook-ceph namespace, certain pods are deployed on control planes rather than on worker nodes

### **Cause**
- When installing UPI, the roles (master, worker) are not managed by the Machine Config Operator, and the control planes are made schedulable by default.

### **Diagnostic**
Check the node labels:
```
oc get nodes --show-labels | grep control-plane
```
Inspect the kubelet configuration:

```
cat /etc/systemd/system/kubelet.service
```

Find the line:
```
--node-labels=node-role.kubernetes.io/control-plane,node-role.kubernetes.io/master,node-role.kubernetes.io/worker
```
→ the presence of the worker label confirms the problem.

Verify that the flag doesn't come from the MCO:
```
oc get machineconfig | grep rendered-master
```

**Solution:**
To make the control planes non-schedulable, patch the cluster scheduler resource:

```
oc patch scheduler cluster --type merge -p '{"spec":{"mastersSchedulable":false}}'
```
After the patch is applied, the workloads can be moved off by draining the nodes:

```
oc adm cordon <cp-node>
oc adm drain <cp-node> --ignore-daemonsets --delete-emptydir-data
```
docs/modules/Multisite_PostgreSQL.md (new file, 105 lines)
# Design Document: Harmony PostgreSQL Module

**Status:** Draft
**Last Updated:** 2025-12-01
**Context:** Multi-site Data Replication & Orchestration

## 1. Overview

The Harmony PostgreSQL Module provides a high-level abstraction for deploying and managing high-availability PostgreSQL clusters across geographically distributed Kubernetes/OKD sites.

Instead of manually configuring complex replication slots, firewalls, and operator settings on each cluster, users define a single intent (a **Score**), and Harmony orchestrates the underlying infrastructure (the **Arrangement**) to establish a Primary-Replica architecture.

Currently, the implementation relies on the **CloudNativePG (CNPG)** operator as the backing engine.

## 2. Architecture

### 2.1 The Abstraction Model

Following **ADR 003 (Infrastructure Abstraction)**, Harmony separates the *intent* from the *implementation*.

1. **The Score (Intent):** The user defines a `MultisitePostgreSQL` resource. This describes *what* is needed (e.g., "A Postgres 15 cluster with 10GB storage, Primary on Site A, Replica on Site B").
2. **The Interpret (Action):** Harmony's `MultisitePostgreSQLInterpret` processes this Score and orchestrates the deployment on both sites to reach the state defined in the Score.
3. **The Capability (Implementation):** The PostgreSQL Capability is implemented by the K8sTopology; the Interpret can deploy it, configure it, and fetch information about it. The concrete implementation relies on the mature CloudNativePG operator to manage all the Kubernetes resources required (see the sketch after this list).
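The split is easiest to see in code. Below is a minimal sketch of what such a Score might look like, based on the `Score`/`Interpret` traits used by the Harmony examples elsewhere in this changeset; the field names and the interpret construction are illustrative assumptions, not the module's actual API:

```rust
use harmony::{interpret::Interpret, score::Score, topology::Topology};
use serde::Serialize;

// Hypothetical Score shape: all field names are assumptions for illustration.
#[derive(Clone, Debug, Serialize)]
struct MultisitePostgreSQLScore {
    version: String,            // e.g. "15"
    storage: String,            // e.g. "10Gi"
    primary_site: String,       // e.g. "site-paris"
    replica_sites: Vec<String>, // e.g. vec!["site-newyork".to_string()]
}

impl<T: Topology> Score<T> for MultisitePostgreSQLScore {
    fn name(&self) -> String {
        "MultisitePostgreSQLScore".to_string()
    }

    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
        // The real module would return its MultisitePostgreSQLInterpret here,
        // which reconciles both sites toward the declared state.
        todo!("construct the interpret from self.clone()")
    }
}
```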
### 2.2 Network Connectivity (TLS Passthrough)

One of the critical challenges in multi-site orchestration is secure connectivity between clusters that may have dynamic IPs or strict firewalls.

To solve this, we utilize **OKD/OpenShift Routes with TLS Passthrough**.

* **Mechanism:** The Primary site exposes a `Route` configured for `termination: passthrough`.
* **Routing:** The OpenShift HAProxy router inspects the **SNI (Server Name Indication)** header of the incoming TCP connection to route traffic to the correct PostgreSQL Pod.
* **Security:** SSL is **not** terminated at the ingress router. The encrypted stream is passed directly to the PostgreSQL instance. Mutual TLS (mTLS) authentication is handled natively by CNPG between the Primary and Replica instances.
* **Dynamic IPs:** Because connections are established via DNS hostnames (the Route URL), this architecture is resilient to dynamic IP changes at the Primary site.

#### Traffic Flow Diagram

```text
[ Site B: Replica ]                       [ Site A: Primary ]
        |                                         |
 (CNPG Instance) --[Encrypted TCP]--> (OKD HAProxy Router)
        |              (Port 443)                 |
        |                                  [SNI Inspection]
        |                                         |
        |                                         v
        |                              (PostgreSQL Primary Pod)
        |                                   (Port 5432)
```
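Because nothing is terminated at the ingress, a replica (or any client) simply dials the Route hostname on port 443 with TLS; the SNI name selects the backend. As a rough illustration (the hostname matches the usage example below; in practice CNPG generates the `externalClusters` connection parameters itself):

```rust
// Illustrative only: CNPG wires this up automatically. The point is that the
// target is a DNS hostname (the Route) on port 443, with full certificate
// verification, so the primary's public IP can change freely.
const PRIMARY_CONN: &str = "host=postgres-finance-db.apps.site-paris.example.com \
                            port=443 sslmode=verify-full dbname=app";
```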
## 3. Design Decisions

### Why CloudNativePG?

We selected CloudNativePG because it relies exclusively on standard Kubernetes primitives and uses the native PostgreSQL replication protocol (WAL shipping/streaming). This aligns with Harmony's goal of being "K8s Native."

### Why TLS Passthrough instead of VPN/NodePort?

* **NodePort:** Requires static IPs and opening non-standard ports on the firewall, which violates our security constraints.
* **VPN (e.g., Wireguard/Tailscale):** While secure, it introduces significant complexity (sidecars, key management) and external dependencies.
* **TLS Passthrough:** Leverages the existing Ingress/Router infrastructure already present in OKD. It requires zero additional software and respects multi-tenancy (Routes are namespaced).

### Configuration Philosophy (YAGNI)

The current design exposes a **generic configuration surface**. Users can configure standard parameters (storage size, CPU/memory requests, Postgres version).

**We explicitly do not expose advanced CNPG or PostgreSQL configurations at this stage.**

* **Reasoning:** We aim to keep the API surface small and manageable.
* **Future Path:** We plan to implement a "pass-through" mechanism to allow sending raw config maps or custom parameters to the underlying engine (CNPG) *only when a concrete use case arises*. Until then, we adhere to the **YAGNI (You Ain't Gonna Need It)** principle to avoid premature optimization and API bloat.
## 4. Usage Guide

To deploy a multi-site cluster, apply the `MultisitePostgreSQL` resource to the Harmony Control Plane.

### Example Manifest

```yaml
apiVersion: harmony.io/v1alpha1
kind: MultisitePostgreSQL
metadata:
  name: finance-db
  namespace: tenant-a
spec:
  version: "15"
  storage: "10Gi"
  resources:
    requests:
      cpu: "500m"
      memory: "1Gi"

  # Topology Definition
  topology:
    primary:
      site: "site-paris" # The name of the cluster in Harmony
    replicas:
      - site: "site-newyork"
```

### What happens next?

1. Harmony detects the CR.
2. **On Site Paris:** It deploys a CNPG Cluster (Primary) and creates a Passthrough Route `postgres-finance-db.apps.site-paris.example.com`.
3. **On Site New York:** It deploys a CNPG Cluster (Replica) configured with `externalClusters` pointing to the Paris Route.
4. Data begins replicating immediately over the encrypted channel.

## 5. Troubleshooting

* **Connection Refused:** Ensure the Primary site's Route has been successfully admitted by the Ingress Controller.
* **Certificate Errors:** CNPG manages mTLS automatically. If errors persist, ensure the CA secrets were correctly propagated by Harmony from the Primary to the Replica namespaces.
BIN
empty_database.sqlite
Normal file
Binary file not shown.
@@ -27,6 +27,7 @@ async fn main() {
     };
     let application = Arc::new(RustWebapp {
         name: "example-monitoring".to_string(),
+        dns: "example-monitoring.harmony.mcd".to_string(),
         project_root: PathBuf::from("./examples/rust/webapp"),
         framework: Some(RustWebFramework::Leptos),
         service_port: 3000,
20
examples/brocade_snmp_server/Cargo.toml
Normal file
@@ -0,0 +1,20 @@
[package]
name = "brocade-snmp-server"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true

[dependencies]
harmony = { path = "../../harmony" }
brocade = { path = "../../brocade" }
harmony_secret = { path = "../../harmony_secret" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
harmony_macros = { path = "../../harmony_macros" }
tokio = { workspace = true }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
base64.workspace = true
serde.workspace = true
22
examples/brocade_snmp_server/src/main.rs
Normal file
@@ -0,0 +1,22 @@
use std::net::{IpAddr, Ipv4Addr};

use harmony::{
    inventory::Inventory, modules::brocade::BrocadeEnableSnmpScore, topology::K8sAnywhereTopology,
};

#[tokio::main]
async fn main() {
    let brocade_snmp_server = BrocadeEnableSnmpScore {
        switch_ips: vec![IpAddr::V4(Ipv4Addr::new(192, 168, 1, 111))],
        dry_run: true,
    };

    harmony_cli::run(
        Inventory::autoload(),
        K8sAnywhereTopology::from_env(),
        vec![Box::new(brocade_snmp_server)],
        None,
    )
    .await
    .unwrap();
}
19
examples/brocade_switch/Cargo.toml
Normal file
@@ -0,0 +1,19 @@
[package]
name = "brocade-switch"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true

[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_macros = { path = "../../harmony_macros" }
harmony_types = { path = "../../harmony_types" }
tokio.workspace = true
url.workspace = true
async-trait.workspace = true
serde.workspace = true
log.workspace = true
env_logger.workspace = true
brocade = { path = "../../brocade" }
157
examples/brocade_switch/src/main.rs
Normal file
@@ -0,0 +1,157 @@
use std::str::FromStr;

use async_trait::async_trait;
use brocade::{BrocadeOptions, PortOperatingMode};
use harmony::{
    data::Version,
    infra::brocade::BrocadeSwitchClient,
    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
    inventory::Inventory,
    score::Score,
    topology::{
        HostNetworkConfig, PortConfig, PreparationError, PreparationOutcome, Switch, SwitchClient,
        SwitchError, Topology,
    },
};
use harmony_macros::ip;
use harmony_types::{id::Id, net::MacAddress, switch::PortLocation};
use log::{debug, info};
use serde::Serialize;

#[tokio::main]
async fn main() {
    let switch_score = BrocadeSwitchScore {
        port_channels_to_clear: vec![
            Id::from_str("17").unwrap(),
            Id::from_str("19").unwrap(),
            Id::from_str("18").unwrap(),
        ],
        ports_to_configure: vec![
            (PortLocation(2, 0, 17), PortOperatingMode::Trunk),
            (PortLocation(2, 0, 19), PortOperatingMode::Trunk),
            (PortLocation(1, 0, 18), PortOperatingMode::Trunk),
        ],
    };
    harmony_cli::run(
        Inventory::autoload(),
        SwitchTopology::new().await,
        vec![Box::new(switch_score)],
        None,
    )
    .await
    .unwrap();
}

#[derive(Clone, Debug, Serialize)]
struct BrocadeSwitchScore {
    port_channels_to_clear: Vec<Id>,
    ports_to_configure: Vec<PortConfig>,
}

impl<T: Topology + Switch> Score<T> for BrocadeSwitchScore {
    fn name(&self) -> String {
        "BrocadeSwitchScore".to_string()
    }

    #[doc(hidden)]
    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
        Box::new(BrocadeSwitchInterpret {
            score: self.clone(),
        })
    }
}

#[derive(Debug)]
struct BrocadeSwitchInterpret {
    score: BrocadeSwitchScore,
}

#[async_trait]
impl<T: Topology + Switch> Interpret<T> for BrocadeSwitchInterpret {
    async fn execute(
        &self,
        _inventory: &Inventory,
        topology: &T,
    ) -> Result<Outcome, InterpretError> {
        info!("Applying switch configuration {:?}", self.score);
        debug!(
            "Clearing port channel {:?}",
            self.score.port_channels_to_clear
        );
        topology
            .clear_port_channel(&self.score.port_channels_to_clear)
            .await
            .map_err(|e| InterpretError::new(e.to_string()))?;
        debug!("Configuring interfaces {:?}", self.score.ports_to_configure);
        topology
            .configure_interface(&self.score.ports_to_configure)
            .await
            .map_err(|e| InterpretError::new(e.to_string()))?;
        Ok(Outcome::success("switch configured".to_string()))
    }
    fn get_name(&self) -> InterpretName {
        InterpretName::Custom("BrocadeSwitchInterpret")
    }
    fn get_version(&self) -> Version {
        todo!()
    }
    fn get_status(&self) -> InterpretStatus {
        todo!()
    }
    fn get_children(&self) -> Vec<Id> {
        todo!()
    }
}

struct SwitchTopology {
    client: Box<dyn SwitchClient>,
}

#[async_trait]
impl Topology for SwitchTopology {
    fn name(&self) -> &str {
        "SwitchTopology"
    }

    async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
        Ok(PreparationOutcome::Noop)
    }
}

impl SwitchTopology {
    async fn new() -> Self {
        let mut options = BrocadeOptions::default();
        options.ssh.port = 2222;
        let client =
            BrocadeSwitchClient::init(&vec![ip!("127.0.0.1")], &"admin", &"password", options)
                .await
                .expect("Failed to connect to switch");

        let client = Box::new(client);
        Self { client }
    }
}

#[async_trait]
impl Switch for SwitchTopology {
    async fn setup_switch(&self) -> Result<(), SwitchError> {
        todo!()
    }

    async fn get_port_for_mac_address(
        &self,
        _mac_address: &MacAddress,
    ) -> Result<Option<PortLocation>, SwitchError> {
        todo!()
    }

    async fn configure_port_channel(&self, _config: &HostNetworkConfig) -> Result<(), SwitchError> {
        todo!()
    }
    async fn clear_port_channel(&self, ids: &Vec<Id>) -> Result<(), SwitchError> {
        self.client.clear_port_channel(ids).await
    }
    async fn configure_interface(&self, ports: &Vec<PortConfig>) -> Result<(), SwitchError> {
        self.client.configure_interface(ports).await
    }
}
@@ -2,7 +2,7 @@ use harmony::{
     inventory::Inventory,
     modules::{
         dummy::{ErrorScore, PanicScore, SuccessScore},
-        inventory::LaunchDiscoverInventoryAgentScore,
+        inventory::{HarmonyDiscoveryStrategy, LaunchDiscoverInventoryAgentScore},
     },
     topology::LocalhostTopology,
 };
@@ -18,6 +18,7 @@ async fn main() {
         Box::new(PanicScore {}),
         Box::new(LaunchDiscoverInventoryAgentScore {
             discovery_timeout: Some(10),
+            discovery_strategy: HarmonyDiscoveryStrategy::MDNS,
         }),
     ],
     None,
15
examples/harmony_inventory_builder/Cargo.toml
Normal file
@@ -0,0 +1,15 @@
[package]
name = "harmony_inventory_builder"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true

[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_macros = { path = "../../harmony_macros" }
harmony_types = { path = "../../harmony_types" }
tokio.workspace = true
url.workspace = true
cidr.workspace = true
11
examples/harmony_inventory_builder/build_docker.sh
Executable file
@@ -0,0 +1,11 @@
cargo build -p harmony_inventory_builder --release --target x86_64-unknown-linux-musl

SCRIPT_DIR="$(dirname ${0})"

cd "${SCRIPT_DIR}/docker/"

cp ../../../target/x86_64-unknown-linux-musl/release/harmony_inventory_builder .

docker build . -t hub.nationtech.io/harmony/harmony_inventory_builder

docker push hub.nationtech.io/harmony/harmony_inventory_builder
10
examples/harmony_inventory_builder/docker/Dockerfile
Normal file
@@ -0,0 +1,10 @@
FROM debian:12-slim

RUN mkdir /app
WORKDIR /app/

COPY harmony_inventory_builder /app/

ENV RUST_LOG=info

CMD ["sleep", "infinity"]
36
examples/harmony_inventory_builder/src/main.rs
Normal file
@@ -0,0 +1,36 @@
use harmony::{
    inventory::{HostRole, Inventory},
    modules::inventory::{DiscoverHostForRoleScore, HarmonyDiscoveryStrategy},
    topology::LocalhostTopology,
};
use harmony_macros::cidrv4;

#[tokio::main]
async fn main() {
    let discover_worker = DiscoverHostForRoleScore {
        role: HostRole::Worker,
        number_desired_hosts: 3,
        discovery_strategy: HarmonyDiscoveryStrategy::SUBNET {
            cidr: cidrv4!("192.168.0.1/25"),
            port: 25000,
        },
    };

    let discover_control_plane = DiscoverHostForRoleScore {
        role: HostRole::ControlPlane,
        number_desired_hosts: 3,
        discovery_strategy: HarmonyDiscoveryStrategy::SUBNET {
            cidr: cidrv4!("192.168.0.1/25"),
            port: 25000,
        },
    };

    harmony_cli::run(
        Inventory::autoload(),
        LocalhostTopology::new(),
        vec![Box::new(discover_worker), Box::new(discover_control_plane)],
        None,
    )
    .await
    .unwrap();
}
@@ -1,6 +1,6 @@
 use std::{
     net::{IpAddr, Ipv4Addr},
-    sync::Arc,
+    sync::{Arc, OnceLock},
 };

 use brocade::BrocadeOptions;
@@ -39,10 +39,10 @@ async fn main() {
         .expect("Failed to get credentials");

     let switches: Vec<IpAddr> = vec![ip!("192.168.33.101")];
-    let brocade_options = Some(BrocadeOptions {
+    let brocade_options = BrocadeOptions {
         dry_run: *harmony::config::DRY_RUN,
         ..Default::default()
-    });
+    };
     let switch_client = BrocadeSwitchClient::init(
         &switches,
         &switch_auth.username,
@@ -61,6 +61,7 @@ async fn main() {
     let gateway_ipv4 = Ipv4Addr::new(192, 168, 33, 1);
     let gateway_ip = IpAddr::V4(gateway_ipv4);
     let topology = harmony::topology::HAClusterTopology {
+        kubeconfig: None,
         domain_name: "ncd0.harmony.mcd".to_string(), // TODO this must be set manually correctly
         // when setting up the opnsense firewall
         router: Arc::new(UnmanagedRouter::new(
@@ -105,7 +106,9 @@ async fn main() {
             name: "wk2".to_string(),
         },
     ],
+    node_exporter: opnsense.clone(),
     switch_client: switch_client.clone(),
+    network_manager: OnceLock::new(),
 };

 let inventory = Inventory {
@@ -1,26 +0,0 @@
-use harmony::{
-    inventory::Inventory,
-    modules::monitoring::{
-        alert_channel::discord_alert_channel::DiscordWebhook,
-        okd::cluster_monitoring::OpenshiftClusterAlertScore,
-    },
-    topology::K8sAnywhereTopology,
-};
-use harmony_macros::hurl;
-
-#[tokio::main]
-async fn main() {
-    harmony_cli::run(
-        Inventory::autoload(),
-        K8sAnywhereTopology::from_env(),
-        vec![Box::new(OpenshiftClusterAlertScore {
-            receivers: vec![Box::new(DiscordWebhook {
-                name: "Webhook example".to_string(),
-                url: hurl!("http://something.o"),
-            })],
-        })],
-        None,
-    )
-    .await
-    .unwrap();
-}
@@ -4,7 +4,10 @@ use crate::topology::{get_inventory, get_topology};
 use harmony::{
     config::secret::SshKeyPair,
     data::{FileContent, FilePath},
-    modules::okd::{installation::OKDInstallationPipeline, ipxe::OKDIpxeScore},
+    modules::{
+        inventory::HarmonyDiscoveryStrategy,
+        okd::{installation::OKDInstallationPipeline, ipxe::OKDIpxeScore},
+    },
     score::Score,
     topology::HAClusterTopology,
 };
@@ -26,7 +29,8 @@ async fn main() {
         },
     })];

-    scores.append(&mut OKDInstallationPipeline::get_all_scores().await);
+    scores
+        .append(&mut OKDInstallationPipeline::get_all_scores(HarmonyDiscoveryStrategy::MDNS).await);

     harmony_cli::run(inventory, topology, scores, None)
         .await
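`get_all_scores` now takes the discovery strategy explicitly rather than assuming one. Both variants appearing in this changeset, as a small sketch:

```rust
use harmony::modules::inventory::HarmonyDiscoveryStrategy;
use harmony_macros::cidrv4;

fn strategies() -> (HarmonyDiscoveryStrategy, HarmonyDiscoveryStrategy) {
    // Multicast DNS discovery on the local segment:
    let mdns = HarmonyDiscoveryStrategy::MDNS;
    // Subnet sweep against a fixed agent port:
    let subnet = HarmonyDiscoveryStrategy::SUBNET {
        cidr: cidrv4!("192.168.0.1/25"),
        port: 25000,
    };
    (mdns, subnet)
}
```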
@@ -9,7 +9,10 @@ use harmony::{
 use harmony_macros::{ip, ipv4};
 use harmony_secret::{Secret, SecretManager};
 use serde::{Deserialize, Serialize};
-use std::{net::IpAddr, sync::Arc};
+use std::{
+    net::IpAddr,
+    sync::{Arc, OnceLock},
+};

 #[derive(Secret, Serialize, Deserialize, Debug, PartialEq)]
 struct OPNSenseFirewallConfig {
@@ -28,10 +31,10 @@ pub async fn get_topology() -> HAClusterTopology {
         .expect("Failed to get credentials");

     let switches: Vec<IpAddr> = vec![ip!("192.168.1.101")]; // TODO: Adjust me
-    let brocade_options = Some(BrocadeOptions {
+    let brocade_options = BrocadeOptions {
         dry_run: *harmony::config::DRY_RUN,
         ..Default::default()
-    });
+    };
     let switch_client = BrocadeSwitchClient::init(
         &switches,
         &switch_auth.username,
@@ -59,6 +62,7 @@ pub async fn get_topology() -> HAClusterTopology {
     let gateway_ipv4 = ipv4!("192.168.1.1");
     let gateway_ip = IpAddr::V4(gateway_ipv4);
     harmony::topology::HAClusterTopology {
+        kubeconfig: None,
         domain_name: "demo.harmony.mcd".to_string(),
         router: Arc::new(UnmanagedRouter::new(
             gateway_ip,
@@ -79,7 +83,9 @@ pub async fn get_topology() -> HAClusterTopology {
             name: "bootstrap".to_string(),
         },
         workers: vec![],
+        node_exporter: opnsense.clone(),
         switch_client: switch_client.clone(),
+        network_manager: OnceLock::new(),
     }
 }
@@ -10,7 +10,10 @@ use harmony::{
 use harmony_macros::{ip, ipv4};
 use harmony_secret::{Secret, SecretManager};
 use serde::{Deserialize, Serialize};
-use std::{net::IpAddr, sync::Arc};
+use std::{
+    net::IpAddr,
+    sync::{Arc, OnceLock},
+};

 pub async fn get_topology() -> HAClusterTopology {
     let firewall = harmony::topology::LogicalHost {
@@ -23,10 +26,10 @@ pub async fn get_topology() -> HAClusterTopology {
         .expect("Failed to get credentials");

     let switches: Vec<IpAddr> = vec![ip!("192.168.1.101")]; // TODO: Adjust me
-    let brocade_options = Some(BrocadeOptions {
+    let brocade_options = BrocadeOptions {
         dry_run: *harmony::config::DRY_RUN,
         ..Default::default()
-    });
+    };
     let switch_client = BrocadeSwitchClient::init(
         &switches,
         &switch_auth.username,
@@ -54,6 +57,7 @@ pub async fn get_topology() -> HAClusterTopology {
     let gateway_ipv4 = ipv4!("192.168.1.1");
     let gateway_ip = IpAddr::V4(gateway_ipv4);
     harmony::topology::HAClusterTopology {
+        kubeconfig: None,
         domain_name: "demo.harmony.mcd".to_string(),
         router: Arc::new(UnmanagedRouter::new(
             gateway_ip,
@@ -74,7 +78,9 @@ pub async fn get_topology() -> HAClusterTopology {
             name: "cp0".to_string(),
         },
         workers: vec![],
+        node_exporter: opnsense.clone(),
         switch_client: switch_client.clone(),
+        network_manager: OnceLock::new(),
     }
 }
18
examples/operatorhub_catalog/Cargo.toml
Normal file
@@ -0,0 +1,18 @@
[package]
name = "example-operatorhub-catalogsource"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
publish = false

[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
cidr = { workspace = true }
tokio = { workspace = true }
harmony_macros = { path = "../../harmony_macros" }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
22
examples/operatorhub_catalog/src/main.rs
Normal file
@@ -0,0 +1,22 @@
use std::str::FromStr;

use harmony::{
    inventory::Inventory,
    modules::{k8s::apps::OperatorHubCatalogSourceScore, postgresql::CloudNativePgOperatorScore},
    topology::K8sAnywhereTopology,
};

#[tokio::main]
async fn main() {
    let operatorhub_catalog = OperatorHubCatalogSourceScore::default();
    let cnpg_operator = CloudNativePgOperatorScore::default();

    harmony_cli::run(
        Inventory::autoload(),
        K8sAnywhereTopology::from_env(),
        vec![Box::new(operatorhub_catalog), Box::new(cnpg_operator)],
        None,
    )
    .await
    .unwrap();
}
@@ -1,6 +1,6 @@
 use std::{
     net::{IpAddr, Ipv4Addr},
-    sync::Arc,
+    sync::{Arc, OnceLock},
 };

 use brocade::BrocadeOptions;
@@ -35,10 +35,10 @@ async fn main() {
         .expect("Failed to get credentials");

     let switches: Vec<IpAddr> = vec![ip!("192.168.5.101")]; // TODO: Adjust me
-    let brocade_options = Some(BrocadeOptions {
+    let brocade_options = BrocadeOptions {
         dry_run: *harmony::config::DRY_RUN,
         ..Default::default()
-    });
+    };
     let switch_client = BrocadeSwitchClient::init(
         &switches,
         &switch_auth.username,
@@ -57,6 +57,7 @@ async fn main() {
     let gateway_ipv4 = Ipv4Addr::new(10, 100, 8, 1);
     let gateway_ip = IpAddr::V4(gateway_ipv4);
     let topology = harmony::topology::HAClusterTopology {
+        kubeconfig: None,
         domain_name: "demo.harmony.mcd".to_string(),
         router: Arc::new(UnmanagedRouter::new(
             gateway_ip,
@@ -77,7 +78,9 @@ async fn main() {
             name: "cp0".to_string(),
         },
         workers: vec![],
+        node_exporter: opnsense.clone(),
         switch_client: switch_client.clone(),
+        network_manager: OnceLock::new(),
     };

     let inventory = Inventory {
@@ -1,10 +1,9 @@
 [package]
-name = "example-okd-cluster-alerts"
+name = "example-opnsense-node-exporter"
 edition = "2024"
 version.workspace = true
 readme.workspace = true
 license.workspace = true
-publish = false

 [dependencies]
 harmony = { path = "../../harmony" }
@@ -19,4 +18,4 @@ log = { workspace = true }
 env_logger = { workspace = true }
 url = { workspace = true }
 serde.workspace = true
-brocade = { path = "../../brocade" }
+async-trait.workspace = true
80
examples/opnsense_node_exporter/src/main.rs
Normal file
@@ -0,0 +1,80 @@
use std::{
    net::{IpAddr, Ipv4Addr},
    sync::Arc,
};

use async_trait::async_trait;
use cidr::Ipv4Cidr;
use harmony::{
    executors::ExecutorError,
    hardware::{HostCategory, Location, PhysicalHost, SwitchGroup},
    infra::opnsense::OPNSenseManagementInterface,
    inventory::Inventory,
    modules::opnsense::node_exporter::NodeExporterScore,
    topology::{
        HAClusterTopology, LogicalHost, PreparationError, PreparationOutcome, Topology,
        UnmanagedRouter, node_exporter::NodeExporter,
    },
};
use harmony_macros::{ip, ipv4, mac_address};

#[derive(Debug)]
struct OpnSenseTopology {
    node_exporter: Arc<dyn NodeExporter>,
}

#[async_trait]
impl Topology for OpnSenseTopology {
    async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
        Ok(PreparationOutcome::Success {
            details: "Success".to_string(),
        })
    }
    fn name(&self) -> &str {
        "OpnsenseTopology"
    }
}

#[async_trait]
impl NodeExporter for OpnSenseTopology {
    async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
        self.node_exporter.ensure_initialized().await
    }

    async fn commit_config(&self) -> Result<(), ExecutorError> {
        self.node_exporter.commit_config().await
    }

    async fn reload_restart(&self) -> Result<(), ExecutorError> {
        self.node_exporter.reload_restart().await
    }
}

#[tokio::main]
async fn main() {
    let firewall = harmony::topology::LogicalHost {
        ip: ip!("192.168.1.1"),
        name: String::from("fw0"),
    };

    let opnsense = Arc::new(
        harmony::infra::opnsense::OPNSenseFirewall::new(firewall, None, "root", "opnsense").await,
    );

    let topology = OpnSenseTopology {
        node_exporter: opnsense.clone(),
    };

    let inventory = Inventory::empty();

    let node_exporter_score = NodeExporterScore {};

    harmony_cli::run(
        inventory,
        topology,
        vec![Box::new(node_exporter_score)],
        None,
    )
    .await
    .unwrap();
}
@@ -16,6 +16,7 @@ use harmony_types::net::Url;
 async fn main() {
     let application = Arc::new(RustWebapp {
         name: "test-rhob-monitoring".to_string(),
+        dns: "test-rhob-monitoring.harmony.mcd".to_string(),
         project_root: PathBuf::from("./webapp"), // Relative from 'harmony-path' param
         framework: Some(RustWebFramework::Leptos),
         service_port: 3000,
@@ -19,6 +19,7 @@ use harmony_macros::hurl;
 async fn main() {
     let application = Arc::new(RustWebapp {
         name: "harmony-example-rust-webapp".to_string(),
+        dns: "harmony-example-rust-webapp.harmony.mcd".to_string(),
         project_root: PathBuf::from("./webapp"),
         framework: Some(RustWebFramework::Leptos),
         service_port: 3000,
@@ -2,12 +2,11 @@ use harmony::{
     inventory::Inventory,
     modules::{
         application::{
-            ApplicationScore, RustWebFramework, RustWebapp,
-            features::{PackagingDeployment, rhob_monitoring::Monitoring},
+            features::{rhob_monitoring::Monitoring, PackagingDeployment}, ApplicationScore, RustWebFramework, RustWebapp
         },
         monitoring::alert_channel::discord_alert_channel::DiscordWebhook,
     },
-    topology::K8sAnywhereTopology,
+    topology::{K8sAnywhereTopology, LocalhostTopology},
 };
 use harmony_macros::hurl;
 use std::{path::PathBuf, sync::Arc};
@@ -22,8 +21,8 @@ async fn main() {
     });

     let discord_webhook = DiscordWebhook {
-        name: "harmony_demo".to_string(),
-        url: hurl!("http://not_a_url.com"),
+        name: "harmony-demo".to_string(),
+        url: hurl!("https://discord.com/api/webhooks/1415391405681021050/V6KzV41vQ7yvbn7BchejRu9C8OANxy0i2ESZOz2nvCxG8xAY3-2i3s5MS38k568JKTzH"),
     };

     let app = ApplicationScore {
@@ -16,6 +16,7 @@ use std::{path::PathBuf, sync::Arc};
 async fn main() {
     let application = Arc::new(RustWebapp {
         name: "harmony-example-tryrust".to_string(),
+        dns: "tryrust.example.harmony.mcd".to_string(),
         project_root: PathBuf::from("./tryrust.org"), // <== Project root, in this case it is a
         // submodule
         framework: Some(RustWebFramework::Leptos),
@@ -152,10 +152,10 @@ impl PhysicalHost {
     pub fn parts_list(&self) -> String {
         let PhysicalHost {
             id,
-            category,
+            category: _,
             network,
             storage,
-            labels,
+            labels: _,
             memory_modules,
             cpus,
         } = self;
@@ -226,8 +226,8 @@ impl PhysicalHost {
             speed_mhz,
             manufacturer,
             part_number,
-            serial_number,
-            rank,
+            serial_number: _,
+            rank: _,
         } = mem;
         parts_list.push_str(&format!(
             "\n{}Gb, {}Mhz, Manufacturer ({}), Part Number ({})",
@@ -4,6 +4,8 @@ use std::error::Error;
 use async_trait::async_trait;
 use derive_new::new;

+use crate::inventory::HostRole;
+
 use super::{
     data::Version, executors::ExecutorError, inventory::Inventory, topology::PreparationError,
 };
@@ -1,4 +1,6 @@
 mod repository;
+use std::fmt;
+
 pub use repository::*;

 #[derive(Debug, new, Clone)]
@@ -69,5 +71,15 @@ pub enum HostRole {
     Bootstrap,
     ControlPlane,
     Worker,
+    Storage,
 }
+
+impl fmt::Display for HostRole {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self {
+            HostRole::Bootstrap => write!(f, "Bootstrap"),
+            HostRole::ControlPlane => write!(f, "ControlPlane"),
+            HostRole::Worker => write!(f, "Worker"),
+            // The Storage arm keeps this match exhaustive for the new variant.
+            HostRole::Storage => write!(f, "Storage"),
+        }
+    }
+}
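With `Display` in place, roles can be rendered directly into logs and labels; a trivial usage sketch:

```rust
use harmony::inventory::HostRole;

fn role_label(role: &HostRole) -> String {
    // Uses the Display impl above; yields e.g. "node-role/Worker".
    format!("node-role/{role}")
}
```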
19
harmony/src/domain/topology/failover.rs
Normal file
@@ -0,0 +1,19 @@
use async_trait::async_trait;

use crate::topology::{PreparationError, PreparationOutcome, Topology};

pub struct FailoverTopology<T> {
    pub primary: T,
    pub replica: T,
}

#[async_trait]
impl<T: Send + Sync> Topology for FailoverTopology<T> {
    fn name(&self) -> &str {
        "FailoverTopology"
    }

    async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
        todo!()
    }
}
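`FailoverTopology` pairs two topologies of the same concrete type; `ensure_ready` is still a `todo!()` in this revision, so the struct is shape only for now. A minimal sketch of how a caller might assemble one (assuming the type is re-exported from `harmony::topology`):

```rust
use harmony::topology::FailoverTopology;

// Hypothetical helper: builds the primary/replica pair from two topologies
// of the same type. Purely illustrative until ensure_ready() is implemented.
fn failover_pair<T>(primary: T, replica: T) -> FailoverTopology<T> {
    FailoverTopology { primary, replica }
}
```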
@@ -1,32 +1,25 @@
 use async_trait::async_trait;
+use brocade::PortOperatingMode;
 use harmony_macros::ip;
 use harmony_types::{
+    id::Id,
     net::{MacAddress, Url},
     switch::PortLocation,
 };
-use k8s_openapi::api::core::v1::Namespace;
-use kube::api::ObjectMeta;
 use log::debug;
 use log::info;

-use crate::data::FileContent;
-use crate::executors::ExecutorError;
-use crate::hardware::PhysicalHost;
-use crate::modules::okd::crd::{
-    InstallPlanApproval, OperatorGroup, OperatorGroupSpec, Subscription, SubscriptionSpec,
-    nmstate::{self, NMState, NodeNetworkConfigurationPolicy, NodeNetworkConfigurationPolicySpec},
-};
-use crate::topology::PxeOptions;
+use crate::{data::FileContent, executors::ExecutorError, topology::node_exporter::NodeExporter};
+use crate::{infra::network_manager::OpenShiftNmStateNetworkManager, topology::PortConfig};
+use crate::{modules::inventory::HarmonyDiscoveryStrategy, topology::PxeOptions};

 use super::{
     DHCPStaticEntry, DhcpServer, DnsRecord, DnsRecordType, DnsServer, Firewall, HostNetworkConfig,
-    HttpServer, IpAddress, K8sclient, LoadBalancer, LoadBalancerService, LogicalHost,
-    PreparationError, PreparationOutcome, Router, Switch, SwitchClient, SwitchError, TftpServer,
-    Topology, k8s::K8sClient,
+    HttpServer, IpAddress, K8sclient, LoadBalancer, LoadBalancerService, LogicalHost, NetworkError,
+    NetworkManager, PreparationError, PreparationOutcome, Router, Switch, SwitchClient,
+    SwitchError, TftpServer, Topology, k8s::K8sClient,
 };
-use std::collections::BTreeMap;
-use std::sync::Arc;
+use std::sync::{Arc, OnceLock};

 #[derive(Debug, Clone)]
 pub struct HAClusterTopology {
@@ -38,10 +31,13 @@ pub struct HAClusterTopology {
     pub tftp_server: Arc<dyn TftpServer>,
     pub http_server: Arc<dyn HttpServer>,
     pub dns_server: Arc<dyn DnsServer>,
+    pub node_exporter: Arc<dyn NodeExporter>,
     pub switch_client: Arc<dyn SwitchClient>,
     pub bootstrap_host: LogicalHost,
     pub control_plane: Vec<LogicalHost>,
     pub workers: Vec<LogicalHost>,
+    pub kubeconfig: Option<String>,
+    pub network_manager: OnceLock<Arc<dyn NetworkManager>>,
 }

 #[async_trait]
@@ -60,9 +56,17 @@ impl Topology for HAClusterTopology {
 #[async_trait]
 impl K8sclient for HAClusterTopology {
     async fn k8s_client(&self) -> Result<Arc<K8sClient>, String> {
-        Ok(Arc::new(
-            K8sClient::try_default().await.map_err(|e| e.to_string())?,
-        ))
+        match &self.kubeconfig {
+            None => Ok(Arc::new(
+                K8sClient::try_default().await.map_err(|e| e.to_string())?,
+            )),
+            Some(kubeconfig) => {
+                let Some(client) = K8sClient::from_kubeconfig(kubeconfig).await else {
+                    return Err("Failed to create k8s client".to_string());
+                };
+                Ok(Arc::new(client))
+            }
+        }
     }
 }
@@ -87,208 +91,12 @@ impl HAClusterTopology {
             .to_string()
     }

-    async fn ensure_nmstate_operator_installed(&self) -> Result<(), String> {
-        // FIXME: Find a way to check nmstate is already available (get pod -n openshift-nmstate)
-        debug!("Installing NMState operator...");
-        let k8s_client = self.k8s_client().await?;
-
-        let nmstate_namespace = Namespace {
-            metadata: ObjectMeta {
-                name: Some("openshift-nmstate".to_string()),
-                finalizers: Some(vec!["kubernetes".to_string()]),
-                ..Default::default()
-            },
-            ..Default::default()
-        };
-        debug!("Creating NMState namespace: {nmstate_namespace:#?}");
-        k8s_client
-            .apply(&nmstate_namespace, None)
-            .await
-            .map_err(|e| e.to_string())?;
-
-        let nmstate_operator_group = OperatorGroup {
-            metadata: ObjectMeta {
-                name: Some("openshift-nmstate".to_string()),
-                namespace: Some("openshift-nmstate".to_string()),
-                ..Default::default()
-            },
-            spec: OperatorGroupSpec {
-                target_namespaces: vec!["openshift-nmstate".to_string()],
-            },
-        };
-        debug!("Creating NMState operator group: {nmstate_operator_group:#?}");
-        k8s_client
-            .apply(&nmstate_operator_group, None)
-            .await
-            .map_err(|e| e.to_string())?;
-
-        let nmstate_subscription = Subscription {
-            metadata: ObjectMeta {
-                name: Some("kubernetes-nmstate-operator".to_string()),
-                namespace: Some("openshift-nmstate".to_string()),
-                ..Default::default()
-            },
-            spec: SubscriptionSpec {
-                channel: Some("stable".to_string()),
-                install_plan_approval: Some(InstallPlanApproval::Automatic),
-                name: "kubernetes-nmstate-operator".to_string(),
-                source: "redhat-operators".to_string(),
-                source_namespace: "openshift-marketplace".to_string(),
-            },
-        };
-        debug!("Subscribing to NMState Operator: {nmstate_subscription:#?}");
-        k8s_client
-            .apply(&nmstate_subscription, None)
-            .await
-            .map_err(|e| e.to_string())?;
-
-        let nmstate = NMState {
-            metadata: ObjectMeta {
-                name: Some("nmstate".to_string()),
-                ..Default::default()
-            },
-            ..Default::default()
-        };
-        debug!("Creating NMState: {nmstate:#?}");
-        k8s_client
-            .apply(&nmstate, None)
-            .await
-            .map_err(|e| e.to_string())?;
-
-        Ok(())
-    }
-
-    fn get_next_bond_id(&self) -> u8 {
-        42 // FIXME: Find a better way to declare the bond id
-    }
-
-    async fn configure_bond(
-        &self,
-        host: &PhysicalHost,
-        config: &HostNetworkConfig,
-    ) -> Result<(), SwitchError> {
-        self.ensure_nmstate_operator_installed()
-            .await
-            .map_err(|e| {
-                SwitchError::new(format!(
-                    "Can't configure bond, NMState operator not available: {e}"
-                ))
-            })?;
-
-        let bond_config = self.create_bond_configuration(host, config);
-        debug!("Configuring bond for host {host:?}: {bond_config:#?}");
-        self.k8s_client()
-            .await
-            .unwrap()
-            .apply(&bond_config, None)
-            .await
-            .unwrap();
-
-        todo!()
-    }
-
-    fn create_bond_configuration(
-        &self,
-        host: &PhysicalHost,
-        config: &HostNetworkConfig,
-    ) -> NodeNetworkConfigurationPolicy {
-        let host_name = host.id.clone();
-
-        let bond_id = self.get_next_bond_id();
-        let bond_name = format!("bond{bond_id}");
-        let mut bond_mtu: Option<u32> = None;
-        let mut bond_mac_address: Option<String> = None;
-        let mut bond_ports = Vec::new();
-        let mut interfaces: Vec<nmstate::InterfaceSpec> = Vec::new();
-
-        for switch_port in &config.switch_ports {
-            let interface_name = switch_port.interface.name.clone();
-
-            interfaces.push(nmstate::InterfaceSpec {
-                name: interface_name.clone(),
-                description: Some(format!("Member of bond {bond_name}")),
-                r#type: "ethernet".to_string(),
-                state: "up".to_string(),
-                mtu: Some(switch_port.interface.mtu),
-                mac_address: Some(switch_port.interface.mac_address.to_string()),
-                ipv4: Some(nmstate::IpStackSpec {
-                    enabled: Some(false),
-                    ..Default::default()
-                }),
-                ipv6: Some(nmstate::IpStackSpec {
-                    enabled: Some(false),
-                    ..Default::default()
-                }),
-                link_aggregation: None,
-                ..Default::default()
-            });
-
-            bond_ports.push(interface_name);
-
-            // Use the first port's details for the bond mtu and mac address
-            if bond_mtu.is_none() {
-                bond_mtu = Some(switch_port.interface.mtu);
-            }
-            if bond_mac_address.is_none() {
-                bond_mac_address = Some(switch_port.interface.mac_address.to_string());
-            }
-        }
-
-        interfaces.push(nmstate::InterfaceSpec {
-            name: bond_name.clone(),
-            description: Some(format!("Network bond for host {host_name}")),
-            r#type: "bond".to_string(),
-            state: "up".to_string(),
-            mtu: bond_mtu,
-            mac_address: bond_mac_address,
-            ipv4: Some(nmstate::IpStackSpec {
-                dhcp: Some(true),
-                enabled: Some(true),
-                ..Default::default()
-            }),
-            ipv6: Some(nmstate::IpStackSpec {
-                dhcp: Some(true),
-                autoconf: Some(true),
-                enabled: Some(true),
-                ..Default::default()
-            }),
-            link_aggregation: Some(nmstate::BondSpec {
-                mode: "802.3ad".to_string(),
-                ports: bond_ports,
-                ..Default::default()
-            }),
-            ..Default::default()
-        });
-
-        NodeNetworkConfigurationPolicy {
-            metadata: ObjectMeta {
-                name: Some(format!("{host_name}-bond-config")),
-                ..Default::default()
-            },
-            spec: NodeNetworkConfigurationPolicySpec {
-                node_selector: Some(BTreeMap::from([(
-                    "kubernetes.io/hostname".to_string(),
-                    host_name.to_string(),
-                )])),
-                desired_state: nmstate::DesiredStateSpec { interfaces },
-            },
-        }
-    }
-
-    async fn configure_port_channel(
-        &self,
-        host: &PhysicalHost,
-        config: &HostNetworkConfig,
-    ) -> Result<(), SwitchError> {
-        debug!("Configuring port channel: {config:#?}");
-        let switch_ports = config.switch_ports.iter().map(|s| s.port.clone()).collect();
-
-        self.switch_client
-            .configure_port_channel(&format!("Harmony_{}", host.id), switch_ports)
-            .await
-            .map_err(|e| SwitchError::new(format!("Failed to configure switch: {e}")))?;
-
-        Ok(())
+    pub async fn network_manager(&self) -> &dyn NetworkManager {
+        let k8s_client = self.k8s_client().await.unwrap();
+
+        self.network_manager
+            .get_or_init(|| Arc::new(OpenShiftNmStateNetworkManager::new(k8s_client.clone())))
+            .as_ref()
     }

     pub fn autoload() -> Self {
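The replacement above leans on `std::sync::OnceLock` so the network manager is built lazily, once, on first use. A self-contained sketch of that pattern (names here are illustrative, not the Harmony API):

```rust
use std::sync::{Arc, OnceLock};

trait Manager: Send + Sync {
    fn describe(&self) -> String;
}

struct DefaultManager;
impl Manager for DefaultManager {
    fn describe(&self) -> String {
        "default manager".to_string()
    }
}

struct Holder {
    manager: OnceLock<Arc<dyn Manager>>,
}

impl Holder {
    // The first caller pays the construction cost; later callers get the
    // cached value. OnceLock makes this safe across threads.
    fn manager(&self) -> &dyn Manager {
        self.manager
            .get_or_init(|| Arc::new(DefaultManager))
            .as_ref()
    }
}

fn main() {
    let holder = Holder {
        manager: OnceLock::new(),
    };
    println!("{}", holder.manager().describe());
}
```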
@@ -299,6 +107,7 @@ impl HAClusterTopology {
         };

         Self {
+            kubeconfig: None,
             domain_name: "DummyTopology".to_string(),
             router: dummy_infra.clone(),
             load_balancer: dummy_infra.clone(),
@@ -307,10 +116,12 @@ impl HAClusterTopology {
             tftp_server: dummy_infra.clone(),
             http_server: dummy_infra.clone(),
             dns_server: dummy_infra.clone(),
+            node_exporter: dummy_infra.clone(),
             switch_client: dummy_infra.clone(),
             bootstrap_host: dummy_host,
             control_plane: vec![],
             workers: vec![],
+            network_manager: OnceLock::new(),
         }
     }
 }
@@ -468,25 +279,64 @@ impl HttpServer for HAClusterTopology {
|
|||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl Switch for HAClusterTopology {
|
impl Switch for HAClusterTopology {
|
||||||
async fn setup_switch(&self) -> Result<(), SwitchError> {
|
async fn setup_switch(&self) -> Result<(), SwitchError> {
|
||||||
self.switch_client.setup().await?;
|
self.switch_client.setup().await.map(|_| ())
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn get_port_for_mac_address(
|
async fn get_port_for_mac_address(
|
||||||
&self,
|
&self,
|
||||||
mac_address: &MacAddress,
|
mac_address: &MacAddress,
|
||||||
) -> Result<Option<PortLocation>, SwitchError> {
|
) -> Result<Option<PortLocation>, SwitchError> {
|
||||||
let port = self.switch_client.find_port(mac_address).await?;
|
self.switch_client.find_port(mac_address).await
|
||||||
Ok(port)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn configure_host_network(
|
async fn configure_port_channel(&self, config: &HostNetworkConfig) -> Result<(), SwitchError> {
|
||||||
&self,
|
debug!("Configuring port channel: {config:#?}");
|
||||||
host: &PhysicalHost,
|
let switch_ports = config.switch_ports.iter().map(|s| s.port.clone()).collect();
|
||||||
config: HostNetworkConfig,
|
|
||||||
) -> Result<(), SwitchError> {
|
self.switch_client
|
||||||
self.configure_bond(host, &config).await?;
|
.configure_port_channel(&format!("Harmony_{}", config.host_id), switch_ports)
|
||||||
self.configure_port_channel(host, &config).await
|
.await
|
||||||
|
.map_err(|e| SwitchError::new(format!("Failed to configure port-channel: {e}")))?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn clear_port_channel(&self, ids: &Vec<Id>) -> Result<(), SwitchError> {
|
||||||
|
todo!()
|
||||||
|
}
|
||||||
|
async fn configure_interface(&self, ports: &Vec<PortConfig>) -> Result<(), SwitchError> {
|
||||||
|
todo!()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl NetworkManager for HAClusterTopology {
|
||||||
|
async fn ensure_network_manager_installed(&self) -> Result<(), NetworkError> {
|
||||||
|
self.network_manager()
|
||||||
|
.await
|
||||||
|
.ensure_network_manager_installed()
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn configure_bond(&self, config: &HostNetworkConfig) -> Result<(), NetworkError> {
|
||||||
|
self.network_manager().await.configure_bond(config).await
|
||||||
|
}
|
||||||
|
|
||||||
|
//TODO add snmp here
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl NodeExporter for HAClusterTopology {
|
||||||
|
async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
|
||||||
|
self.node_exporter.ensure_initialized().await
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn commit_config(&self) -> Result<(), ExecutorError> {
|
||||||
|
self.node_exporter.commit_config().await
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn reload_restart(&self) -> Result<(), ExecutorError> {
|
||||||
|
self.node_exporter.reload_restart().await
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
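The old `configure_host_network` entry point is gone: switch-side LACP setup now lives behind `Switch::configure_port_channel`, and host-side bonding behind `NetworkManager::configure_bond`, keyed by `config.host_id` instead of a `PhysicalHost`. A minimal orchestration sketch of how a caller might combine the two traits; the function name and error mapping are illustrative, not part of this change:

```rust
// Hypothetical caller wiring both halves of what configure_host_network used to do.
async fn apply_host_network(
    topology: &HAClusterTopology,
    config: &HostNetworkConfig,
) -> Result<(), String> {
    // Host side: create the 802.3ad bond via the NetworkManager (NMState).
    topology.configure_bond(config).await.map_err(|e| e.to_string())?;
    // Switch side: create the matching port-channel on the switch.
    topology.configure_port_channel(config).await.map_err(|e| e.to_string())
}
```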
@@ -677,6 +527,21 @@ impl DnsServer for DummyInfra {
     }
 }

+#[async_trait]
+impl NodeExporter for DummyInfra {
+    async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
+        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
+    }
+
+    async fn commit_config(&self) -> Result<(), ExecutorError> {
+        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
+    }
+
+    async fn reload_restart(&self) -> Result<(), ExecutorError> {
+        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
+    }
+}
+
 #[async_trait]
 impl SwitchClient for DummyInfra {
     async fn setup(&self) -> Result<(), SwitchError> {
@@ -697,4 +562,10 @@ impl SwitchClient for DummyInfra {
     ) -> Result<u8, SwitchError> {
         unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
     }
+    async fn clear_port_channel(&self, ids: &Vec<Id>) -> Result<(), SwitchError> {
+        todo!()
+    }
+    async fn configure_interface(&self, ports: &Vec<PortConfig>) -> Result<(), SwitchError> {
+        todo!()
+    }
 }
@@ -1,19 +1,23 @@
-use std::time::Duration;
+use std::{collections::HashMap, time::Duration};

 use derive_new::new;
 use k8s_openapi::{
     ClusterResourceScope, NamespaceResourceScope,
     api::{
         apps::v1::Deployment,
-        core::v1::{Pod, ServiceAccount},
+        core::v1::{Node, Pod, ServiceAccount},
     },
+    apiextensions_apiserver::pkg::apis::apiextensions::v1::CustomResourceDefinition,
     apimachinery::pkg::version::Info,
 };
 use kube::{
     Client, Config, Discovery, Error, Resource,
-    api::{Api, AttachParams, DeleteParams, ListParams, Patch, PatchParams, ResourceExt},
+    api::{
+        Api, AttachParams, DeleteParams, ListParams, ObjectList, Patch, PatchParams, ResourceExt,
+    },
     config::{KubeConfigOptions, Kubeconfig},
     core::ErrorResponse,
+    discovery::{ApiCapabilities, Scope},
     error::DiscoveryError,
     runtime::reflector::Lookup,
 };
@@ -22,11 +26,12 @@ use kube::{
     api::{ApiResource, GroupVersionKind},
     runtime::wait::await_condition,
 };
-use log::{debug, error, info, trace};
+use log::{debug, error, trace, warn};
 use serde::{Serialize, de::DeserializeOwned};
-use serde_json::json;
+use serde_json::{Value, json};
 use similar::TextDiff;
 use tokio::{io::AsyncReadExt, time::sleep};
+use url::Url;

 #[derive(new, Clone)]
 pub struct K8sClient {
@@ -60,6 +65,149 @@ impl K8sClient {
         })
     }

+    /// Returns true if any deployment in the given namespace matching the label selector
+    /// has status.availableReplicas > 0 (or condition Available=True).
+    pub async fn has_healthy_deployment_with_label(
+        &self,
+        namespace: &str,
+        label_selector: &str,
+    ) -> Result<bool, Error> {
+        let api: Api<Deployment> = Api::namespaced(self.client.clone(), namespace);
+        let lp = ListParams::default().labels(label_selector);
+        let list = api.list(&lp).await?;
+        for d in list.items {
+            // Check AvailableReplicas > 0 or Available condition
+            let available = d
+                .status
+                .as_ref()
+                .and_then(|s| s.available_replicas)
+                .unwrap_or(0);
+            if available > 0 {
+                return Ok(true);
+            }
+            // Fallback: scan conditions
+            if let Some(conds) = d.status.as_ref().and_then(|s| s.conditions.as_ref()) {
+                if conds
+                    .iter()
+                    .any(|c| c.type_ == "Available" && c.status == "True")
+                {
+                    return Ok(true);
+                }
+            }
+        }
+        Ok(false)
+    }
+
+    /// Cluster-wide: returns namespaces that have at least one healthy deployment
+    /// matching the label selector (equivalent to kubectl -A -l ...).
+    pub async fn list_namespaces_with_healthy_deployments(
+        &self,
+        label_selector: &str,
+    ) -> Result<Vec<String>, Error> {
+        let api: Api<Deployment> = Api::all(self.client.clone());
+        let lp = ListParams::default().labels(label_selector);
+        let list = api.list(&lp).await?;
+
+        let mut healthy_ns: HashMap<String, bool> = HashMap::new();
+        for d in list.items {
+            let ns = match d.metadata.namespace.clone() {
+                Some(n) => n,
+                None => continue,
+            };
+            let available = d
+                .status
+                .as_ref()
+                .and_then(|s| s.available_replicas)
+                .unwrap_or(0);
+            let is_healthy = if available > 0 {
+                true
+            } else {
+                d.status
+                    .as_ref()
+                    .and_then(|s| s.conditions.as_ref())
+                    .map(|conds| {
+                        conds
+                            .iter()
+                            .any(|c| c.type_ == "Available" && c.status == "True")
+                    })
+                    .unwrap_or(false)
+            };
+            if is_healthy {
+                healthy_ns.insert(ns, true);
+            }
+        }

+        Ok(healthy_ns.into_keys().collect())
+    }
+
+    /// Get the application-controller ServiceAccount name (fallback to default)
+    pub async fn get_controller_service_account_name(
+        &self,
+        ns: &str,
+    ) -> Result<Option<String>, Error> {
+        let api: Api<Deployment> = Api::namespaced(self.client.clone(), ns);
+        let lp = ListParams::default().labels("app.kubernetes.io/component=controller");
+        let list = api.list(&lp).await?;
+        if let Some(dep) = list.items.get(0) {
+            if let Some(sa) = dep
+                .spec
+                .as_ref()
+                .and_then(|ds| ds.template.spec.as_ref())
+                .and_then(|ps| ps.service_account_name.clone())
+            {
+                return Ok(Some(sa));
+            }
+        }
+        Ok(None)
+    }
+
+    // List ClusterRoleBindings dynamically and return as JSON values
+    pub async fn list_clusterrolebindings_json(&self) -> Result<Vec<Value>, Error> {
+        let gvk = kube::api::GroupVersionKind::gvk(
+            "rbac.authorization.k8s.io",
+            "v1",
+            "ClusterRoleBinding",
+        );
+        let ar = kube::api::ApiResource::from_gvk(&gvk);
+        let api: Api<kube::api::DynamicObject> = Api::all_with(self.client.clone(), &ar);
+        let crbs = api.list(&ListParams::default()).await?;
+        let mut out = Vec::new();
+        for o in crbs {
+            let v = serde_json::to_value(&o).unwrap_or(Value::Null);
+            out.push(v);
+        }
+        Ok(out)
+    }
+
+    /// Determine if Argo controller in ns has cluster-wide permissions via CRBs
+    // TODO This does not belong in the generic k8s client, should be refactored at some point
+    pub async fn is_service_account_cluster_wide(&self, sa: &str, ns: &str) -> Result<bool, Error> {
+        let crbs = self.list_clusterrolebindings_json().await?;
+        let sa_user = format!("system:serviceaccount:{}:{}", ns, sa);
+        for crb in crbs {
+            if let Some(subjects) = crb.get("subjects").and_then(|s| s.as_array()) {
+                for subj in subjects {
+                    let kind = subj.get("kind").and_then(|v| v.as_str()).unwrap_or("");
+                    let name = subj.get("name").and_then(|v| v.as_str()).unwrap_or("");
+                    let subj_ns = subj.get("namespace").and_then(|v| v.as_str()).unwrap_or("");
+                    if (kind == "ServiceAccount" && name == sa && subj_ns == ns)
+                        || (kind == "User" && name == sa_user)
+                    {
+                        return Ok(true);
+                    }
+                }
+            }
+        }
+        Ok(false)
+    }
+
+    pub async fn has_crd(&self, name: &str) -> Result<bool, Error> {
+        let api: Api<CustomResourceDefinition> = Api::all(self.client.clone());
+        let lp = ListParams::default().fields(&format!("metadata.name={}", name));
+        let crds = api.list(&lp).await?;
+        Ok(!crds.items.is_empty())
+    }
+
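A usage sketch for the two deployment-health helpers above; the selector and namespace values are examples only. `has_healthy_deployment_with_label` answers for one namespace, while `list_namespaces_with_healthy_deployments` is the `kubectl get deploy -A -l <selector>` equivalent:

```rust
// Assuming `client: &K8sClient` is in scope.
async fn argocd_is_healthy(client: &K8sClient) -> Result<bool, kube::Error> {
    let selector = "app.kubernetes.io/part-of=argocd";
    if client.has_healthy_deployment_with_label("argocd", selector).await? {
        return Ok(true);
    }
    // Cluster-wide fallback, equivalent to `kubectl get deploy -A -l <selector>`.
    let namespaces = client.list_namespaces_with_healthy_deployments(selector).await?;
    Ok(!namespaces.is_empty())
}
```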
     pub async fn service_account_api(&self, namespace: &str) -> Api<ServiceAccount> {
         let api: Api<ServiceAccount> = Api::namespaced(self.client.clone(), namespace);
         api
@@ -88,24 +236,8 @@ impl K8sClient {
         } else {
             Api::default_namespaced_with(self.client.clone(), &gvk)
         };
-        Ok(resource.get(name).await?)
-    }
-
-    pub async fn get_secret_json_value(
-        &self,
-        name: &str,
-        namespace: Option<&str>,
-    ) -> Result<DynamicObject, Error> {
-        self.get_resource_json_value(
-            name,
-            namespace,
-            &GroupVersionKind {
-                group: "".to_string(),
-                version: "v1".to_string(),
-                kind: "Secret".to_string(),
-            },
-        )
-        .await
+        resource.get(name).await
     }

     pub async fn get_deployment(
@@ -120,8 +252,9 @@ impl K8sClient {
             debug!("getting default namespace deployment");
             Api::default_namespaced(self.client.clone())
         };
+
         debug!("getting deployment {} in ns {}", name, namespace.unwrap());
-        Ok(deps.get_opt(name).await?)
+        deps.get_opt(name).await
     }

     pub async fn get_pod(&self, name: &str, namespace: Option<&str>) -> Result<Option<Pod>, Error> {
@@ -130,7 +263,8 @@ impl K8sClient {
         } else {
             Api::default_namespaced(self.client.clone())
         };
-        Ok(pods.get_opt(name).await?)
+
+        pods.get_opt(name).await
     }

     pub async fn scale_deployment(
@@ -173,9 +307,9 @@ impl K8sClient {

     pub async fn wait_until_deployment_ready(
         &self,
-        name: String,
+        name: &str,
         namespace: Option<&str>,
-        timeout: Option<u64>,
+        timeout: Option<Duration>,
     ) -> Result<(), String> {
         let api: Api<Deployment>;

@@ -185,9 +319,9 @@ impl K8sClient {
             api = Api::default_namespaced(self.client.clone());
         }

-        let establish = await_condition(api, name.as_str(), conditions::is_deployment_completed());
-        let t = timeout.unwrap_or(300);
-        let res = tokio::time::timeout(std::time::Duration::from_secs(t), establish).await;
+        let establish = await_condition(api, name, conditions::is_deployment_completed());
+        let timeout = timeout.unwrap_or(Duration::from_secs(120));
+        let res = tokio::time::timeout(timeout, establish).await;

         if res.is_ok() {
             Ok(())
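Call sites must adapt to the new signature: a borrowed `&str` name and an explicit `Duration`, with the default timeout now 120s rather than the previous 300s. A sketch of the updated call shape, with illustrative names:

```rust
use std::time::Duration;

// Assuming `client: &K8sClient` is in scope; deployment and namespace names
// are examples. Note the method returns Result<(), String>.
async fn wait_for_app(client: &K8sClient) -> Result<(), String> {
    client
        .wait_until_deployment_ready("my-app", Some("my-namespace"), Some(Duration::from_secs(60)))
        .await
}
```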
@@ -277,7 +411,7 @@ impl K8sClient {

         if let Some(s) = status.status {
             let mut stdout_buf = String::new();
-            if let Some(mut stdout) = process.stdout().take() {
+            if let Some(mut stdout) = process.stdout() {
                 stdout
                     .read_to_string(&mut stdout_buf)
                     .await
@@ -383,14 +517,14 @@ impl K8sClient {
             Ok(current) => {
                 trace!("Received current value {current:#?}");
                 // The resource exists, so we calculate and display a diff.
-                println!("\nPerforming dry-run for resource: '{}'", name);
+                println!("\nPerforming dry-run for resource: '{name}'");
                 let mut current_yaml = serde_yaml::to_value(&current).unwrap_or_else(|_| {
                     panic!("Could not serialize current value : {current:#?}")
                 });
                 if current_yaml.is_mapping() && current_yaml.get("status").is_some() {
                     let map = current_yaml.as_mapping_mut().unwrap();
                     let removed = map.remove_entry("status");
-                    trace!("Removed status {:?}", removed);
+                    trace!("Removed status {removed:?}");
                 } else {
                     trace!(
                         "Did not find status entry for current object {}/{}",
@@ -419,14 +553,14 @@ impl K8sClient {
                         similar::ChangeTag::Insert => "+",
                         similar::ChangeTag::Equal => " ",
                     };
-                    print!("{}{}", sign, change);
+                    print!("{sign}{change}");
                 }
                 // In a dry run, we return the new resource state that would have been applied.
                 Ok(resource.clone())
             }
             Err(Error::Api(ErrorResponse { code: 404, .. })) => {
                 // The resource does not exist, so the "diff" is the entire new resource.
-                println!("\nPerforming dry-run for new resource: '{}'", name);
+                println!("\nPerforming dry-run for new resource: '{name}'");
                 println!(
                     "Resource does not exist. It would be created with the following content:"
                 );
@@ -435,14 +569,14 @@ impl K8sClient {

                 // Print each line of the new resource with a '+' prefix.
                 for line in new_yaml.lines() {
-                    println!("+{}", line);
+                    println!("+{line}");
                 }
                 // In a dry run, we return the new resource state that would have been created.
                 Ok(resource.clone())
             }
             Err(e) => {
                 // Another API error occurred.
-                error!("Failed to get resource '{}': {}", name, e);
+                error!("Failed to get resource '{name}': {e}");
                 Err(e)
             }
         }
@@ -457,7 +591,7 @@ impl K8sClient {
     where
         K: Resource + Clone + std::fmt::Debug + DeserializeOwned + serde::Serialize,
         <K as Resource>::Scope: ApplyStrategy<K>,
-        <K as kube::Resource>::DynamicType: Default,
+        <K as Resource>::DynamicType: Default,
     {
         let mut result = Vec::new();
         for r in resource.iter() {
@@ -522,10 +656,7 @@ impl K8sClient {

         // 6. Apply the object to the cluster using Server-Side Apply.
         // This will create the resource if it doesn't exist, or update it if it does.
-        println!(
-            "Applying Argo Application '{}' in namespace '{}'...",
-            name, namespace
-        );
+        println!("Applying '{name}' in namespace '{namespace}'...",);
         let patch_params = PatchParams::apply("harmony"); // Use a unique field manager name
         let result = api.patch(name, &patch_params, &Patch::Apply(&obj)).await?;

@@ -534,7 +665,103 @@ impl K8sClient {
         Ok(())
     }

-    pub(crate) async fn from_kubeconfig(path: &str) -> Option<K8sClient> {
+    /// Apply a resource from a URL
+    ///
+    /// It is the equivalent of `kubectl apply -f <url>`
+    pub async fn apply_url(&self, url: Url, ns: Option<&str>) -> Result<(), Error> {
+        let patch_params = PatchParams::apply("harmony");
+        let discovery = kube::Discovery::new(self.client.clone()).run().await?;
+
+        let yaml = reqwest::get(url)
+            .await
+            .expect("Could not get URL")
+            .text()
+            .await
+            .expect("Could not get content from URL");
+
+        for doc in multidoc_deserialize(&yaml).expect("failed to parse YAML from file") {
+            let obj: DynamicObject =
+                serde_yaml::from_value(doc).expect("cannot apply without valid YAML");
+            let namespace = obj.metadata.namespace.as_deref().or(ns);
+            let type_meta = obj
+                .types
+                .as_ref()
+                .expect("cannot apply object without valid TypeMeta");
+            let gvk = GroupVersionKind::try_from(type_meta)
+                .expect("cannot apply object without valid GroupVersionKind");
+            let name = obj.name_any();
+
+            if let Some((ar, caps)) = discovery.resolve_gvk(&gvk) {
+                let api = get_dynamic_api(ar, caps, self.client.clone(), namespace, false);
+                trace!(
+                    "Applying {}: \n{}",
+                    gvk.kind,
+                    serde_yaml::to_string(&obj).expect("Failed to serialize YAML")
+                );
+                let data: serde_json::Value =
+                    serde_json::to_value(&obj).expect("Failed to serialize JSON");
+                let _r = api.patch(&name, &patch_params, &Patch::Apply(data)).await?;
+                debug!("applied {} {}", gvk.kind, name);
+            } else {
+                warn!("Cannot apply document for unknown {gvk:?}");
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Gets a single named resource of a specific type `K`.
+    ///
+    /// This function uses the `ApplyStrategy` trait to correctly determine
+    /// whether to look in a specific namespace or in the entire cluster.
+    ///
+    /// Returns `Ok(None)` if the resource is not found (404).
+    pub async fn get_resource<K>(
+        &self,
+        name: &str,
+        namespace: Option<&str>,
+    ) -> Result<Option<K>, Error>
+    where
+        K: Resource + Clone + std::fmt::Debug + DeserializeOwned,
+        <K as Resource>::Scope: ApplyStrategy<K>,
+        <K as kube::Resource>::DynamicType: Default,
+    {
+        let api: Api<K> =
+            <<K as Resource>::Scope as ApplyStrategy<K>>::get_api(&self.client, namespace);
+
+        api.get_opt(name).await
+    }
+
+    /// Lists all resources of a specific type `K`.
+    ///
+    /// This function uses the `ApplyStrategy` trait to correctly determine
+    /// whether to list from a specific namespace or from the entire cluster.
+    pub async fn list_resources<K>(
+        &self,
+        namespace: Option<&str>,
+        list_params: Option<ListParams>,
+    ) -> Result<ObjectList<K>, Error>
+    where
+        K: Resource + Clone + std::fmt::Debug + DeserializeOwned,
+        <K as Resource>::Scope: ApplyStrategy<K>,
+        <K as kube::Resource>::DynamicType: Default,
+    {
+        let api: Api<K> =
+            <<K as Resource>::Scope as ApplyStrategy<K>>::get_api(&self.client, namespace);
+
+        let list_params = list_params.unwrap_or_default();
+        api.list(&list_params).await
+    }
+
+    /// Fetches a list of all Nodes in the cluster.
+    pub async fn get_nodes(
+        &self,
+        list_params: Option<ListParams>,
+    ) -> Result<ObjectList<Node>, Error> {
+        self.list_resources(None, list_params).await
+    }
+
+    pub async fn from_kubeconfig(path: &str) -> Option<K8sClient> {
         let k = match Kubeconfig::read_from(path) {
             Ok(k) => k,
             Err(e) => {
@@ -553,6 +780,31 @@ impl K8sClient {
         }
     }
 }
+
+fn get_dynamic_api(
+    resource: ApiResource,
+    capabilities: ApiCapabilities,
+    client: Client,
+    ns: Option<&str>,
+    all: bool,
+) -> Api<DynamicObject> {
+    if capabilities.scope == Scope::Cluster || all {
+        Api::all_with(client, &resource)
+    } else if let Some(namespace) = ns {
+        Api::namespaced_with(client, namespace, &resource)
+    } else {
+        Api::default_namespaced_with(client, &resource)
+    }
+}
+
+fn multidoc_deserialize(data: &str) -> Result<Vec<serde_yaml::Value>, serde_yaml::Error> {
+    use serde::Deserialize;
+    let mut docs = vec![];
+    for de in serde_yaml::Deserializer::from_str(data) {
+        docs.push(serde_yaml::Value::deserialize(de)?);
+    }
+    Ok(docs)
+}

 pub trait ApplyStrategy<K: Resource> {
     fn get_api(client: &Client, ns: Option<&str>) -> Api<K>;
 }
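`apply_url` leans on `multidoc_deserialize` to split a fetched manifest into documents before resolving each GVK through discovery. A self-contained sketch of that split step, using an inline two-document YAML string in place of the HTTP body:

```rust
use kube::ResourceExt;

let yaml = "\
apiVersion: v1
kind: Namespace
metadata:
  name: demo-a
---
apiVersion: v1
kind: Namespace
metadata:
  name: demo-b
";
for doc in multidoc_deserialize(yaml).expect("failed to parse YAML") {
    let obj: kube::api::DynamicObject =
        serde_yaml::from_value(doc).expect("not a valid Kubernetes object");
    println!("would apply {}", obj.name_any());
}
```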
@@ -1,4 +1,4 @@
-use std::{collections::BTreeMap, process::Command, sync::Arc};
+use std::{collections::BTreeMap, process::Command, sync::Arc, time::Duration};

 use async_trait::async_trait;
 use base64::{Engine, engine::general_purpose};
@@ -7,7 +7,7 @@ use k8s_openapi::api::{
     rbac::v1::{ClusterRoleBinding, RoleRef, Subject},
 };
 use kube::api::{DynamicObject, GroupVersionKind, ObjectMeta};
-use log::{debug, info, warn};
+use log::{debug, info, trace, warn};
 use serde::Serialize;
 use tokio::sync::OnceCell;

@@ -88,6 +88,7 @@ pub struct K8sAnywhereTopology {
 #[async_trait]
 impl K8sclient for K8sAnywhereTopology {
     async fn k8s_client(&self) -> Result<Arc<K8sClient>, String> {
+        trace!("getting k8s client");
         let state = match self.k8s_state.get() {
             Some(state) => state,
             None => return Err("K8s state not initialized yet".to_string()),
@@ -155,9 +156,9 @@ impl Grafana for K8sAnywhereTopology {
         //TODO change this to a ensure ready or something better than just a timeout
         client
             .wait_until_deployment_ready(
-                "grafana-grafana-deployment".to_string(),
+                "grafana-grafana-deployment",
                 Some("grafana"),
-                Some(30),
+                Some(Duration::from_secs(30)),
             )
             .await?;

@@ -975,36 +976,68 @@ impl TenantManager for K8sAnywhereTopology {

 #[async_trait]
 impl Ingress for K8sAnywhereTopology {
-    //TODO this is specifically for openshift/okd which violates the k8sanywhere idea
     async fn get_domain(&self, service: &str) -> Result<String, PreparationError> {
+        use log::{debug, trace, warn};
+
         let client = self.k8s_client().await?;

         if let Some(Some(k8s_state)) = self.k8s_state.get() {
             match k8s_state.source {
-                K8sSource::LocalK3d => Ok(format!("{service}.local.k3d")),
+                K8sSource::LocalK3d => {
+                    // Local developer UX
+                    return Ok(format!("{service}.local.k3d"));
+                }
                 K8sSource::Kubeconfig => {
-                    self.openshift_ingress_operator_available().await?;
-
-                    let gvk = GroupVersionKind {
-                        group: "operator.openshift.io".into(),
-                        version: "v1".into(),
-                        kind: "IngressController".into(),
-                    };
-                    let ic = client
-                        .get_resource_json_value(
-                            "default",
-                            Some("openshift-ingress-operator"),
-                            &gvk,
-                        )
-                        .await
-                        .map_err(|_| {
-                            PreparationError::new("Failed to fetch IngressController".to_string())
-                        })?;
-
-                    match ic.data["status"]["domain"].as_str() {
-                        Some(domain) => Ok(format!("{service}.{domain}")),
-                        None => Err(PreparationError::new("Could not find domain".to_string())),
+                    trace!("K8sSource is kubeconfig; attempting to detect domain");
+
+                    // 1) Try OpenShift IngressController domain (backward compatible)
+                    if self.openshift_ingress_operator_available().await.is_ok() {
+                        trace!("OpenShift ingress operator detected; using IngressController");
+                        let gvk = GroupVersionKind {
+                            group: "operator.openshift.io".into(),
+                            version: "v1".into(),
+                            kind: "IngressController".into(),
+                        };
+                        let ic = client
+                            .get_resource_json_value(
+                                "default",
+                                Some("openshift-ingress-operator"),
+                                &gvk,
+                            )
+                            .await
+                            .map_err(|_| {
+                                PreparationError::new(
+                                    "Failed to fetch IngressController".to_string(),
+                                )
+                            })?;
+
+                        if let Some(domain) = ic.data["status"]["domain"].as_str() {
+                            return Ok(format!("{service}.{domain}"));
+                        } else {
+                            warn!("OpenShift IngressController present but no status.domain set");
+                        }
+                    } else {
+                        trace!(
+                            "OpenShift ingress operator not detected; trying generic Kubernetes"
+                        );
                     }
+
+                    // 2) Try NGINX Ingress Controller common setups
+                    // 2.a) Well-known namespace/name for the controller Service
+                    //      - upstream default: namespace "ingress-nginx", service "ingress-nginx-controller"
+                    //      - some distros: "ingress-nginx-controller" svc in "ingress-nginx" ns
+                    //      If found with LoadBalancer ingress hostname, use its base domain.
+                    if let Some(domain) = try_nginx_lb_domain(&client).await? {
+                        return Ok(format!("{service}.{domain}"));
+                    }
+
+                    // 3) Fallback: internal cluster DNS suffix (service.namespace.svc.cluster.local)
+                    //    We don't have tenant namespace here, so we fallback to 'default' with a warning.
+                    warn!(
+                        "Could not determine external ingress domain; falling back to internal-only DNS"
+                    );
+                    let internal = format!("{service}.default.svc.cluster.local");
+                    Ok(internal)
                 }
             }
         } else {
@@ -1014,3 +1047,63 @@ impl Ingress for K8sAnywhereTopology {
         }
     }
 }
+
+async fn try_nginx_lb_domain(client: &K8sClient) -> Result<Option<String>, PreparationError> {
+    use log::{debug, trace};
+
+    // Try common service path: svc/ingress-nginx-controller in ns/ingress-nginx
+    let svc_gvk = GroupVersionKind {
+        group: "".into(), // core
+        version: "v1".into(),
+        kind: "Service".into(),
+    };
+
+    let candidates = [
+        ("ingress-nginx", "ingress-nginx-controller"),
+        ("ingress-nginx", "ingress-nginx-controller-internal"),
+        ("ingress-nginx", "ingress-nginx"), // some charts name the svc like this
+        ("kube-system", "ingress-nginx-controller"), // less common but seen
+    ];
+
+    for (ns, name) in candidates {
+        trace!("Checking NGINX Service {ns}/{name} for LoadBalancer hostname");
+        if let Ok(svc) = client
+            .get_resource_json_value(ns, Some(name), &svc_gvk)
+            .await
+        {
+            let lb_hosts = svc.data["status"]["loadBalancer"]["ingress"]
+                .as_array()
+                .cloned()
+                .unwrap_or_default();
+            for entry in lb_hosts {
+                if let Some(host) = entry.get("hostname").and_then(|v| v.as_str()) {
+                    debug!("Found NGINX LB hostname: {host}");
+                    if let Some(domain) = extract_base_domain(host) {
+                        return Ok(Some(domain.to_string()));
+                    } else {
+                        return Ok(Some(host.to_string())); // already a domain
+                    }
+                }
+                if let Some(ip) = entry.get("ip").and_then(|v| v.as_str()) {
+                    // If only an IP is exposed, we can't create a hostname; return None to keep searching
+                    debug!("NGINX LB exposes IP {ip} (no hostname); skipping");
+                }
+            }
+        }
+    }
+
+    Ok(None)
+}
+
+fn extract_base_domain(host: &str) -> Option<String> {
+    // For a host like a1b2c3d4e5f6abcdef.elb.amazonaws.com -> base domain elb.amazonaws.com
+    // For a managed DNS like xyz.example.com -> base domain example.com (keep 2+ labels)
+    // Heuristic: keep last 2 labels by default; special-case known multi-label TLDs if needed.
+    let parts: Vec<&str> = host.split('.').collect();
+    if parts.len() >= 2 {
+        // Very conservative: last 2 labels
+        Some(parts[parts.len() - 2..].join("."))
+    } else {
+        None
+    }
+}
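One caveat worth flagging in `extract_base_domain`: the comment promises `elb.amazonaws.com` for an ELB hostname, but the conservative last-two-labels implementation actually yields `amazonaws.com`. A few concrete cases, as assertions with illustrative inputs:

```rust
assert_eq!(extract_base_domain("xyz.example.com"), Some("example.com".to_string()));
// Keeps only the last two labels, not "elb.amazonaws.com" as the comment suggests.
assert_eq!(extract_base_domain("a1b2c3.elb.amazonaws.com"), Some("amazonaws.com".to_string()));
assert_eq!(extract_base_domain("localhost"), None);
```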
@@ -1,5 +1,8 @@
+mod failover;
 mod ha_cluster;
 pub mod ingress;
+pub mod node_exporter;
+pub use failover::*;
 use harmony_types::net::IpAddress;
 mod host_binding;
 mod http;
@@ -186,7 +189,7 @@ impl TopologyState {
     }
 }

-#[derive(Debug)]
+#[derive(Debug, PartialEq)]
 pub enum DeploymentTarget {
     LocalDev,
     Staging,
@@ -7,14 +7,16 @@ use std::{
 };

 use async_trait::async_trait;
+use brocade::PortOperatingMode;
 use derive_new::new;
 use harmony_types::{
+    id::Id,
     net::{IpAddress, MacAddress},
     switch::PortLocation,
 };
 use serde::Serialize;

-use crate::{executors::ExecutorError, hardware::PhysicalHost};
+use crate::executors::ExecutorError;

 use super::{LogicalHost, k8s::K8sClient};

@@ -182,6 +184,39 @@ impl FromStr for DnsRecordType {
     }
 }

+#[async_trait]
+pub trait NetworkManager: Debug + Send + Sync {
+    async fn ensure_network_manager_installed(&self) -> Result<(), NetworkError>;
+    async fn configure_bond(&self, config: &HostNetworkConfig) -> Result<(), NetworkError>;
+}
+
+#[derive(Debug, Clone, new)]
+pub struct NetworkError {
+    msg: String,
+}
+
+impl fmt::Display for NetworkError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_str(&self.msg)
+    }
+}
+
+impl Error for NetworkError {}
+
+impl From<kube::Error> for NetworkError {
+    fn from(value: kube::Error) -> Self {
+        NetworkError::new(value.to_string())
+    }
+}
+
+impl From<String> for NetworkError {
+    fn from(value: String) -> Self {
+        NetworkError::new(value)
+    }
+}
+
+pub type PortConfig = (PortLocation, PortOperatingMode);
+
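The two `From` impls are what let `NetworkManager` implementations bubble kube and String errors up with `?`. A minimal sketch; the helper function and its checks are illustrative, not part of this change:

```rust
// Hypothetical helper returning the new NetworkError type.
async fn check_apiserver(client: &kube::Client) -> Result<(), NetworkError> {
    let version = client.apiserver_version().await?; // kube::Error -> NetworkError via From
    if version.major.is_empty() {
        return Err(NetworkError::from("apiserver returned no version".to_string()));
    }
    Ok(())
}
```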
 #[async_trait]
 pub trait Switch: Send + Sync {
     async fn setup_switch(&self) -> Result<(), SwitchError>;
@@ -191,15 +226,14 @@ pub trait Switch: Send + Sync {
         mac_address: &MacAddress,
     ) -> Result<Option<PortLocation>, SwitchError>;

-    async fn configure_host_network(
-        &self,
-        host: &PhysicalHost,
-        config: HostNetworkConfig,
-    ) -> Result<(), SwitchError>;
+    async fn configure_port_channel(&self, config: &HostNetworkConfig) -> Result<(), SwitchError>;
+    async fn clear_port_channel(&self, ids: &Vec<Id>) -> Result<(), SwitchError>;
+    async fn configure_interface(&self, ports: &Vec<PortConfig>) -> Result<(), SwitchError>;
 }

 #[derive(Clone, Debug, PartialEq)]
 pub struct HostNetworkConfig {
+    pub host_id: Id,
     pub switch_ports: Vec<SwitchPort>,
 }

@@ -254,6 +288,9 @@ pub trait SwitchClient: Debug + Send + Sync {
         channel_name: &str,
         switch_ports: Vec<PortLocation>,
     ) -> Result<u8, SwitchError>;
+
+    async fn clear_port_channel(&self, ids: &Vec<Id>) -> Result<(), SwitchError>;
+    async fn configure_interface(&self, ports: &Vec<PortConfig>) -> Result<(), SwitchError>;
 }

 #[cfg(test)]

harmony/src/domain/topology/node_exporter.rs (new file, +17)
@@ -0,0 +1,17 @@
+use async_trait::async_trait;
+
+use crate::executors::ExecutorError;
+
+#[async_trait]
+pub trait NodeExporter: Send + Sync + std::fmt::Debug {
+    async fn ensure_initialized(&self) -> Result<(), ExecutorError>;
+    async fn commit_config(&self) -> Result<(), ExecutorError>;
+    async fn reload_restart(&self) -> Result<(), ExecutorError>;
+}
+
+// //TODO complete this impl
+// impl std::fmt::Debug for dyn NodeExporter {
+//     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+//         f.write_fmt(format_args!("NodeExporter ",))
+//     }
+// }
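The trait implies a three-step rollout for any node-exporter backend: install/enable, persist, restart. A usage sketch over a trait object; the function name is illustrative:

```rust
async fn roll_out(exporter: &dyn NodeExporter) -> Result<(), ExecutorError> {
    exporter.ensure_initialized().await?; // install the package and enable it
    exporter.commit_config().await?;      // persist the configuration
    exporter.reload_restart().await       // restart the service to pick it up
}
```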
@@ -1,7 +1,6 @@
 use std::any::Any;

 use async_trait::async_trait;
-use kube::api::DynamicObject;
 use log::debug;

 use crate::{
@@ -77,14 +76,6 @@ pub trait AlertReceiver<S: AlertSender>: std::fmt::Debug + Send + Sync {
     fn name(&self) -> String;
     fn clone_box(&self) -> Box<dyn AlertReceiver<S>>;
     fn as_any(&self) -> &dyn Any;
-    fn as_alertmanager_receiver(&self) -> AlertManagerReceiver;
-}
-
-#[derive(Debug)]
-pub struct AlertManagerReceiver {
-    pub receiver_config: serde_json::Value,
-    // FIXME we should not leak k8s here. DynamicObject is k8s specific
-    pub additional_ressources: Vec<DynamicObject>,
 }

 #[async_trait]
@@ -14,7 +14,7 @@ use k8s_openapi::{
     },
     apimachinery::pkg::util::intstr::IntOrString,
 };
-use kube::Resource;
+use kube::{Resource, api::DynamicObject};
 use log::debug;
 use serde::de::DeserializeOwned;
 use serde_json::json;
@@ -1,12 +1,13 @@
 use async_trait::async_trait;
 use brocade::{BrocadeClient, BrocadeOptions, InterSwitchLink, InterfaceStatus, PortOperatingMode};
 use harmony_types::{
+    id::Id,
     net::{IpAddress, MacAddress},
     switch::{PortDeclaration, PortLocation},
 };
 use option_ext::OptionExt;

-use crate::topology::{SwitchClient, SwitchError};
+use crate::topology::{PortConfig, SwitchClient, SwitchError};

 #[derive(Debug)]
 pub struct BrocadeSwitchClient {
@@ -18,9 +19,9 @@ impl BrocadeSwitchClient {
         ip_addresses: &[IpAddress],
         username: &str,
         password: &str,
-        options: Option<BrocadeOptions>,
+        options: BrocadeOptions,
     ) -> Result<Self, brocade::Error> {
-        let brocade = brocade::init(ip_addresses, 22, username, password, options).await?;
+        let brocade = brocade::init(ip_addresses, username, password, options).await?;
         Ok(Self { brocade })
     }
 }
@@ -59,7 +60,7 @@ impl SwitchClient for BrocadeSwitchClient {
     }

         self.brocade
-            .configure_interfaces(interfaces)
+            .configure_interfaces(&interfaces)
             .await
             .map_err(|e| SwitchError::new(e.to_string()))?;

@@ -111,6 +112,27 @@ impl SwitchClient for BrocadeSwitchClient {

         Ok(channel_id)
     }
+    async fn clear_port_channel(&self, ids: &Vec<Id>) -> Result<(), SwitchError> {
+        for i in ids {
+            self.brocade
+                .clear_port_channel(&i.to_string())
+                .await
+                .map_err(|e| SwitchError::new(e.to_string()))?;
+        }
+        Ok(())
+    }
+    async fn configure_interface(&self, ports: &Vec<PortConfig>) -> Result<(), SwitchError> {
+        // FIXME hardcoded TenGigabitEthernet = bad
+        let ports = ports
+            .iter()
+            .map(|p| (format!("TenGigabitEthernet {}", p.0), p.1.clone()))
+            .collect();
+        self.brocade
+            .configure_interfaces(&ports)
+            .await
+            .map_err(|e| SwitchError::new(e.to_string()))?;
+        Ok(())
+    }
 }
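The new `configure_interface` carries a FIXME for the hardcoded `TenGigabitEthernet` prefix. One possible direction, sketched here with assumed speed values; this helper does not exist in the codebase:

```rust
// Hypothetical mapping from link speed to the Brocade CLI interface prefix.
fn interface_prefix(speed_gbps: u32) -> &'static str {
    match speed_gbps {
        1 => "GigabitEthernet",
        10 => "TenGigabitEthernet",
        40 => "FortyGigabitEthernet",
        _ => "Ethernet",
    }
}
```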

 #[cfg(test)]
@@ -121,7 +143,7 @@ mod tests {
     use async_trait::async_trait;
     use brocade::{
         BrocadeClient, BrocadeInfo, Error, InterSwitchLink, InterfaceInfo, InterfaceStatus,
-        InterfaceType, MacAddressEntry, PortChannelId, PortOperatingMode,
+        InterfaceType, MacAddressEntry, PortChannelId, PortOperatingMode, SecurityLevel,
     };
     use harmony_types::switch::PortLocation;

@@ -145,6 +167,7 @@ mod tests {

         client.setup().await.unwrap();

+        //TODO not sure about this
         let configured_interfaces = brocade.configured_interfaces.lock().unwrap();
         assert_that!(*configured_interfaces).contains_exactly(vec![
             (first_interface.name.clone(), PortOperatingMode::Access),
@@ -255,10 +278,10 @@ mod tests {

     async fn configure_interfaces(
         &self,
-        interfaces: Vec<(String, PortOperatingMode)>,
+        interfaces: &Vec<(String, PortOperatingMode)>,
     ) -> Result<(), Error> {
         let mut configured_interfaces = self.configured_interfaces.lock().unwrap();
-        *configured_interfaces = interfaces;
+        *configured_interfaces = interfaces.clone();

         Ok(())
     }
@@ -279,6 +302,10 @@ mod tests {
     async fn clear_port_channel(&self, _channel_name: &str) -> Result<(), Error> {
         todo!()
     }
+
+    async fn enable_snmp(&self, user_name: &str, auth: &str, des: &str) -> Result<(), Error> {
+        todo!()
+    }
 }

 impl FakeBrocadeClient {
@@ -11,7 +11,7 @@ pub struct InventoryRepositoryFactory;
 impl InventoryRepositoryFactory {
     pub async fn build() -> Result<Box<dyn InventoryRepository>, RepoError> {
         Ok(Box::new(
-            SqliteInventoryRepository::new(&(*DATABASE_URL)).await?,
+            SqliteInventoryRepository::new(&DATABASE_URL).await?,
         ))
     }
 }
@@ -121,7 +121,7 @@ mod test {
     #[test]
     fn deployment_to_dynamic_roundtrip() {
         // Create a sample Deployment with nested structures
-        let mut deployment = Deployment {
+        let deployment = Deployment {
             metadata: ObjectMeta {
                 name: Some("my-deployment".to_string()),
                 labels: Some({
@@ -4,5 +4,6 @@ pub mod hp_ilo;
 pub mod intel_amt;
 pub mod inventory;
 pub mod kube;
+pub mod network_manager;
 pub mod opnsense;
 mod sqlx;

harmony/src/infra/network_manager.rs (new file, +264)
@@ -0,0 +1,264 @@
|
|||||||
|
use std::{
|
||||||
|
collections::{BTreeMap, HashSet},
|
||||||
|
sync::Arc,
|
||||||
|
};
|
||||||
|
|
||||||
|
use async_trait::async_trait;
|
||||||
|
use harmony_types::id::Id;
|
||||||
|
use k8s_openapi::api::core::v1::Node;
|
||||||
|
use kube::{
|
||||||
|
ResourceExt,
|
||||||
|
api::{ObjectList, ObjectMeta},
|
||||||
|
};
|
||||||
|
use log::{debug, info};
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
modules::okd::crd::nmstate,
|
||||||
|
topology::{HostNetworkConfig, NetworkError, NetworkManager, k8s::K8sClient},
|
||||||
|
};
|
||||||
|
|
||||||
|
/// TODO document properly the non-intuitive behavior or "roll forward only" of nmstate in general
|
||||||
|
/// It is documented in nmstate official doc, but worth mentionning here :
|
||||||
|
///
|
||||||
|
/// - You create a bond, nmstate will apply it
|
||||||
|
/// - You delete de bond from nmstate, it will NOT delete it
|
||||||
|
/// - To delete it you have to update it with configuration set to null
|
||||||
|
pub struct OpenShiftNmStateNetworkManager {
|
||||||
|
k8s_client: Arc<K8sClient>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl std::fmt::Debug for OpenShiftNmStateNetworkManager {
|
||||||
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
|
f.debug_struct("OpenShiftNmStateNetworkManager").finish()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl NetworkManager for OpenShiftNmStateNetworkManager {
|
||||||
|
async fn ensure_network_manager_installed(&self) -> Result<(), NetworkError> {
|
||||||
|
debug!("Installing NMState controller...");
|
||||||
|
// TODO use operatorhub maybe?
|
||||||
|
self.k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/nmstate.io_nmstates.yaml
|
||||||
|
").unwrap(), Some("nmstate"))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
debug!("Creating NMState namespace...");
|
||||||
|
self.k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/namespace.yaml
|
||||||
|
").unwrap(), Some("nmstate"))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
debug!("Creating NMState service account...");
|
||||||
|
self.k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/service_account.yaml
|
||||||
|
").unwrap(), Some("nmstate"))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
debug!("Creating NMState role...");
|
||||||
|
self.k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/role.yaml
|
||||||
|
").unwrap(), Some("nmstate"))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
debug!("Creating NMState role binding...");
|
||||||
|
self.k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/role_binding.yaml
|
||||||
|
").unwrap(), Some("nmstate"))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
debug!("Creating NMState operator...");
|
||||||
|
self.k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/operator.yaml
|
||||||
|
").unwrap(), Some("nmstate"))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
self.k8s_client
|
||||||
|
.wait_until_deployment_ready("nmstate-operator", Some("nmstate"), None)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let nmstate = nmstate::NMState {
|
||||||
|
metadata: ObjectMeta {
|
||||||
|
name: Some("nmstate".to_string()),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
..Default::default()
|
||||||
|
};
|
||||||
|
debug!(
|
||||||
|
"Creating NMState:\n{}",
|
||||||
|
serde_yaml::to_string(&nmstate).unwrap()
|
||||||
|
);
|
||||||
|
self.k8s_client.apply(&nmstate, None).await?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn configure_bond(&self, config: &HostNetworkConfig) -> Result<(), NetworkError> {
|
||||||
|
let hostname = self.get_hostname(&config.host_id).await.map_err(|e| {
|
||||||
|
NetworkError::new(format!(
|
||||||
|
"Can't configure bond, can't get hostname for host '{}': {e}",
|
||||||
|
config.host_id
|
||||||
|
))
|
||||||
|
})?;
|
||||||
|
let bond_id = self.get_next_bond_id(&hostname).await.map_err(|e| {
|
||||||
|
NetworkError::new(format!(
|
||||||
|
"Can't configure bond, can't get an available bond id for host '{}': {e}",
|
||||||
|
config.host_id
|
||||||
|
))
|
||||||
|
})?;
|
||||||
|
let bond_config = self.create_bond_configuration(&hostname, &bond_id, config);
|
||||||
|
|
||||||
|
debug!(
|
||||||
|
"Applying NMState bond config for host {}:\n{}",
|
||||||
|
config.host_id,
|
||||||
|
serde_yaml::to_string(&bond_config).unwrap(),
|
||||||
|
);
|
||||||
|
self.k8s_client
|
||||||
|
.apply(&bond_config, None)
|
||||||
|
.await
|
||||||
|
.map_err(|e| NetworkError::new(format!("Failed to configure bond: {e}")))?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl OpenShiftNmStateNetworkManager {
|
||||||
|
pub fn new(k8s_client: Arc<K8sClient>) -> Self {
|
||||||
|
Self { k8s_client }
|
||||||
|
}
|
||||||
|
|
||||||
|
fn create_bond_configuration(
|
||||||
|
&self,
|
||||||
|
host: &str,
|
||||||
|
bond_name: &str,
|
||||||
|
config: &HostNetworkConfig,
|
||||||
|
) -> nmstate::NodeNetworkConfigurationPolicy {
|
||||||
|
info!("Configuring bond '{bond_name}' for host '{host}'...");
|
||||||
|
|
||||||
|
let mut bond_mtu: Option<u32> = None;
|
||||||
|
let mut copy_mac_from: Option<String> = None;
|
||||||
|
let mut bond_ports = Vec::new();
|
||||||
|
let mut interfaces: Vec<nmstate::Interface> = Vec::new();
|
||||||
|
|
||||||
|
for switch_port in &config.switch_ports {
|
||||||
|
let interface_name = switch_port.interface.name.clone();
|
||||||
|
|
||||||
|
interfaces.push(nmstate::Interface {
|
||||||
|
name: interface_name.clone(),
|
||||||
|
description: Some(format!("Member of bond {bond_name}")),
|
||||||
|
r#type: nmstate::InterfaceType::Ethernet,
|
||||||
|
state: "up".to_string(),
|
||||||
|
ipv4: Some(nmstate::IpStackSpec {
|
||||||
|
enabled: Some(false),
|
||||||
|
..Default::default()
|
||||||
|
}),
|
||||||
|
ipv6: Some(nmstate::IpStackSpec {
|
||||||
|
enabled: Some(false),
|
||||||
|
..Default::default()
|
||||||
|
}),
|
||||||
|
link_aggregation: None,
|
||||||
|
..Default::default()
|
||||||
|
});
|
||||||
|
|
||||||
|
            bond_ports.push(interface_name.clone());

            // Use the first port's details for the bond mtu and mac address
            if bond_mtu.is_none() {
                bond_mtu = Some(switch_port.interface.mtu);
            }
            if copy_mac_from.is_none() {
                copy_mac_from = Some(interface_name);
            }
        }

        interfaces.push(nmstate::Interface {
            name: bond_name.to_string(),
            description: Some(format!("HARMONY - Network bond for host {host}")),
            r#type: nmstate::InterfaceType::Bond,
            state: "up".to_string(),
            copy_mac_from,
            ipv4: Some(nmstate::IpStackSpec {
                dhcp: Some(true),
                enabled: Some(true),
                ..Default::default()
            }),
            ipv6: Some(nmstate::IpStackSpec {
                dhcp: Some(true),
                autoconf: Some(true),
                enabled: Some(true),
                ..Default::default()
            }),
            link_aggregation: Some(nmstate::BondSpec {
                mode: "802.3ad".to_string(),
                ports: bond_ports,
                ..Default::default()
            }),
            ..Default::default()
        });

        nmstate::NodeNetworkConfigurationPolicy {
            metadata: ObjectMeta {
                name: Some(format!("{host}-bond-config")),
                ..Default::default()
            },
            spec: nmstate::NodeNetworkConfigurationPolicySpec {
                node_selector: Some(BTreeMap::from([(
                    "kubernetes.io/hostname".to_string(),
                    host.to_string(),
                )])),
                desired_state: nmstate::NetworkState {
                    interfaces,
                    ..Default::default()
                },
            },
        }
    }

    async fn get_hostname(&self, host_id: &Id) -> Result<String, String> {
        let nodes: ObjectList<Node> = self
            .k8s_client
            .list_resources(None, None)
            .await
            .map_err(|e| format!("Failed to list nodes: {e}"))?;

        let Some(node) = nodes.iter().find(|n| {
            n.status
                .as_ref()
                .and_then(|s| s.node_info.as_ref())
                .map(|i| i.system_uuid == host_id.to_string())
                .unwrap_or(false)
        }) else {
            return Err(format!("No node found for host '{host_id}'"));
        };

        node.labels()
            .get("kubernetes.io/hostname")
            .ok_or(format!(
                "Node '{host_id}' has no kubernetes.io/hostname label"
            ))
            .cloned()
    }

    async fn get_next_bond_id(&self, hostname: &str) -> Result<String, String> {
        let network_state: Option<nmstate::NodeNetworkState> = self
            .k8s_client
            .get_resource(hostname, None)
            .await
            .map_err(|e| format!("Failed to list nodes: {e}"))?;

        let interfaces = vec![];
        let existing_bonds: Vec<&nmstate::Interface> = network_state
            .as_ref()
            .and_then(|network_state| network_state.status.current_state.as_ref())
            .map_or(&interfaces, |current_state| &current_state.interfaces)
            .iter()
            .filter(|i| i.r#type == nmstate::InterfaceType::Bond)
            .collect();

        let used_ids: HashSet<u32> = existing_bonds
            .iter()
            .filter_map(|i| {
                i.name
                    .strip_prefix("bond")
                    .and_then(|id| id.parse::<u32>().ok())
            })
            .collect();

        let next_id = (0..).find(|id| !used_ids.contains(id)).unwrap();
        Ok(format!("bond{next_id}"))
    }
}
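The id allocation at the end of get_next_bond_id always hands out the lowest free index, so the name of a deleted bond is reused before a new index is minted. A minimal standalone sketch of just that step (the function name is illustrative, not part of the codebase):

use std::collections::HashSet;

// Pick the lowest "bondN" name not already present in `existing`.
fn next_bond_name(existing: &[&str]) -> String {
    let used: HashSet<u32> = existing
        .iter()
        .filter_map(|name| name.strip_prefix("bond").and_then(|id| id.parse().ok()))
        .collect();
    // (0..) is unbounded, so `find` always yields a value and unwrap cannot panic.
    let next = (0..).find(|id| !used.contains(id)).unwrap();
    format!("bond{next}")
}

// next_bond_name(&["bond0", "bond2"]) == "bond1"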
@@ -10,7 +10,7 @@ use super::OPNSenseFirewall;

 #[async_trait]
 impl DnsServer for OPNSenseFirewall {
-    async fn register_hosts(&self, hosts: Vec<DnsRecord>) -> Result<(), ExecutorError> {
+    async fn register_hosts(&self, _hosts: Vec<DnsRecord>) -> Result<(), ExecutorError> {
         todo!("Refactor this to use dnsmasq")
         // let mut writable_opnsense = self.opnsense_config.write().await;
         // let mut dns = writable_opnsense.dns();
@@ -68,7 +68,7 @@ impl DnsServer for OPNSenseFirewall {
         self.host.clone()
     }

-    async fn register_dhcp_leases(&self, register: bool) -> Result<(), ExecutorError> {
+    async fn register_dhcp_leases(&self, _register: bool) -> Result<(), ExecutorError> {
         todo!("Refactor this to use dnsmasq")
         // let mut writable_opnsense = self.opnsense_config.write().await;
         // let mut dns = writable_opnsense.dns();
@@ -4,11 +4,11 @@ mod firewall;
 mod http;
 mod load_balancer;
 mod management;
+pub mod node_exporter;
 mod tftp;
 use std::sync::Arc;

 pub use management::*;
-use opnsense_config_xml::Host;
 use tokio::sync::RwLock;

 use crate::{executors::ExecutorError, topology::LogicalHost};
47 harmony/src/infra/opnsense/node_exporter.rs Normal file
@@ -0,0 +1,47 @@
use async_trait::async_trait;
use log::debug;

use crate::{
    executors::ExecutorError, infra::opnsense::OPNSenseFirewall,
    topology::node_exporter::NodeExporter,
};

#[async_trait]
impl NodeExporter for OPNSenseFirewall {
    async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
        let mut config = self.opnsense_config.write().await;
        let node_exporter = config.node_exporter();
        if let Some(config) = node_exporter.get_full_config() {
            debug!(
                "Node exporter available in opnsense config, assuming it is already installed. {config:?}"
            );
        } else {
            config
                .install_package("os-node_exporter")
                .await
                .map_err(|e| {
                    ExecutorError::UnexpectedError(format!(
                        "Executor failed when trying to install os-node_exporter package with error {e:?}"
                    ))
                })?;
        }

        config
            .node_exporter()
            .enable(true)
            .map_err(|e| ExecutorError::UnexpectedError(e.to_string()))?;
        Ok(())
    }

    async fn commit_config(&self) -> Result<(), ExecutorError> {
        OPNSenseFirewall::commit_config(self).await
    }

    async fn reload_restart(&self) -> Result<(), ExecutorError> {
        self.opnsense_config
            .write()
            .await
            .node_exporter()
            .reload_restart()
            .await
            .map_err(|e| ExecutorError::UnexpectedError(e.to_string()))
    }
}
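Assuming an OPNSenseFirewall value is already constructed, the natural call order for this new trait mirrors the rest of the OPNsense integration: initialize, commit, then reload. A hedged usage sketch; the driver function itself is illustrative:

// Illustrative only: drives the NodeExporter impl above end to end.
async fn setup_node_exporter(firewall: &OPNSenseFirewall) -> Result<(), ExecutorError> {
    firewall.ensure_initialized().await?; // installs os-node_exporter if absent, then enables it
    firewall.commit_config().await?;      // persists the staged OPNsense configuration
    firewall.reload_restart().await       // restarts the service so metrics are served
}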
@@ -181,13 +181,11 @@ impl From<CDApplicationConfig> for ArgoApplication {
 }

 impl ArgoApplication {
-    pub fn to_yaml(&self) -> serde_yaml::Value {
+    pub fn to_yaml(&self, target_namespace: Option<&str>) -> serde_yaml::Value {
         let name = &self.name;
-        let namespace = if let Some(ns) = self.namespace.as_ref() {
-            ns
-        } else {
-            "argocd"
-        };
+        let default_ns = "argocd".to_string();
+        let namespace: &str =
+            target_namespace.unwrap_or(self.namespace.as_ref().unwrap_or(&default_ns));
         let project = &self.project;

         let yaml_str = format!(
@@ -345,7 +343,7 @@ spec:

         assert_eq!(
             expected_yaml_output.trim(),
-            serde_yaml::to_string(&app.clone().to_yaml())
+            serde_yaml::to_string(&app.clone().to_yaml(None))
                 .unwrap()
                 .trim()
         );
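The namespace resolution introduced in to_yaml has a fixed precedence: an explicit target namespace wins, then the application's own namespace, then the "argocd" default. Isolated as a sketch:

// Same precedence as to_yaml, reduced to a pure function.
fn resolve_namespace<'a>(target: Option<&'a str>, own: Option<&'a str>) -> &'a str {
    target.unwrap_or(own.unwrap_or("argocd"))
}

// resolve_namespace(Some("gitops"), Some("app-ns")) == "gitops"
// resolve_namespace(None, Some("app-ns"))           == "app-ns"
// resolve_namespace(None, None)                     == "argocd"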
@@ -1,22 +1,21 @@
 use async_trait::async_trait;
 use harmony_macros::hurl;
 use kube::{Api, api::GroupVersionKind};
-use log::{debug, warn};
+use log::{debug, info, trace, warn};
 use non_blank_string_rs::NonBlankString;
 use serde::Serialize;
-use serde::de::DeserializeOwned;
-use std::{process::Command, str::FromStr, sync::Arc};
+use std::{str::FromStr, sync::Arc};

 use crate::{
     data::Version,
     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
     inventory::Inventory,
-    modules::helm::chart::{HelmChartScore, HelmRepository},
-    score::Score,
-    topology::{
-        HelmCommand, K8sclient, PreparationError, PreparationOutcome, Topology, ingress::Ingress,
-        k8s::K8sClient,
+    modules::{
+        argocd::{ArgoDeploymentType, detect_argo_deployment_type},
+        helm::chart::{HelmChartScore, HelmRepository},
     },
+    score::Score,
+    topology::{HelmCommand, K8sclient, Topology, ingress::Ingress, k8s::K8sClient},
 };
 use harmony_types::id::Id;
@@ -25,6 +24,7 @@ use super::ArgoApplication;
 #[derive(Debug, Serialize, Clone)]
 pub struct ArgoHelmScore {
     pub namespace: String,
+    // TODO: remove and rely on topology (it now knows the flavor)
     pub openshift: bool,
     pub argo_apps: Vec<ArgoApplication>,
 }
@@ -55,29 +55,98 @@ impl<T: Topology + K8sclient + HelmCommand + Ingress> Interpret<T> for ArgoInter
         inventory: &Inventory,
         topology: &T,
     ) -> Result<Outcome, InterpretError> {
-        let k8s_client = topology.k8s_client().await?;
-        let svc = format!("argo-{}", self.score.namespace.clone());
+        trace!("Starting ArgoInterpret execution {self:?}");
+        let k8s_client: Arc<K8sClient> = topology.k8s_client().await?;
+        trace!("Got k8s client");
+        let desired_ns = self.score.namespace.clone();
+
+        debug!("ArgoInterpret detecting cluster configuration");
+        let svc = format!("argo-{}", desired_ns);
         let domain = topology.get_domain(&svc).await?;
-        let helm_score =
-            argo_helm_chart_score(&self.score.namespace, self.score.openshift, &domain);
-        helm_score.interpret(inventory, topology).await?;
+        debug!("Resolved Argo service domain for '{}': {}", svc, domain);
+
+        // Detect current Argo deployment type
+        let current = detect_argo_deployment_type(&k8s_client, &desired_ns).await?;
+        info!("Detected Argo deployment type: {:?}", current);
+
+        // Decide control namespace and whether we must install
+        let (control_ns, must_install) = match current.clone() {
+            ArgoDeploymentType::NotInstalled => {
+                info!(
+                    "Argo CD not installed. Will install via Helm into namespace '{}'.",
+                    desired_ns
+                );
+                (desired_ns.clone(), true)
+            }
+            ArgoDeploymentType::AvailableInDesiredNamespace(ns) => {
+                info!(
+                    "Argo CD already installed by Harmony in '{}'. Skipping install.",
+                    ns
+                );
+                (ns, false)
+            }
+            ArgoDeploymentType::InstalledClusterWide(ns) => {
+                info!("Argo CD installed cluster-wide in namespace '{}'.", ns);
+                (ns, false)
+            }
+            ArgoDeploymentType::InstalledNamespaceScoped(ns) => {
+                // TODO we could support this use case by installing a new argo instance. But that
+                // means handling a few cases that are out of scope for now:
+                // - Whether the argo operator is installed
+                // - Managing CRD version compatibility
+                // - Potentially handling the various k8s flavors and setups we might encounter
+                //
+                // There is a possibility that the helm chart already handles most or even all of
+                // these use cases, but they are out of scope for now.
+                let msg = format!(
+                    "Argo CD found in '{}' but it is namespace-scoped and not supported for attachment yet.",
+                    ns
+                );
+                warn!("{}", msg);
+                return Err(InterpretError::new(msg));
+            }
+        };
+
+        info!("ArgoCD will be installed: {must_install}. Current argocd status: {current:?}");
+
+        if must_install {
+            let helm_score = argo_helm_chart_score(&desired_ns, self.score.openshift, &domain);
+            info!(
+                "Installing Argo CD via Helm into namespace '{}' ...",
+                desired_ns
+            );
+            helm_score.interpret(inventory, topology).await?;
+            info!("Argo CD install complete in '{}'.", desired_ns);
+        }
+
+        let yamls: Vec<serde_yaml::Value> = self
+            .argo_apps
+            .iter()
+            .map(|a| a.to_yaml(Some(&control_ns)))
+            .collect();
+        info!(
+            "Applying {} Argo application object(s) into control namespace '{}'.",
+            yamls.len(),
+            control_ns
+        );
         k8s_client
-            .apply_yaml_many(&self.argo_apps.iter().map(|a| a.to_yaml()).collect(), None)
+            .apply_yaml_many(&yamls, Some(control_ns.as_str()))
             .await
-            .unwrap();
+            .map_err(|e| InterpretError::new(format!("Failed applying Argo CRs: {e}")))?;

         Ok(Outcome::success_with_details(
             format!(
                 "ArgoCD {} {}",
                 self.argo_apps.len(),
-                match self.argo_apps.len() {
-                    1 => "application",
-                    _ => "applications",
+                if self.argo_apps.len() == 1 {
+                    "application"
+                } else {
+                    "applications"
                 }
             ),
-            vec![format!("argo application: http://{}", domain)],
+            vec![
+                format!("control_namespace={}", control_ns),
+                format!("argo ui: http://{}", domain),
+            ],
         ))
     }
@@ -86,7 +155,7 @@ impl<T: Topology + K8sclient + HelmCommand + Ingress> Interpret<T> for ArgoInter
     }

     fn get_version(&self) -> Version {
-        todo!()
+        Version::from("0.1.0").unwrap()
     }

     fn get_status(&self) -> InterpretStatus {
@@ -94,39 +163,7 @@ impl<T: Topology + K8sclient + HelmCommand + Ingress> Interpret<T> for ArgoInter
     }

     fn get_children(&self) -> Vec<Id> {
-        todo!()
-    }
-}
-
-impl ArgoInterpret {
-    pub async fn get_host_domain(
-        &self,
-        client: Arc<K8sClient>,
-        openshift: bool,
-    ) -> Result<String, InterpretError> {
-        //This should be the job of the topology to determine if we are in
-        //openshift, potentially we need on openshift topology the same way we create a
-        //localhosttopology
-        match openshift {
-            true => {
-                let gvk = GroupVersionKind {
-                    group: "operator.openshift.io".into(),
-                    version: "v1".into(),
-                    kind: "IngressController".into(),
-                };
-                let ic = client
-                    .get_resource_json_value("default", Some("openshift-ingress-operator"), &gvk)
-                    .await?;
-
-                match ic.data["status"]["domain"].as_str() {
-                    Some(domain) => return Ok(domain.to_string()),
-                    None => return Err(InterpretError::new("Could not find domain".to_string())),
-                }
-            }
-            false => {
-                todo!()
-            }
-        };
+        vec![]
     }
 }
@@ -12,6 +12,7 @@ use crate::{
     modules::application::{
         ApplicationFeature, HelmPackage, InstallationError, InstallationOutcome, OCICompliant,
         features::{ArgoApplication, ArgoHelmScore},
+        webapp::Webapp,
     },
     score::Score,
     topology::{
@@ -47,11 +48,11 @@ use crate::{
 /// - ArgoCD to install/upgrade/rollback/inspect k8s resources
 /// - Kubernetes for runtime orchestration
 #[derive(Debug, Default, Clone)]
-pub struct PackagingDeployment<A: OCICompliant + HelmPackage> {
+pub struct PackagingDeployment<A: OCICompliant + HelmPackage + Webapp> {
     pub application: Arc<A>,
 }

-impl<A: OCICompliant + HelmPackage> PackagingDeployment<A> {
+impl<A: OCICompliant + HelmPackage + Webapp> PackagingDeployment<A> {
     async fn deploy_to_local_k3d(
         &self,
         app_name: String,
@@ -137,7 +138,7 @@ impl<A: OCICompliant + HelmPackage> PackagingDeployment<A> {

 #[async_trait]
 impl<
-    A: OCICompliant + HelmPackage + Clone + 'static,
+    A: OCICompliant + HelmPackage + Webapp + Clone + 'static,
     T: Topology + HelmCommand + MultiTargetTopology + K8sclient + Ingress + 'static,
 > ApplicationFeature<T> for PackagingDeployment<A>
 {
@@ -146,10 +147,15 @@ impl<
         topology: &T,
     ) -> Result<InstallationOutcome, InstallationError> {
         let image = self.application.image_name();
-        let domain = topology
-            .get_domain(&self.application.name())
-            .await
-            .map_err(|e| e.to_string())?;
+        let domain = if topology.current_target() == DeploymentTarget::Production {
+            self.application.dns()
+        } else {
+            topology
+                .get_domain(&self.application.name())
+                .await
+                .map_err(|e| e.to_string())?
+        };

         // TODO Write CI/CD workflow files
         // we can autodetect the CI type using the remote url (default to github action for github
@@ -193,8 +199,7 @@ impl<
             namespace: format!("{}", self.application.name()),
             openshift: true,
             argo_apps: vec![ArgoApplication::from(CDApplicationConfig {
-                // helm pull oci://hub.nationtech.io/harmony/harmony-example-rust-webapp-chart --version 0.1.0
-                version: Version::from("0.1.0").unwrap(),
+                version: Version::from("0.2.1").unwrap(),
                 helm_chart_repo_url: "hub.nationtech.io/harmony".to_string(),
                 helm_chart_name: format!("{}-chart", self.application.name()),
                 values_overrides: None,
@@ -3,7 +3,6 @@ use std::sync::Arc;
 use crate::modules::application::{
     Application, ApplicationFeature, InstallationError, InstallationOutcome,
 };
-use crate::modules::monitoring::application_monitoring::application_monitoring_score::ApplicationMonitoringScore;
 use crate::modules::monitoring::application_monitoring::rhobs_application_monitoring_score::ApplicationRHOBMonitoringScore;

 use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;
@@ -2,6 +2,7 @@ mod feature;
 pub mod features;
 pub mod oci;
 mod rust;
+mod webapp;
 use std::sync::Arc;

 pub use feature::*;
@@ -16,6 +16,7 @@ use tar::{Builder, Header};
 use walkdir::WalkDir;

 use crate::config::{REGISTRY_PROJECT, REGISTRY_URL};
+use crate::modules::application::webapp::Webapp;
 use crate::{score::Score, topology::Topology};

 use super::{Application, ApplicationFeature, ApplicationInterpret, HelmPackage, OCICompliant};
@@ -60,6 +61,10 @@ pub struct RustWebapp {
     pub project_root: PathBuf,
     pub service_port: u32,
     pub framework: Option<RustWebFramework>,
+    /// Host name that will be used in production environment.
+    ///
+    /// This is the place to put the public host name if this is a public facing webapp.
+    pub dns: String,
 }

 impl Application for RustWebapp {
@@ -68,6 +73,12 @@ impl Application for RustWebapp {
     }
 }

+impl Webapp for RustWebapp {
+    fn dns(&self) -> String {
+        self.dns.clone()
+    }
+}
+
 #[async_trait]
 impl HelmPackage for RustWebapp {
     async fn build_push_helm_package(
@@ -194,10 +205,10 @@ impl RustWebapp {
             Some(body_full(tar_data.into())),
         );

-        while let Some(mut msg) = image_build_stream.next().await {
+        while let Some(msg) = image_build_stream.next().await {
             trace!("Got bollard msg {msg:?}");
             match msg {
-                Ok(mut msg) => {
+                Ok(msg) => {
                     if let Some(progress) = msg.progress_detail {
                         info!(
                             "Build progress {}/{}",
@@ -257,7 +268,6 @@ impl RustWebapp {
             ".harmony_generated",
             "harmony",
             "node_modules",
-            "Dockerfile.harmony",
         ];
         let mut entries: Vec<_> = WalkDir::new(project_root)
             .into_iter()
@@ -461,52 +471,53 @@ impl RustWebapp {

         let (image_repo, image_tag) = image_url.rsplit_once(':').unwrap_or((image_url, "latest"));

+        let app_name = &self.name;
+        let service_port = self.service_port;
         // Create Chart.yaml
         let chart_yaml = format!(
             r#"
 apiVersion: v2
-name: {}
-description: A Helm chart for the {} web application.
+name: {chart_name}
+description: A Helm chart for the {app_name} web application.
 type: application
-version: 0.1.0
-appVersion: "{}"
+version: 0.2.1
+appVersion: "{image_tag}"
 "#,
-            chart_name, self.name, image_tag
         );
         fs::write(chart_dir.join("Chart.yaml"), chart_yaml)?;

         // Create values.yaml
         let values_yaml = format!(
             r#"
-# Default values for {}.
+# Default values for {chart_name}.
 # This is a YAML-formatted file.
 # Declare variables to be passed into your templates.

 replicaCount: 1

 image:
-  repository: {}
+  repository: {image_repo}
   pullPolicy: IfNotPresent
   # Overridden by the chart's appVersion
-  tag: "{}"
+  tag: "{image_tag}"

 service:
   type: ClusterIP
-  port: {}
+  port: {service_port}

 ingress:
   enabled: true
+  tls: true
   # Annotations for cert-manager to handle SSL.
   annotations:
     # Add other annotations like nginx ingress class if needed
     # kubernetes.io/ingress.class: nginx
   hosts:
-    - host: {}
+    - host: {domain}
       paths:
         - path: /
           pathType: ImplementationSpecific
 "#,
-            chart_name, image_repo, image_tag, self.service_port, domain,
         );
         fs::write(chart_dir.join("values.yaml"), values_yaml)?;
@@ -583,7 +594,11 @@ spec:
         );
         fs::write(templates_dir.join("deployment.yaml"), deployment_yaml)?;

+        let service_port = self.service_port;
+
         // Create templates/ingress.yaml
+        // TODO get issuer name and tls config from topology as it may be different from one
+        // cluster to another, also from one version to another
         let ingress_yaml = format!(
             r#"
 {{{{- if $.Values.ingress.enabled -}}}}
@@ -596,13 +611,11 @@ metadata:
 spec:
   {{{{- if $.Values.ingress.tls }}}}
   tls:
-  {{{{- range $.Values.ingress.tls }}}}
-  - hosts:
-    {{{{- range .hosts }}}}
-    - {{{{ . | quote }}}}
-    {{{{- end }}}}
-    secretName: {{{{ .secretName }}}}
-  {{{{- end }}}}
+  - secretName: {{{{ include "chart.fullname" . }}}}-tls
+    hosts:
+    {{{{- range $.Values.ingress.hosts }}}}
+    - {{{{ .host | quote }}}}
+    {{{{- end }}}}
   {{{{- end }}}}
   rules:
   {{{{- range $.Values.ingress.hosts }}}}
@@ -616,12 +629,11 @@ spec:
           service:
             name: {{{{ include "chart.fullname" $ }}}}
             port:
-              number: {{{{ $.Values.service.port | default {} }}}}
+              number: {{{{ $.Values.service.port | default {service_port} }}}}
       {{{{- end }}}}
   {{{{- end }}}}
 {{{{- end }}}}
 "#,
-            self.service_port
         );
         fs::write(templates_dir.join("ingress.yaml"), ingress_yaml)?;
7 harmony/src/modules/application/webapp.rs Normal file
@@ -0,0 +1,7 @@
use super::Application;
use async_trait::async_trait;

#[async_trait]
pub trait Webapp: Application {
    fn dns(&self) -> String;
}
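Any type that already implements Application can opt into this new trait by returning its production host name. A hedged sketch with a hypothetical MyApp type:

// MyApp is illustrative; it stands for any existing Application implementor.
impl Webapp for MyApp {
    fn dns(&self) -> String {
        // The public host name served in production.
        "app.example.com".to_string()
    }
}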
208 harmony/src/modules/argocd/mod.rs Normal file
@@ -0,0 +1,208 @@
use std::sync::Arc;

use log::{debug, info};

use crate::{interpret::InterpretError, topology::k8s::K8sClient};

#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ArgoScope {
    ClusterWide(String),
    NamespaceScoped(String),
}

#[derive(Clone, Debug)]
pub struct DiscoveredArgo {
    pub control_namespace: String,
    pub scope: ArgoScope,
    pub has_crds: bool,
    pub has_applicationset: bool,
}

#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ArgoDeploymentType {
    NotInstalled,
    AvailableInDesiredNamespace(String),
    InstalledClusterWide(String),
    InstalledNamespaceScoped(String),
}

pub async fn discover_argo_all(
    k8s: &Arc<K8sClient>,
) -> Result<Vec<DiscoveredArgo>, InterpretError> {
    use log::{debug, info, trace, warn};

    trace!("Starting Argo discovery");

    // CRDs
    let mut has_crds = true;
    let required_crds = vec!["applications.argoproj.io", "appprojects.argoproj.io"];
    trace!("Checking required Argo CRDs: {:?}", required_crds);

    for crd in required_crds {
        trace!("Verifying CRD presence: {crd}");
        let crd_exists = k8s.has_crd(crd).await.map_err(|e| {
            InterpretError::new(format!("Failed to verify existence of CRD {crd}: {e}"))
        })?;

        debug!("CRD {crd} exists: {crd_exists}");
        if !crd_exists {
            info!(
                "Missing Argo CRD {crd}, looks like Argo CD is not installed (or partially installed)"
            );
            has_crds = false;
            break;
        }
    }

    trace!(
        "Listing namespaces with healthy Argo CD deployments using selector app.kubernetes.io/part-of=argocd"
    );
    let mut candidate_namespaces = k8s
        .list_namespaces_with_healthy_deployments("app.kubernetes.io/part-of=argocd")
        .await
        .map_err(|e| InterpretError::new(format!("List healthy argocd deployments: {e}")))?;
    trace!(
        "Listing namespaces with healthy Argo CD deployments using selector app.kubernetes.io/name=argo-cd"
    );
    candidate_namespaces.append(
        &mut k8s
            .list_namespaces_with_healthy_deployments("app.kubernetes.io/name=argo-cd")
            .await
            .map_err(|e| InterpretError::new(format!("List healthy argocd deployments: {e}")))?,
    );

    debug!(
        "Discovered {} candidate namespace(s) for Argo CD: {:?}",
        candidate_namespaces.len(),
        candidate_namespaces
    );

    let mut found = Vec::new();
    for ns in candidate_namespaces {
        trace!("Evaluating namespace '{ns}' for Argo CD instance");

        // Require the application-controller to be healthy (sanity check)
        trace!(
            "Checking healthy deployment with label app.kubernetes.io/name=argocd-application-controller in namespace '{ns}'"
        );
        let controller_ok = k8s
            .has_healthy_deployment_with_label(
                &ns,
                "app.kubernetes.io/name=argocd-application-controller",
            )
            .await
            .unwrap_or_else(|e| {
                warn!(
                    "Error while checking application-controller health in namespace '{ns}': {e}"
                );
                false
            }) || k8s
            .has_healthy_deployment_with_label(
                &ns,
                "app.kubernetes.io/component=controller",
            )
            .await
            .unwrap_or_else(|e| {
                warn!(
                    "Error while checking application-controller health in namespace '{ns}': {e}"
                );
                false
            });
        debug!("Namespace '{ns}': application-controller healthy = {controller_ok}");

        if !controller_ok {
            trace!("Skipping namespace '{ns}' because application-controller is not healthy");
            continue;
        }

        trace!("Determining Argo CD scope for namespace '{ns}' (cluster-wide vs namespace-scoped)");

        let sa = k8s
            .get_controller_service_account_name(&ns)
            .await?
            .unwrap_or("argocd-application-controller".to_string());
        let scope = match k8s.is_service_account_cluster_wide(&sa, &ns).await {
            Ok(true) => {
                debug!("Namespace '{ns}' identified as cluster-wide Argo CD control plane");
                ArgoScope::ClusterWide(ns.to_string())
            }
            Ok(false) => {
                debug!("Namespace '{ns}' identified as namespace-scoped Argo CD control plane");
                ArgoScope::NamespaceScoped(ns.to_string())
            }
            Err(e) => {
                warn!(
                    "Failed to determine Argo CD scope for namespace '{ns}': {e}. Assuming namespace-scoped."
                );
                ArgoScope::NamespaceScoped(ns.to_string())
            }
        };

        trace!("Checking optional ApplicationSet CRD (applicationsets.argoproj.io)");
        let has_applicationset = match k8s.has_crd("applicationsets.argoproj.io").await {
            Ok(v) => {
                debug!("applicationsets.argoproj.io present: {v}");
                v
            }
            Err(e) => {
                warn!("Failed to check applicationsets.argoproj.io CRD: {e}. Assuming absent.");
                false
            }
        };

        let argo = DiscoveredArgo {
            control_namespace: ns.clone(),
            scope,
            has_crds,
            has_applicationset,
        };

        debug!("Discovered Argo instance in '{ns}': {argo:?}");
        found.push(argo);
    }

    if found.is_empty() {
        info!("No Argo CD installations discovered");
    } else {
        info!(
            "Argo CD discovery complete: {} instance(s) found",
            found.len()
        );
    }

    Ok(found)
}

pub async fn detect_argo_deployment_type(
    k8s: &Arc<K8sClient>,
    desired_namespace: &str,
) -> Result<ArgoDeploymentType, InterpretError> {
    let discovered = discover_argo_all(k8s).await?;
    debug!("Discovered argo instances {discovered:?}");

    if discovered.is_empty() {
        return Ok(ArgoDeploymentType::NotInstalled);
    }

    if let Some(d) = discovered
        .iter()
        .find(|d| d.control_namespace == desired_namespace)
    {
        return Ok(ArgoDeploymentType::AvailableInDesiredNamespace(
            d.control_namespace.clone(),
        ));
    }

    if let Some(d) = discovered
        .iter()
        .find(|d| matches!(d.scope, ArgoScope::ClusterWide(_)))
    {
        return Ok(ArgoDeploymentType::InstalledClusterWide(
            d.control_namespace.clone(),
        ));
    }

    Ok(ArgoDeploymentType::InstalledNamespaceScoped(
        discovered[0].control_namespace.clone(),
    ))
}
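detect_argo_deployment_type resolves to exactly one variant, with a fixed precedence: an instance in the desired namespace wins, then any cluster-wide instance, then the first namespace-scoped one. A hedged sketch of a caller branching on the result (k8s and ns are assumed to be in scope):

match detect_argo_deployment_type(&k8s, &ns).await? {
    ArgoDeploymentType::NotInstalled => {
        // install Argo CD via Helm into `ns`
    }
    ArgoDeploymentType::AvailableInDesiredNamespace(found) => {
        // reuse the instance already in `found`
    }
    ArgoDeploymentType::InstalledClusterWide(found) => {
        // attach applications to the cluster-wide control plane in `found`
    }
    ArgoDeploymentType::InstalledNamespaceScoped(_found) => {
        // not supported for attachment yet; surface an error to the caller
    }
}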
116 harmony/src/modules/brocade.rs Normal file
@@ -0,0 +1,116 @@
use std::net::{IpAddr, Ipv4Addr};

use async_trait::async_trait;
use brocade::BrocadeOptions;
use harmony_secret::{Secret, SecretManager};
use harmony_types::id::Id;
use serde::{Deserialize, Serialize};

use crate::{
    data::Version,
    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
    inventory::Inventory,
    score::Score,
    topology::Topology,
};

#[derive(Debug, Clone, Serialize)]
pub struct BrocadeEnableSnmpScore {
    pub switch_ips: Vec<IpAddr>,
    pub dry_run: bool,
}

impl<T: Topology> Score<T> for BrocadeEnableSnmpScore {
    fn name(&self) -> String {
        "BrocadeEnableSnmpScore".to_string()
    }

    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
        Box::new(BrocadeEnableSnmpInterpret {
            score: self.clone(),
        })
    }
}

#[derive(Debug, Clone, Serialize)]
pub struct BrocadeEnableSnmpInterpret {
    score: BrocadeEnableSnmpScore,
}

#[derive(Secret, Clone, Debug, Serialize, Deserialize)]
struct BrocadeSwitchAuth {
    username: String,
    password: String,
}

#[derive(Secret, Clone, Debug, Serialize, Deserialize)]
struct BrocadeSnmpAuth {
    username: String,
    auth_password: String,
    des_password: String,
}

#[async_trait]
impl<T: Topology> Interpret<T> for BrocadeEnableSnmpInterpret {
    async fn execute(
        &self,
        _inventory: &Inventory,
        _topology: &T,
    ) -> Result<Outcome, InterpretError> {
        let switch_addresses = &self.score.switch_ips;

        let snmp_auth = SecretManager::get_or_prompt::<BrocadeSnmpAuth>()
            .await
            .unwrap();

        let config = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
            .await
            .unwrap();

        let brocade = brocade::init(
            &switch_addresses,
            &config.username,
            &config.password,
            BrocadeOptions {
                dry_run: self.score.dry_run,
                ..Default::default()
            },
        )
        .await
        .expect("Brocade client failed to connect");

        brocade
            .enable_snmp(
                &snmp_auth.username,
                &snmp_auth.auth_password,
                &snmp_auth.des_password,
            )
            .await
            .map_err(|e| InterpretError::new(e.to_string()))?;

        Ok(Outcome::success(format!(
            "Activated snmp server for Brocade at {}",
            switch_addresses
                .iter()
                .map(|s| s.to_string())
                .collect::<Vec<_>>()
                .join(", ")
        )))
    }

    fn get_name(&self) -> InterpretName {
        InterpretName::Custom("BrocadeEnableSnmpInterpret")
    }

    fn get_version(&self) -> Version {
        todo!()
    }

    fn get_status(&self) -> InterpretStatus {
        todo!()
    }

    fn get_children(&self) -> Vec<Id> {
        todo!()
    }
}
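A hedged usage sketch for the new score; the address is a placeholder, and dry_run is taken at face value from the field name (commands previewed rather than applied):

use std::net::{IpAddr, Ipv4Addr};

let score = BrocadeEnableSnmpScore {
    switch_ips: vec![IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1))],
    dry_run: true,
};
// score.create_interpret() then runs through the usual Score/Interpret pipeline;
// switch and SNMP credentials are prompted for and cached by SecretManager.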
@@ -19,8 +19,11 @@ pub struct DhcpScore {
     pub host_binding: Vec<HostBinding>,
     pub next_server: Option<IpAddress>,
     pub boot_filename: Option<String>,
+    /// Boot filename to be provided to PXE clients identifying as BIOS
     pub filename: Option<String>,
+    /// Boot filename to be provided to PXE clients identifying as uefi but NOT iPXE
     pub filename64: Option<String>,
+    /// Boot filename to be provided to PXE clients identifying as iPXE
     pub filenameipxe: Option<String>,
     pub dhcp_range: (IpAddress, IpAddress),
     pub domain: Option<String>,
@@ -5,11 +5,10 @@ use serde::{Deserialize, Serialize};

 use crate::{
     data::Version,
-    hardware::PhysicalHost,
     infra::inventory::InventoryRepositoryFactory,
     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
     inventory::{HostRole, Inventory},
-    modules::inventory::LaunchDiscoverInventoryAgentScore,
+    modules::inventory::{HarmonyDiscoveryStrategy, LaunchDiscoverInventoryAgentScore},
     score::Score,
     topology::Topology,
 };
@@ -17,11 +16,13 @@ use crate::{
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct DiscoverHostForRoleScore {
     pub role: HostRole,
+    pub number_desired_hosts: i16,
+    pub discovery_strategy: HarmonyDiscoveryStrategy,
 }

 impl<T: Topology> Score<T> for DiscoverHostForRoleScore {
     fn name(&self) -> String {
-        "DiscoverInventoryAgentScore".to_string()
+        format!("DiscoverHostForRoleScore({:?})", self.role)
     }

     fn create_interpret(&self) -> Box<dyn Interpret<T>> {
@@ -48,13 +49,15 @@ impl<T: Topology> Interpret<T> for DiscoverHostForRoleInterpret {
         );
         LaunchDiscoverInventoryAgentScore {
             discovery_timeout: None,
+            discovery_strategy: self.score.discovery_strategy.clone(),
         }
         .interpret(inventory, topology)
         .await?;

-        let host: PhysicalHost;
+        let mut chosen_hosts = vec![];
         let host_repo = InventoryRepositoryFactory::build().await?;
+
+        let mut assigned_hosts = 0;
         loop {
             let all_hosts = host_repo.get_all_hosts().await?;

@@ -74,12 +77,25 @@ impl<T: Topology> Interpret<T> for DiscoverHostForRoleInterpret {

             match ans {
                 Ok(choice) => {
-                    info!("Selected {} as the bootstrap node.", choice.summary());
+                    info!(
+                        "Assigned role {:?} for node {}",
+                        self.score.role,
+                        choice.summary()
+                    );
                     host_repo
                         .save_role_mapping(&self.score.role, &choice)
                         .await?;
-                    host = choice;
-                    break;
+                    chosen_hosts.push(choice);
+                    assigned_hosts += 1;
+
+                    info!(
+                        "Found {assigned_hosts} hosts for role {:?}",
+                        self.score.role
+                    );
+
+                    if assigned_hosts == self.score.number_desired_hosts {
+                        break;
+                    }
                 }
                 Err(inquire::InquireError::OperationCanceled) => {
                     info!("Refresh requested. Fetching list of discovered hosts again...");
@@ -90,17 +106,19 @@ impl<T: Topology> Interpret<T> for DiscoverHostForRoleInterpret {
                         "Failed to select node for role {:?} : {}",
                         self.score.role, e
                     );
-                    return Err(InterpretError::new(format!(
-                        "Could not select host : {}",
-                        e.to_string()
-                    )));
+                    return Err(InterpretError::new(format!("Could not select host : {e}")));
                 }
             }
         }

         Ok(Outcome::success(format!(
-            "Successfully discovered host {} for role {:?}",
-            host.summary(),
+            "Successfully discovered {} hosts {} for role {:?}",
+            self.score.number_desired_hosts,
+            chosen_hosts
+                .iter()
+                .map(|h| h.summary())
+                .collect::<Vec<String>>()
+                .join(", "),
             self.score.role
         )))
     }
@@ -1,6 +1,10 @@
 mod discovery;
 pub mod inspect;
+use std::net::Ipv4Addr;
+
+use cidr::{Ipv4Cidr, Ipv4Inet};
 pub use discovery::*;
+use tokio::time::{Duration, timeout};

 use async_trait::async_trait;
 use harmony_inventory_agent::local_presence::DiscoveryEvent;
@@ -24,6 +28,7 @@ use harmony_types::id::Id;
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct LaunchDiscoverInventoryAgentScore {
     pub discovery_timeout: Option<u64>,
+    pub discovery_strategy: HarmonyDiscoveryStrategy,
 }

 impl<T: Topology> Score<T> for LaunchDiscoverInventoryAgentScore {
@@ -43,6 +48,12 @@ struct DiscoverInventoryAgentInterpret {
     score: LaunchDiscoverInventoryAgentScore,
 }

+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub enum HarmonyDiscoveryStrategy {
+    MDNS,
+    SUBNET { cidr: cidr::Ipv4Cidr, port: u16 },
+}
+
 #[async_trait]
 impl<T: Topology> Interpret<T> for DiscoverInventoryAgentInterpret {
     async fn execute(
@@ -57,6 +68,37 @@ impl<T: Topology> Interpret<T> for DiscoverInventoryAgentInterpret {
             ),
         };
+
+        match self.score.discovery_strategy {
+            HarmonyDiscoveryStrategy::MDNS => self.launch_mdns_discovery().await,
+            HarmonyDiscoveryStrategy::SUBNET { cidr, port } => {
+                self.launch_cidr_discovery(&cidr, port).await
+            }
+        };
+
+        Ok(Outcome::success(
+            "Discovery process completed successfully".to_string(),
+        ))
+    }
+
+    fn get_name(&self) -> InterpretName {
+        InterpretName::DiscoverInventoryAgent
+    }
+
+    fn get_version(&self) -> Version {
+        todo!()
+    }
+
+    fn get_status(&self) -> InterpretStatus {
+        todo!()
+    }
+
+    fn get_children(&self) -> Vec<Id> {
+        todo!()
+    }
+}
+
+impl DiscoverInventoryAgentInterpret {
+    async fn launch_mdns_discovery(&self) {
         harmony_inventory_agent::local_presence::discover_agents(
             self.score.discovery_timeout,
             |event: DiscoveryEvent| -> Result<(), String> {
@@ -88,6 +130,103 @@ impl<T: Topology> Interpret<T> for DiscoverInventoryAgentInterpret {
                     trace!("Found host information {host:?}");
                     // TODO its useless to have two distinct host types but requires a bit much
                     // refactoring to do it now
+                    let harmony_inventory_agent::hwinfo::PhysicalHost {
+                        storage_drives,
+                        storage_controller: _,
+                        memory_modules,
+                        cpus,
+                        chipset: _,
+                        network_interfaces,
+                        management_interface: _,
+                        host_uuid,
+                    } = host;
+
+                    let host = PhysicalHost {
+                        id: Id::from(host_uuid),
+                        category: HostCategory::Server,
+                        network: network_interfaces,
+                        storage: storage_drives,
+                        labels: vec![Label {
+                            name: "discovered-by".to_string(),
+                            value: "harmony-inventory-agent".to_string(),
+                        }],
+                        memory_modules,
+                        cpus,
+                    };
+
+                    // FIXME only save the host when it is new or something changed in it.
+                    // we currently are saving the host every time it is discovered.
+                    let repo = InventoryRepositoryFactory::build()
+                        .await
+                        .map_err(|e| format!("Could not build repository : {e}"))
+                        .unwrap();
+                    repo.save(&host)
+                        .await
+                        .map_err(|e| format!("Could not save host : {e}"))
+                        .unwrap();
+                    info!(
+                        "Saved new host id {}, summary : {}",
+                        host.id,
+                        host.summary()
+                    );
+                });
+            }
+            _ => debug!("Unhandled event {event:?}"),
+        };
+        Ok(())
+    },
+)
+.await
+    }
+
+    // async fn launch_cidr_discovery(&self, cidr : &Ipv4Cidr, port: u16) {
+    //     todo!("launch cidr discovery for {cidr} : {port}
+    //     - Iterate over all possible addresses in cidr
+    //     - make calls in batches of 20 attempting to reach harmony inventory agent on <addr, port> using same as above harmony_inventory_agent::client::get_host_inventory(&address, port)
+    //     - Log warn when response is 404, it means the port was used by something else unexpected
+    //     - Log error when response is 5xx
+    //     - Log debug when no response (timeout 15 seconds)
+    //     - Log info when found and response is 2xx
+    //     ");
+    // }
+    async fn launch_cidr_discovery(&self, cidr: &Ipv4Cidr, port: u16) {
+        let addrs: Vec<Ipv4Inet> = cidr.iter().collect();
+        let total = addrs.len();
+        info!(
+            "Starting CIDR discovery for {} hosts on {}/{} (port {})",
+            total,
+            cidr.network_length(),
+            cidr,
+            port
+        );
+
+        let batch_size: usize = 20;
+        let timeout_secs = 5;
+        let request_timeout = Duration::from_secs(timeout_secs);
+
+        let mut current_batch = 0;
+        let num_batches = addrs.len() / batch_size;
+
+        for batch in addrs.chunks(batch_size) {
+            current_batch += 1;
+            info!("Starting query batch {current_batch} of {num_batches}, timeout {timeout_secs}");
+            let mut tasks = Vec::with_capacity(batch.len());
+
+            for addr in batch {
+                let addr = addr.address().to_string();
+                let port = port;
+
+                let task = tokio::spawn(async move {
+                    match timeout(
+                        request_timeout,
+                        harmony_inventory_agent::client::get_host_inventory(&addr, port),
+                    )
+                    .await
+                    {
+                        Ok(Ok(host)) => {
+                            info!("Found and response is 2xx for {addr}:{port}");
+
+                            // Reuse the same conversion to PhysicalHost as MDNS flow
                             let harmony_inventory_agent::hwinfo::PhysicalHost {
                                 storage_drives,
                                 storage_controller,
@@ -112,45 +251,36 @@ impl<T: Topology> Interpret<T> for DiscoverInventoryAgentInterpret {
                                 cpus,
                             };

+                            // Save host to inventory
                             let repo = InventoryRepositoryFactory::build()
                                 .await
                                 .map_err(|e| format!("Could not build repository : {e}"))
                                 .unwrap();
-                            repo.save(&host)
-                                .await
-                                .map_err(|e| format!("Could not save host : {e}"))
-                                .unwrap();
-                            info!(
-                                "Saved new host id {}, summary : {}",
-                                host.id,
-                                host.summary()
-                            );
-                        });
+                            if let Err(e) = repo.save(&host).await {
+                                log::debug!("Failed to save host {}: {e}", host.id);
+                            } else {
+                                info!("Saved host id {}, summary : {}", host.id, host.summary());
+                            }
+                        }
+                        Ok(Err(e)) => {
+                            log::info!("Error querying inventory agent on {addr}:{port} : {e}");
+                        }
+                        Err(_) => {
+                            // Timeout for this host
+                            log::debug!("No response (timeout) for {addr}:{port}");
+                        }
                     }
-                    _ => debug!("Unhandled event {event:?}"),
-                };
-                Ok(())
-            },
-        )
-        .await;
-        Ok(Outcome::success(
-            "Discovery process completed successfully".to_string(),
-        ))
-    }
-
-    fn get_name(&self) -> InterpretName {
-        InterpretName::DiscoverInventoryAgent
-    }
-
-    fn get_version(&self) -> Version {
-        todo!()
-    }
-
-    fn get_status(&self) -> InterpretStatus {
-        todo!()
-    }
-
-    fn get_children(&self) -> Vec<Id> {
-        todo!()
-    }
+                });
+
+                tasks.push(task);
+            }
+
+            // Wait for this batch to complete
+            for t in tasks {
+                let _ = t.await;
+            }
+        }

+        info!("CIDR discovery completed");
     }
 }
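The CIDR strategy caps concurrency at 20 in-flight probes and bounds each with a timeout so one silent host cannot stall a batch. The core pattern, reduced to a self-contained sketch (the probe closure stands in for harmony_inventory_agent::client::get_host_inventory):

use std::future::Future;
use tokio::time::{Duration, timeout};

// Probe addresses in fixed-size batches, bounding every request with a timeout.
async fn scan<F, Fut>(addrs: Vec<String>, probe: F)
where
    F: Fn(String) -> Fut + Clone + Send + 'static,
    Fut: Future<Output = Result<(), String>> + Send + 'static,
{
    for batch in addrs.chunks(20) {
        let tasks: Vec<_> = batch
            .iter()
            .cloned()
            .map(|addr| {
                let probe = probe.clone();
                tokio::spawn(async move {
                    // A timeout turns an unreachable host into Err(Elapsed) instead of a hang.
                    let _ = timeout(Duration::from_secs(5), probe(addr)).await;
                })
            })
            .collect();
        for t in tasks {
            let _ = t.await; // drain the batch before starting the next one
        }
    }
}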
@@ -0,0 +1,157 @@
|
|||||||
|
use std::collections::BTreeMap;
|
||||||
|
|
||||||
|
use k8s_openapi::{
|
||||||
|
api::core::v1::{Affinity, Toleration},
|
||||||
|
apimachinery::pkg::apis::meta::v1::ObjectMeta,
|
||||||
|
};
|
||||||
|
use kube::CustomResource;
|
||||||
|
use schemars::JsonSchema;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use serde_json::Value;
|
||||||
|
|
||||||
|
#[derive(CustomResource, Deserialize, Serialize, Clone, Debug)]
|
||||||
|
#[kube(
|
||||||
|
group = "operators.coreos.com",
|
||||||
|
version = "v1alpha1",
|
||||||
|
kind = "CatalogSource",
|
||||||
|
plural = "catalogsources",
|
+    namespaced = true,
+    schema = "disabled"
+)]
+#[serde(rename_all = "camelCase")]
+pub struct CatalogSourceSpec {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub address: Option<String>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub config_map: Option<String>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub description: Option<String>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub display_name: Option<String>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub grpc_pod_config: Option<GrpcPodConfig>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub icon: Option<Icon>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub image: Option<String>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub priority: Option<i64>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub publisher: Option<String>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub run_as_root: Option<bool>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub secrets: Option<Vec<String>>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub source_type: Option<String>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub update_strategy: Option<UpdateStrategy>,
+}
+
+#[derive(Deserialize, Serialize, Clone, Debug)]
+#[serde(rename_all = "camelCase")]
+pub struct GrpcPodConfig {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub affinity: Option<Affinity>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub extract_content: Option<ExtractContent>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub memory_target: Option<Value>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub node_selector: Option<BTreeMap<String, String>>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub priority_class_name: Option<String>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub security_context_config: Option<String>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub tolerations: Option<Vec<Toleration>>,
+}
+
+#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
+#[serde(rename_all = "camelCase")]
+pub struct ExtractContent {
+    pub cache_dir: String,
+    pub catalog_dir: String,
+}
+
+#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
+#[serde(rename_all = "camelCase")]
+pub struct Icon {
+    pub base64data: String,
+    pub mediatype: String,
+}
+
+#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
+#[serde(rename_all = "camelCase")]
+pub struct UpdateStrategy {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub registry_poll: Option<RegistryPoll>,
+}
+
+#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
+#[serde(rename_all = "camelCase")]
+pub struct RegistryPoll {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub interval: Option<String>,
+}
+
+impl Default for CatalogSource {
+    fn default() -> Self {
+        Self {
+            metadata: ObjectMeta::default(),
+            spec: CatalogSourceSpec {
+                address: None,
+                config_map: None,
+                description: None,
+                display_name: None,
+                grpc_pod_config: None,
+                icon: None,
+                image: None,
+                priority: None,
+                publisher: None,
+                run_as_root: None,
+                secrets: None,
+                source_type: None,
+                update_strategy: None,
+            },
+        }
+    }
+}
+
+impl Default for CatalogSourceSpec {
+    fn default() -> Self {
+        Self {
+            address: None,
+            config_map: None,
+            description: None,
+            display_name: None,
+            grpc_pod_config: None,
+            icon: None,
+            image: None,
+            priority: None,
+            publisher: None,
+            run_as_root: None,
+            secrets: None,
+            source_type: None,
+            update_strategy: None,
+        }
+    }
+}
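For orientation (not part of the commit): a minimal sketch of filling in the generated spec by hand. Only the type and field names are taken from this diff; the import path assumes the re-exports added in `mod.rs` below.

```rust
// Hypothetical usage sketch; only the type and field names come from this diff.
use harmony::modules::k8s::apps::crd::{
    CatalogSource, CatalogSourceSpec, RegistryPoll, UpdateStrategy,
};

fn example_catalog_source() -> CatalogSource {
    CatalogSource {
        spec: CatalogSourceSpec {
            source_type: Some("grpc".to_string()),
            image: Some("quay.io/operatorhubio/catalog:latest".to_string()),
            update_strategy: Some(UpdateStrategy {
                registry_poll: Some(RegistryPoll {
                    interval: Some("60m".to_string()),
                }),
            }),
            // Every other field is optional and skipped during serialization.
            ..CatalogSourceSpec::default()
        },
        ..CatalogSource::default()
    }
}
```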
4  harmony/src/modules/k8s/apps/crd/mod.rs  Normal file
@@ -0,0 +1,4 @@
+mod catalogsources_operators_coreos_com;
+pub use catalogsources_operators_coreos_com::*;
+mod subscriptions_operators_coreos_com;
+pub use subscriptions_operators_coreos_com::*;
@@ -0,0 +1,68 @@
+use k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta;
+use kube::CustomResource;
+use serde::{Deserialize, Serialize};
+
+#[derive(CustomResource, Deserialize, Serialize, Clone, Debug)]
+#[kube(
+    group = "operators.coreos.com",
+    version = "v1alpha1",
+    kind = "Subscription",
+    plural = "subscriptions",
+    namespaced = true,
+    schema = "disabled"
+)]
+#[serde(rename_all = "camelCase")]
+pub struct SubscriptionSpec {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub channel: Option<String>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub config: Option<SubscriptionConfig>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub install_plan_approval: Option<String>,
+
+    pub name: String,
+
+    pub source: String,
+
+    pub source_namespace: String,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub starting_csv: Option<String>,
+}
+#[derive(Deserialize, Serialize, Clone, Debug)]
+#[serde(rename_all = "camelCase")]
+pub struct SubscriptionConfig {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub env: Option<Vec<k8s_openapi::api::core::v1::EnvVar>>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub node_selector: Option<std::collections::BTreeMap<String, String>>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub tolerations: Option<Vec<k8s_openapi::api::core::v1::Toleration>>,
+}
+
+impl Default for Subscription {
+    fn default() -> Self {
+        Subscription {
+            metadata: ObjectMeta::default(),
+            spec: SubscriptionSpec::default(),
+        }
+    }
+}
+
+impl Default for SubscriptionSpec {
+    fn default() -> SubscriptionSpec {
+        SubscriptionSpec {
+            name: String::new(),
+            source: String::new(),
+            source_namespace: String::new(),
+            channel: None,
+            config: None,
+            install_plan_approval: None,
+            starting_csv: None,
+        }
+    }
+}
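A similarly hedged sketch (not part of the commit) of creating a subscription with this new CRD. `Subscription::new` is generated by kube's `CustomResource` derive; the operator, channel, and namespace values are illustrative assumptions.

```rust
// Hypothetical sketch; only the types and field names come from this diff.
use harmony::modules::k8s::apps::crd::{Subscription, SubscriptionSpec};

fn example_subscription() -> Subscription {
    let mut sub = Subscription::new(
        "cert-manager", // illustrative operator name
        SubscriptionSpec {
            name: "cert-manager".to_string(),
            source: "operatorhubio-catalog".to_string(),
            source_namespace: "openshift-marketplace".to_string(),
            channel: Some("stable".to_string()), // illustrative channel
            ..SubscriptionSpec::default()
        },
    );
    sub.metadata.namespace = Some("operators".to_string());
    sub
}
```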
3  harmony/src/modules/k8s/apps/mod.rs  Normal file
@@ -0,0 +1,3 @@
+mod operatorhub;
+pub use operatorhub::*;
+pub mod crd;
107  harmony/src/modules/k8s/apps/operatorhub.rs  Normal file
@@ -0,0 +1,107 @@
+// Write operatorhub catalog score
+// For now this only supports OKD with the default catalog and OperatorHub setup, and it does not verify OLM state or anything else. Very opinionated and bare-bones to start.
+
+use k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta;
+use serde::Serialize;
+
+use crate::interpret::Interpret;
+use crate::modules::k8s::apps::crd::{
+    CatalogSource, CatalogSourceSpec, RegistryPoll, UpdateStrategy,
+};
+use crate::modules::k8s::resource::K8sResourceScore;
+use crate::score::Score;
+use crate::topology::{K8sclient, Topology};
+
+/// Installs the CatalogSource in a cluster which already has the required services and CRDs installed.
+///
+/// ```rust
+/// use harmony::modules::k8s::apps::OperatorHubCatalogSourceScore;
+///
+/// let score = OperatorHubCatalogSourceScore::default();
+/// ```
+///
+/// Required services:
+/// - catalog-operator
+/// - olm-operator
+///
+/// They are installed by default with OKD/OpenShift.
+///
+/// **Warning**: this initial implementation does not manage the dependencies. They must already
+/// exist in the cluster.
+#[derive(Debug, Clone, Serialize)]
+pub struct OperatorHubCatalogSourceScore {
+    pub name: String,
+    pub namespace: String,
+    pub image: String,
+}
+
+impl OperatorHubCatalogSourceScore {
+    pub fn new(name: &str, namespace: &str, image: &str) -> Self {
+        Self {
+            name: name.to_string(),
+            namespace: namespace.to_string(),
+            image: image.to_string(),
+        }
+    }
+}
+
+impl Default for OperatorHubCatalogSourceScore {
+    /// This default implementation will create this k8s resource:
+    ///
+    /// ```yaml
+    /// apiVersion: operators.coreos.com/v1alpha1
+    /// kind: CatalogSource
+    /// metadata:
+    ///   name: operatorhubio-catalog
+    ///   namespace: openshift-marketplace
+    /// spec:
+    ///   sourceType: grpc
+    ///   image: quay.io/operatorhubio/catalog:latest
+    ///   displayName: Operatorhub Operators
+    ///   publisher: OperatorHub.io
+    ///   updateStrategy:
+    ///     registryPoll:
+    ///       interval: 60m
+    /// ```
+    fn default() -> Self {
+        OperatorHubCatalogSourceScore {
+            name: "operatorhubio-catalog".to_string(),
+            namespace: "openshift-marketplace".to_string(),
+            image: "quay.io/operatorhubio/catalog:latest".to_string(),
+        }
+    }
+}
+
+impl<T: Topology + K8sclient> Score<T> for OperatorHubCatalogSourceScore {
+    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
+        let metadata = ObjectMeta {
+            name: Some(self.name.clone()),
+            namespace: Some(self.namespace.clone()),
+            ..ObjectMeta::default()
+        };
+
+        let spec = CatalogSourceSpec {
+            source_type: Some("grpc".to_string()),
+            image: Some(self.image.clone()),
+            display_name: Some("Operatorhub Operators".to_string()),
+            publisher: Some("OperatorHub.io".to_string()),
+            update_strategy: Some(UpdateStrategy {
+                registry_poll: Some(RegistryPoll {
+                    interval: Some("60m".to_string()),
+                }),
+            }),
+            ..CatalogSourceSpec::default()
+        };
+
+        let catalog_source = CatalogSource { metadata, spec };
+
+        K8sResourceScore::single(catalog_source, Some(self.namespace.clone())).create_interpret()
+    }
+
+    fn name(&self) -> String {
+        format!("OperatorHubCatalogSourceScore({})", self.name)
+    }
+}
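A hedged usage sketch (not part of the commit) of the new score pointed at a non-default catalog image. `OperatorHubCatalogSourceScore::new` is the constructor added above; the image tag shown is illustrative.

```rust
use harmony::modules::k8s::apps::OperatorHubCatalogSourceScore;

// Same shape as Default, but with a pinned catalog tag (illustrative value).
fn pinned_catalog_score() -> OperatorHubCatalogSourceScore {
    OperatorHubCatalogSourceScore::new(
        "operatorhubio-catalog",
        "openshift-marketplace",
        "quay.io/operatorhubio/catalog:v4.15", // hypothetical pinned tag
    )
}
// The result can then be interpreted like any other Score<T>
// where T: Topology + K8sclient.
```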
@@ -1,3 +1,4 @@
+pub mod apps;
 pub mod deployment;
 pub mod ingress;
 pub mod namespace;
@@ -38,13 +38,15 @@ impl<
         + 'static
         + Send
         + Clone,
-    T: Topology,
+    T: Topology + K8sclient,
 > Score<T> for K8sResourceScore<K>
 where
     <K as kube::Resource>::DynamicType: Default,
 {
     fn create_interpret(&self) -> Box<dyn Interpret<T>> {
-        todo!()
+        Box::new(K8sResourceInterpret {
+            score: self.clone(),
+        })
     }

     fn name(&self) -> String {
@@ -1,4 +1,6 @@
 pub mod application;
+pub mod argocd;
+pub mod brocade;
 pub mod cert_manager;
 pub mod dhcp;
 pub mod dns;
@@ -13,6 +15,7 @@ pub mod load_balancer;
 pub mod monitoring;
 pub mod okd;
 pub mod opnsense;
+pub mod postgresql;
 pub mod prometheus;
 pub mod storage;
 pub mod tenant;
@@ -3,8 +3,7 @@ use std::collections::BTreeMap;

 use async_trait::async_trait;
 use k8s_openapi::api::core::v1::Secret;
-use kube::Resource;
-use kube::api::{DynamicObject, ObjectMeta};
+use kube::api::ObjectMeta;
 use log::debug;
 use serde::Serialize;
 use serde_json::json;
@@ -14,8 +13,6 @@ use crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::{
     AlertmanagerConfig, AlertmanagerConfigSpec, CRDPrometheus,
 };
 use crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::RHOBObservability;
-use crate::modules::monitoring::okd::OpenshiftClusterAlertSender;
-use crate::topology::oberservability::monitoring::AlertManagerReceiver;
 use crate::{
     interpret::{InterpretError, Outcome},
     modules::monitoring::{
@@ -35,8 +32,10 @@ pub struct DiscordWebhook {
     pub url: Url,
 }

-impl DiscordWebhook {
-    fn get_receiver_config(&self) -> AlertManagerReceiver {
+#[async_trait]
+impl AlertReceiver<RHOBObservability> for DiscordWebhook {
+    async fn install(&self, sender: &RHOBObservability) -> Result<Outcome, InterpretError> {
+        let ns = sender.namespace.clone();
         let secret_name = format!("{}-secret", self.name.clone());
         let webhook_key = format!("{}", self.url.clone());

@@ -53,74 +52,26 @@ impl DiscordWebhook {
             ..Default::default()
         };

-        AlertManagerReceiver {
-            additional_ressources: vec![],
-            receiver_config: json!({
-                "name": self.name,
-                "discordConfigs": [
-                    {
-                        "apiURL": {
-                            "name": secret_name,
-                            "key": "webhook-url",
-                        },
-                        "title": "{{ template \"discord.default.title\" . }}",
-                        "message": "{{ template \"discord.default.message\" . }}"
-                    }
-                ]
-            }),
-        }
-    }
-}
-
-#[async_trait]
-impl AlertReceiver<OpenshiftClusterAlertSender> for DiscordWebhook {
-    async fn install(
-        &self,
-        sender: &OpenshiftClusterAlertSender,
-    ) -> Result<Outcome, InterpretError> {
-        todo!()
-    }
-
-    fn name(&self) -> String {
-        todo!()
-    }
-
-    fn clone_box(&self) -> Box<dyn AlertReceiver<OpenshiftClusterAlertSender>> {
-        Box::new(self.clone())
-    }
-
-    fn as_any(&self) -> &dyn Any {
-        todo!()
-    }
-
-    fn as_alertmanager_receiver(&self) -> AlertManagerReceiver {
-        self.get_receiver_config()
-    }
-}
-
-#[async_trait]
-impl AlertReceiver<RHOBObservability> for DiscordWebhook {
-    fn as_alertmanager_receiver(&self) -> AlertManagerReceiver {
-        todo!()
-    }
-
-    async fn install(&self, sender: &RHOBObservability) -> Result<Outcome, InterpretError> {
-        let ns = sender.namespace.clone();
-
-        let config = self.get_receiver_config();
-        for resource in config.additional_ressources.iter() {
-            todo!("can I apply a dynamicresource");
-            // sender.client.apply(resource, Some(&ns)).await;
-        }
-
+        let _ = sender.client.apply(&secret, Some(&ns)).await;
+
         let spec = crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::AlertmanagerConfigSpec {
             data: json!({
                 "route": {
                     "receiver": self.name,
                 },
                 "receivers": [
-                    config.receiver_config
+                    {
+                        "name": self.name,
+                        "discordConfigs": [
+                            {
+                                "apiURL": {
+                                    "name": secret_name,
+                                    "key": "webhook-url",
+                                },
+                                "title": "{{ template \"discord.default.title\" . }}",
+                                "message": "{{ template \"discord.default.message\" . }}"
+                            }
+                        ]
+                    }
                 ]
             }),
         };
@@ -171,9 +122,6 @@ impl AlertReceiver<RHOBObservability> for DiscordWebhook {

 #[async_trait]
 impl AlertReceiver<CRDPrometheus> for DiscordWebhook {
-    fn as_alertmanager_receiver(&self) -> AlertManagerReceiver {
-        todo!()
-    }
     async fn install(&self, sender: &CRDPrometheus) -> Result<Outcome, InterpretError> {
         let ns = sender.namespace.clone();
         let secret_name = format!("{}-secret", self.name.clone());
@@ -252,9 +200,6 @@ impl AlertReceiver<CRDPrometheus> for DiscordWebhook {

 #[async_trait]
 impl AlertReceiver<Prometheus> for DiscordWebhook {
-    fn as_alertmanager_receiver(&self) -> AlertManagerReceiver {
-        todo!()
-    }
     async fn install(&self, sender: &Prometheus) -> Result<Outcome, InterpretError> {
         sender.install_receiver(self).await
     }
@@ -281,9 +226,6 @@ impl PrometheusReceiver for DiscordWebhook {

 #[async_trait]
 impl AlertReceiver<KubePrometheus> for DiscordWebhook {
-    fn as_alertmanager_receiver(&self) -> AlertManagerReceiver {
-        todo!()
-    }
     async fn install(&self, sender: &KubePrometheus) -> Result<Outcome, InterpretError> {
         sender.install_receiver(self).await
     }
@@ -19,7 +19,7 @@ use crate::{
         },
         prometheus::prometheus::{Prometheus, PrometheusReceiver},
     },
-    topology::oberservability::monitoring::{AlertManagerReceiver, AlertReceiver},
+    topology::oberservability::monitoring::AlertReceiver,
 };
 use harmony_types::net::Url;

@@ -31,9 +31,6 @@ pub struct WebhookReceiver {

 #[async_trait]
 impl AlertReceiver<RHOBObservability> for WebhookReceiver {
-    fn as_alertmanager_receiver(&self) -> AlertManagerReceiver {
-        todo!()
-    }
     async fn install(&self, sender: &RHOBObservability) -> Result<Outcome, InterpretError> {
         let spec = crate::modules::monitoring::kube_prometheus::crd::rhob_alertmanager_config::AlertmanagerConfigSpec {
             data: json!({
@@ -100,9 +97,6 @@ impl AlertReceiver<RHOBObservability> for WebhookReceiver {

 #[async_trait]
 impl AlertReceiver<CRDPrometheus> for WebhookReceiver {
-    fn as_alertmanager_receiver(&self) -> AlertManagerReceiver {
-        todo!()
-    }
     async fn install(&self, sender: &CRDPrometheus) -> Result<Outcome, InterpretError> {
         let spec = crate::modules::monitoring::kube_prometheus::crd::crd_alertmanager_config::AlertmanagerConfigSpec {
             data: json!({
@@ -164,9 +158,6 @@ impl AlertReceiver<CRDPrometheus> for WebhookReceiver {

 #[async_trait]
 impl AlertReceiver<Prometheus> for WebhookReceiver {
-    fn as_alertmanager_receiver(&self) -> AlertManagerReceiver {
-        todo!()
-    }
     async fn install(&self, sender: &Prometheus) -> Result<Outcome, InterpretError> {
         sender.install_receiver(self).await
     }
@@ -193,9 +184,6 @@ impl PrometheusReceiver for WebhookReceiver {

 #[async_trait]
 impl AlertReceiver<KubePrometheus> for WebhookReceiver {
-    fn as_alertmanager_receiver(&self) -> AlertManagerReceiver {
-        todo!()
-    }
     async fn install(&self, sender: &KubePrometheus) -> Result<Outcome, InterpretError> {
         sender.install_receiver(self).await
     }
@@ -1,12 +1,8 @@
-use std::collections::BTreeMap;
-
 use kube::CustomResource;
 use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};

-use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::{
-    LabelSelector, PrometheusSpec,
-};
+use crate::modules::monitoring::kube_prometheus::crd::rhob_prometheuses::LabelSelector;

 /// MonitoringStack CRD for monitoring.rhobs/v1alpha1
 #[derive(CustomResource, Serialize, Deserialize, Debug, Clone, JsonSchema)]
@@ -100,11 +100,7 @@ impl<T: Topology + HelmCommand + K8sclient + MultiTargetTopology> Interpret<T> f

         info!("deploying ntfy...");
         client
-            .wait_until_deployment_ready(
-                "ntfy".to_string(),
-                Some(self.score.namespace.as_str()),
-                None,
-            )
+            .wait_until_deployment_ready("ntfy", Some(self.score.namespace.as_str()), None)
             .await?;
         info!("ntfy deployed");

@@ -1,139 +0,0 @@
-use base64::prelude::*;
-use std::sync::Arc;
-
-use async_trait::async_trait;
-use harmony_types::id::Id;
-use kube::api::DynamicObject;
-use log::{debug, info, trace};
-use serde::Serialize;
-
-use crate::{
-    data::Version,
-    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
-    inventory::Inventory,
-    modules::{
-        application::Application,
-        monitoring::{
-            grafana::grafana::Grafana,
-            kube_prometheus::crd::crd_alertmanager_config::CRDPrometheus,
-            okd::OpenshiftClusterAlertSender,
-        },
-        prometheus::prometheus::PrometheusMonitoring,
-    },
-    score::Score,
-    topology::{
-        K8sclient, Topology,
-        k8s::K8sClient,
-        oberservability::monitoring::{AlertReceiver, AlertingInterpret, ScrapeTarget},
-    },
-};
-
-impl Clone for Box<dyn AlertReceiver<OpenshiftClusterAlertSender>> {
-    fn clone(&self) -> Self {
-        self.clone_box()
-    }
-}
-
-impl Serialize for Box<dyn AlertReceiver<OpenshiftClusterAlertSender>> {
-    fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
-    where
-        S: serde::Serializer,
-    {
-        todo!()
-    }
-}
-
-#[derive(Debug, Clone, Serialize)]
-pub struct OpenshiftClusterAlertScore {
-    pub receivers: Vec<Box<dyn AlertReceiver<OpenshiftClusterAlertSender>>>,
-}
-
-impl<T: Topology + K8sclient> Score<T> for OpenshiftClusterAlertScore {
-    fn name(&self) -> String {
-        "ClusterAlertScore".to_string()
-    }
-
-    #[doc(hidden)]
-    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
-        Box::new(OpenshiftClusterAlertInterpret {
-            receivers: self.receivers.clone(),
-        })
-    }
-}
-
-#[derive(Debug)]
-pub struct OpenshiftClusterAlertInterpret {
-    receivers: Vec<Box<dyn AlertReceiver<OpenshiftClusterAlertSender>>>,
-}
-
-#[async_trait]
-impl<T: Topology + K8sclient> Interpret<T> for OpenshiftClusterAlertInterpret {
-    async fn execute(
-        &self,
-        _inventory: &Inventory,
-        topology: &T,
-    ) -> Result<Outcome, InterpretError> {
-        let client = topology.k8s_client().await?;
-
-        let secret: DynamicObject = client
-            .get_secret_json_value("alertmanager-main", Some("openshift-monitoring"))
-            .await?;
-        trace!("Got secret {secret:?}");
-
-        let data: serde_json::Value = secret.data;
-
-        // TODO : get config in base64 by drilling into the value
-        let config_b64 = match data.get("alertmanager.yaml") {
-            Some(value) => value.as_str().unwrap_or(""),
-            None => "",
-        };
-
-        // TODO : base64 decode it
-        let config_bytes = BASE64_STANDARD.decode(config_b64).unwrap_or_default();
-
-        // TODO : use serde_yaml to deserialize the string
-        let am_config: serde_yaml::Value =
-            serde_yaml::from_str(&String::from_utf8(config_bytes).unwrap_or_default())
-                .unwrap_or_default();
-
-        // Merge current alert receivers from this config with self.receivers
-        if let Some(existing_receivers) = am_config.get("receivers") {
-            for receiver in existing_receivers.as_sequence().unwrap_or(&vec![]) {
-                match serde_json::to_string(receiver) {
-                    Ok(yaml_str) => {
-                        // TODO: validate that each receiver implements to_alertmanager_yaml()
-                        // and compare with our receivers
-                        info!("Found existing receiver config: {}", yaml_str);
-                    }
-                    Err(e) => debug!("Failed to serialize receiver: {}", e),
-                }
-            }
-        }
-
-        for custom_receiver in &self.receivers {
-            trace!("Processing custom receiver");
-            debug!(
-                "Custom receiver YAML output: {:?}",
-                custom_receiver.as_alertmanager_receiver()
-            );
-        }
-
-        Ok(Outcome::success(todo!("whats up")))
-    }
-
-    fn get_name(&self) -> InterpretName {
-        InterpretName::Custom("OpenshiftClusterAlertInterpret")
-    }
-
-    fn get_version(&self) -> Version {
-        todo!()
-    }
-
-    fn get_status(&self) -> InterpretStatus {
-        todo!()
-    }
-
-    fn get_children(&self) -> Vec<Id> {
-        todo!()
-    }
-}
@@ -1,90 +0,0 @@
-use std::{collections::BTreeMap, sync::Arc};
-
-use crate::{
-    interpret::{InterpretError, Outcome},
-    topology::k8s::K8sClient,
-};
-use k8s_openapi::api::core::v1::ConfigMap;
-use kube::api::ObjectMeta;
-
-pub(crate) struct Config;
-
-impl Config {
-    pub async fn create_cluster_monitoring_config_cm(
-        client: &Arc<K8sClient>,
-    ) -> Result<Outcome, InterpretError> {
-        let mut data = BTreeMap::new();
-        data.insert(
-            "config.yaml".to_string(),
-            r#"
-enableUserWorkload: true
-alertmanagerMain:
-  enableUserAlertmanagerConfig: true
-"#
-            .to_string(),
-        );
-
-        let cm = ConfigMap {
-            metadata: ObjectMeta {
-                name: Some("cluster-monitoring-config".to_string()),
-                namespace: Some("openshift-monitoring".to_string()),
-                ..Default::default()
-            },
-            data: Some(data),
-            ..Default::default()
-        };
-        client.apply(&cm, Some("openshift-monitoring")).await?;
-
-        Ok(Outcome::success(
-            "updated cluster-monitoring-config-map".to_string(),
-        ))
-    }
-
-    pub async fn create_user_workload_monitoring_config_cm(
-        client: &Arc<K8sClient>,
-    ) -> Result<Outcome, InterpretError> {
-        let mut data = BTreeMap::new();
-        data.insert(
-            "config.yaml".to_string(),
-            r#"
-alertmanager:
-  enabled: true
-  enableAlertmanagerConfig: true
-"#
-            .to_string(),
-        );
-        let cm = ConfigMap {
-            metadata: ObjectMeta {
-                name: Some("user-workload-monitoring-config".to_string()),
-                namespace: Some("openshift-user-workload-monitoring".to_string()),
-                ..Default::default()
-            },
-            data: Some(data),
-            ..Default::default()
-        };
-        client
-            .apply(&cm, Some("openshift-user-workload-monitoring"))
-            .await?;
-
-        Ok(Outcome::success(
-            "updated openshift-user-monitoring-config-map".to_string(),
-        ))
-    }
-
-    pub async fn verify_user_workload(client: &Arc<K8sClient>) -> Result<Outcome, InterpretError> {
-        let namespace = "openshift-user-workload-monitoring";
-        let alertmanager_name = "alertmanager-user-workload-0";
-        let prometheus_name = "prometheus-user-workload-0";
-        client
-            .wait_for_pod_ready(alertmanager_name, Some(namespace))
-            .await?;
-        client
-            .wait_for_pod_ready(prometheus_name, Some(namespace))
-            .await?;
-
-        Ok(Outcome::success(format!(
-            "pods: {}, {} ready in ns: {}",
-            alertmanager_name, prometheus_name, namespace
-        )))
-    }
-}
@@ -1,13 +1,16 @@
+use std::{collections::BTreeMap, sync::Arc};
+
 use crate::{
     data::Version,
     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
     inventory::Inventory,
-    modules::monitoring::okd::config::Config,
     score::Score,
-    topology::{K8sclient, Topology},
+    topology::{K8sclient, Topology, k8s::K8sClient},
 };
 use async_trait::async_trait;
 use harmony_types::id::Id;
+use k8s_openapi::api::core::v1::ConfigMap;
+use kube::api::ObjectMeta;
 use serde::Serialize;

 #[derive(Clone, Debug, Serialize)]
@@ -34,9 +37,10 @@ impl<T: Topology + K8sclient> Interpret<T> for OpenshiftUserWorkloadMonitoringIn
         topology: &T,
     ) -> Result<Outcome, InterpretError> {
         let client = topology.k8s_client().await.unwrap();
-        Config::create_cluster_monitoring_config_cm(&client).await?;
-        Config::create_user_workload_monitoring_config_cm(&client).await?;
-        Config::verify_user_workload(&client).await?;
+        self.update_cluster_monitoring_config_cm(&client).await?;
+        self.update_user_workload_monitoring_config_cm(&client)
+            .await?;
+        self.verify_user_workload(&client).await?;
         Ok(Outcome::success(
             "successfully enabled user-workload-monitoring".to_string(),
         ))
@@ -58,3 +62,88 @@ impl<T: Topology + K8sclient> Interpret<T> for OpenshiftUserWorkloadMonitoringIn
         todo!()
     }
 }
+
+impl OpenshiftUserWorkloadMonitoringInterpret {
+    pub async fn update_cluster_monitoring_config_cm(
+        &self,
+        client: &Arc<K8sClient>,
+    ) -> Result<Outcome, InterpretError> {
+        let mut data = BTreeMap::new();
+        data.insert(
+            "config.yaml".to_string(),
+            r#"
+enableUserWorkload: true
+alertmanagerMain:
+  enableUserAlertmanagerConfig: true
+"#
+            .to_string(),
+        );
+
+        let cm = ConfigMap {
+            metadata: ObjectMeta {
+                name: Some("cluster-monitoring-config".to_string()),
+                namespace: Some("openshift-monitoring".to_string()),
+                ..Default::default()
+            },
+            data: Some(data),
+            ..Default::default()
+        };
+        client.apply(&cm, Some("openshift-monitoring")).await?;
+
+        Ok(Outcome::success(
+            "updated cluster-monitoring-config-map".to_string(),
+        ))
+    }
+
+    pub async fn update_user_workload_monitoring_config_cm(
+        &self,
+        client: &Arc<K8sClient>,
+    ) -> Result<Outcome, InterpretError> {
+        let mut data = BTreeMap::new();
+        data.insert(
+            "config.yaml".to_string(),
+            r#"
+alertmanager:
+  enabled: true
+  enableAlertmanagerConfig: true
+"#
+            .to_string(),
+        );
+        let cm = ConfigMap {
+            metadata: ObjectMeta {
+                name: Some("user-workload-monitoring-config".to_string()),
+                namespace: Some("openshift-user-workload-monitoring".to_string()),
+                ..Default::default()
+            },
+            data: Some(data),
+            ..Default::default()
+        };
+        client
+            .apply(&cm, Some("openshift-user-workload-monitoring"))
+            .await?;
+
+        Ok(Outcome::success(
+            "updated openshift-user-monitoring-config-map".to_string(),
+        ))
+    }
+
+    pub async fn verify_user_workload(
+        &self,
+        client: &Arc<K8sClient>,
+    ) -> Result<Outcome, InterpretError> {
+        let namespace = "openshift-user-workload-monitoring";
+        let alertmanager_name = "alertmanager-user-workload-0";
+        let prometheus_name = "prometheus-user-workload-0";
+        client
+            .wait_for_pod_ready(alertmanager_name, Some(namespace))
+            .await?;
+        client
+            .wait_for_pod_ready(prometheus_name, Some(namespace))
+            .await?;
+
+        Ok(Outcome::success(format!(
+            "pods: {}, {} ready in ns: {}",
+            alertmanager_name, prometheus_name, namespace
+        )))
+    }
+}
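For reference (not part of the commit), a hedged sketch of driving the new helper methods directly, assuming an `interpret` value and an `Arc<K8sClient>` are already in scope inside an async context; the interpret's `execute` above does exactly this.

```rust
// Hypothetical call sequence; method and pod names come from this diff.
interpret.update_cluster_monitoring_config_cm(&client).await?;
interpret.update_user_workload_monitoring_config_cm(&client).await?;
// Blocks until alertmanager-user-workload-0 and prometheus-user-workload-0 are ready.
interpret.verify_user_workload(&client).await?;
```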
@@ -1,14 +1 @@
-use crate::topology::oberservability::monitoring::AlertSender;
-
-pub mod cluster_monitoring;
-pub(crate) mod config;
 pub mod enable_user_workload;
-
-#[derive(Debug)]
-pub struct OpenshiftClusterAlertSender;
-
-impl AlertSender for OpenshiftClusterAlertSender {
-    fn name(&self) -> String {
-        "OpenshiftClusterAlertSender".to_string()
-    }
-}
@@ -4,7 +4,7 @@ use crate::{
     infra::inventory::InventoryRepositoryFactory,
     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
     inventory::{HostRole, Inventory},
-    modules::inventory::DiscoverHostForRoleScore,
+    modules::inventory::{DiscoverHostForRoleScore, HarmonyDiscoveryStrategy},
     score::Score,
     topology::HAClusterTopology,
 };
@@ -104,6 +104,8 @@ When you can dig them, confirm to continue.
         bootstrap_host = hosts.into_iter().next().to_owned();
         DiscoverHostForRoleScore {
             role: HostRole::Bootstrap,
+            number_desired_hosts: 1,
+            discovery_strategy: HarmonyDiscoveryStrategy::MDNS,
         }
         .interpret(inventory, topology)
         .await?;
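The same extended score shape recurs throughout the OKD refactor below; a minimal hedged sketch of the new call site (the field names come from this hunk, while the role, host count, and surrounding `inventory`/`topology` values are illustrative assumptions):

```rust
// Hypothetical fragment inside an async fn; not part of the commit.
DiscoverHostForRoleScore {
    role: HostRole::Worker,          // illustrative role
    number_desired_hosts: 2,         // illustrative count
    discovery_strategy: HarmonyDiscoveryStrategy::MDNS,
}
.interpret(inventory, topology)
.await?;
```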
@@ -1,22 +1,10 @@
 use crate::{
-    data::Version,
-    hardware::PhysicalHost,
-    infra::inventory::InventoryRepositoryFactory,
-    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
-    inventory::{HostRole, Inventory},
-    modules::{
-        dhcp::DhcpHostBindingScore,
-        http::IPxeMacBootFileScore,
-        inventory::DiscoverHostForRoleScore,
-        okd::{host_network::HostNetworkConfigurationScore, templates::BootstrapIpxeTpl},
-    },
+    interpret::Interpret,
+    inventory::HostRole,
+    modules::{inventory::HarmonyDiscoveryStrategy, okd::bootstrap_okd_node::OKDNodeInterpret},
     score::Score,
-    topology::{HAClusterTopology, HostBinding},
+    topology::HAClusterTopology,
 };
-use async_trait::async_trait;
-use derive_new::new;
-use harmony_types::id::Id;
-use log::{debug, info};
 use serde::Serialize;

 // -------------------------------------------------------------------------------------------------
@@ -25,257 +13,23 @@ use serde::Serialize;
 // - Persist bonding via MachineConfigs (or NNCP) once SCOS is active.
 // -------------------------------------------------------------------------------------------------

-#[derive(Debug, Clone, Serialize, new)]
-pub struct OKDSetup03ControlPlaneScore {}
+#[derive(Debug, Clone, Serialize)]
+pub struct OKDSetup03ControlPlaneScore {
+    pub discovery_strategy: HarmonyDiscoveryStrategy,
+}

 impl Score<HAClusterTopology> for OKDSetup03ControlPlaneScore {
     fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
-        Box::new(OKDSetup03ControlPlaneInterpret::new())
+        // TODO: Implement a step to wait for the control plane nodes to join the cluster
+        // and for the cluster operators to become available. This would be similar to
+        // the `wait-for bootstrap-complete` command.
+        Box::new(OKDNodeInterpret::new(
+            HostRole::ControlPlane,
+            self.discovery_strategy.clone(),
+        ))
     }

     fn name(&self) -> String {
         "OKDSetup03ControlPlaneScore".to_string()
     }
 }
-
-#[derive(Debug, Clone)]
-pub struct OKDSetup03ControlPlaneInterpret {
-    version: Version,
-    status: InterpretStatus,
-}
-
-impl OKDSetup03ControlPlaneInterpret {
-    pub fn new() -> Self {
-        let version = Version::from("1.0.0").unwrap();
-        Self {
-            version,
-            status: InterpretStatus::QUEUED,
-        }
-    }
-
-    /// Ensures that three physical hosts are discovered and available for the ControlPlane role.
-    /// It will trigger discovery if not enough hosts are found.
-    async fn get_nodes(
-        &self,
-        inventory: &Inventory,
-        topology: &HAClusterTopology,
-    ) -> Result<Vec<PhysicalHost>, InterpretError> {
-        const REQUIRED_HOSTS: usize = 3;
-        let repo = InventoryRepositoryFactory::build().await?;
-        let mut control_plane_hosts = repo.get_host_for_role(&HostRole::ControlPlane).await?;
-
-        while control_plane_hosts.len() < REQUIRED_HOSTS {
-            info!(
-                "Discovery of {} control plane hosts in progress, current number {}",
-                REQUIRED_HOSTS,
-                control_plane_hosts.len()
-            );
-            // This score triggers the discovery agent for a specific role.
-            DiscoverHostForRoleScore {
-                role: HostRole::ControlPlane,
-            }
-            .interpret(inventory, topology)
-            .await?;
-            control_plane_hosts = repo.get_host_for_role(&HostRole::ControlPlane).await?;
-        }
-
-        if control_plane_hosts.len() < REQUIRED_HOSTS {
-            Err(InterpretError::new(format!(
-                "OKD Requires at least {} control plane hosts, but only found {}. Cannot proceed.",
-                REQUIRED_HOSTS,
-                control_plane_hosts.len()
-            )))
-        } else {
-            // Take exactly the number of required hosts to ensure consistency.
-            Ok(control_plane_hosts
-                .into_iter()
-                .take(REQUIRED_HOSTS)
-                .collect())
-        }
-    }
-
-    /// Configures DHCP host bindings for all control plane nodes.
-    async fn configure_host_binding(
-        &self,
-        inventory: &Inventory,
-        topology: &HAClusterTopology,
-        nodes: &Vec<PhysicalHost>,
-    ) -> Result<(), InterpretError> {
-        info!("[ControlPlane] Configuring host bindings for control plane nodes.");
-
-        // Ensure the topology definition matches the number of physical nodes found.
-        if topology.control_plane.len() != nodes.len() {
-            return Err(InterpretError::new(format!(
-                "Mismatch between logical control plane hosts defined in topology ({}) and physical nodes found ({}).",
-                topology.control_plane.len(),
-                nodes.len()
-            )));
-        }
-
-        // Create a binding for each physical host to its corresponding logical host.
-        let bindings: Vec<HostBinding> = topology
-            .control_plane
-            .iter()
-            .zip(nodes.iter())
-            .map(|(logical_host, physical_host)| {
-                info!(
-                    "Creating binding: Logical Host '{}' -> Physical Host ID '{}'",
-                    logical_host.name, physical_host.id
-                );
-                HostBinding {
-                    logical_host: logical_host.clone(),
-                    physical_host: physical_host.clone(),
-                }
-            })
-            .collect();
-
-        DhcpHostBindingScore {
-            host_binding: bindings,
-            domain: Some(topology.domain_name.clone()),
-        }
-        .interpret(inventory, topology)
-        .await?;
-
-        Ok(())
-    }
-
-    /// Renders and deploys a per-MAC iPXE boot file for each control plane node.
-    async fn configure_ipxe(
-        &self,
-        inventory: &Inventory,
-        topology: &HAClusterTopology,
-        nodes: &Vec<PhysicalHost>,
-    ) -> Result<(), InterpretError> {
-        info!("[ControlPlane] Rendering per-MAC iPXE configurations.");
-
-        // The iPXE script content is the same for all control plane nodes,
-        // pointing to the 'master.ign' ignition file.
-        let content = BootstrapIpxeTpl {
-            http_ip: &topology.http_server.get_ip().to_string(),
-            scos_path: "scos",
-            ignition_http_path: "okd_ignition_files",
-            installation_device: "/dev/sda", // This might need to be configurable per-host in the future
-            ignition_file_name: "master.ign", // Control plane nodes use the master ignition file
-        }
-        .to_string();
-
-        debug!("[ControlPlane] iPXE content template:\n{content}");
-
-        // Create and apply an iPXE boot file for each node.
-        for node in nodes {
-            let mac_address = node.get_mac_address();
-            if mac_address.is_empty() {
-                return Err(InterpretError::new(format!(
-                    "Physical host with ID '{}' has no MAC addresses defined.",
-                    node.id
-                )));
-            }
-            info!(
-                "[ControlPlane] Applying iPXE config for node ID '{}' with MACs: {:?}",
-                node.id, mac_address
-            );
-
-            IPxeMacBootFileScore {
-                mac_address,
-                content: content.clone(),
-            }
-            .interpret(inventory, topology)
-            .await?;
-        }
-
-        Ok(())
-    }
-
-    /// Prompts the user to reboot the target control plane nodes.
-    async fn reboot_targets(&self, nodes: &Vec<PhysicalHost>) -> Result<(), InterpretError> {
-        let node_ids: Vec<String> = nodes.iter().map(|n| n.id.to_string()).collect();
-        info!("[ControlPlane] Requesting reboot for control plane nodes: {node_ids:?}",);
-
-        let confirmation = inquire::Confirm::new(
-            &format!("Please reboot the {} control plane nodes ({}) to apply their PXE configuration. Press enter when ready.", nodes.len(), node_ids.join(", ")),
-        )
-        .prompt()
-        .map_err(|e| InterpretError::new(format!("User prompt failed: {e}")))?;
-
-        if !confirmation {
-            return Err(InterpretError::new(
-                "User aborted the operation.".to_string(),
-            ));
-        }
-
-        Ok(())
-    }
-
-    /// Placeholder for automating network bonding configuration.
-    async fn persist_network_bond(
-        &self,
-        inventory: &Inventory,
-        topology: &HAClusterTopology,
-        hosts: &Vec<PhysicalHost>,
-    ) -> Result<(), InterpretError> {
-        info!("[ControlPlane] Ensuring persistent bonding");
-        let score = HostNetworkConfigurationScore {
-            hosts: hosts.clone(),
-        };
-        score.interpret(inventory, topology).await?;
-
-        inquire::Confirm::new(
-            "Network configuration for control plane nodes is not automated yet. Configure it manually if needed.",
-        )
-        .prompt()
-        .map_err(|e| InterpretError::new(format!("User prompt failed: {e}")))?;
-
-        Ok(())
-    }
-}
-
-#[async_trait]
-impl Interpret<HAClusterTopology> for OKDSetup03ControlPlaneInterpret {
-    fn get_name(&self) -> InterpretName {
-        InterpretName::Custom("OKDSetup03ControlPlane")
-    }
-
-    fn get_version(&self) -> Version {
-        self.version.clone()
-    }
-
-    fn get_status(&self) -> InterpretStatus {
-        self.status.clone()
-    }
-
-    fn get_children(&self) -> Vec<Id> {
-        vec![]
-    }
-
-    async fn execute(
-        &self,
-        inventory: &Inventory,
-        topology: &HAClusterTopology,
-    ) -> Result<Outcome, InterpretError> {
-        // 1. Ensure we have 3 physical hosts for the control plane.
-        let nodes = self.get_nodes(inventory, topology).await?;
-
-        // 2. Create DHCP reservations for the control plane nodes.
-        self.configure_host_binding(inventory, topology, &nodes)
-            .await?;
-
-        // 3. Create iPXE files for each control plane node to boot from the master ignition.
-        self.configure_ipxe(inventory, topology, &nodes).await?;
-
-        // 4. Reboot the nodes to start the OS installation.
-        self.reboot_targets(&nodes).await?;
-
-        // 5. Placeholder for post-boot network configuration (e.g., bonding).
-        self.persist_network_bond(inventory, topology, &nodes)
-            .await?;
-
-        // TODO: Implement a step to wait for the control plane nodes to join the cluster
-        // and for the cluster operators to become available. This would be similar to
-        // the `wait-for bootstrap-complete` command.
-        info!("[ControlPlane] Provisioning initiated. Monitor the cluster convergence manually.");
-
-        Ok(Outcome::success(
-            "Control plane provisioning has been successfully initiated.".into(),
-        ))
-    }
-}
@@ -1,13 +1,9 @@
-use async_trait::async_trait;
-use derive_new::new;
-use harmony_types::id::Id;
-use log::info;
 use serde::Serialize;

 use crate::{
-    data::Version,
-    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
-    inventory::Inventory,
+    interpret::Interpret,
+    inventory::HostRole,
+    modules::{inventory::HarmonyDiscoveryStrategy, okd::bootstrap_okd_node::OKDNodeInterpret},
     score::Score,
     topology::HAClusterTopology,
 };
@@ -18,66 +14,20 @@ use crate::{
 // - Persist bonding via MC/NNCP as required (same approach as masters).
 // -------------------------------------------------------------------------------------------------

-#[derive(Debug, Clone, Serialize, new)]
-pub struct OKDSetup04WorkersScore {}
+#[derive(Debug, Clone, Serialize)]
+pub struct OKDSetup04WorkersScore {
+    pub discovery_strategy: HarmonyDiscoveryStrategy,
+}

 impl Score<HAClusterTopology> for OKDSetup04WorkersScore {
     fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
-        Box::new(OKDSetup04WorkersInterpret::new(self.clone()))
+        Box::new(OKDNodeInterpret::new(
+            HostRole::ControlPlane,
+            self.discovery_strategy.clone(),
+        ))
     }

     fn name(&self) -> String {
         "OKDSetup04WorkersScore".to_string()
     }
 }
-
-#[derive(Debug, Clone)]
-pub struct OKDSetup04WorkersInterpret {
-    score: OKDSetup04WorkersScore,
-    version: Version,
-    status: InterpretStatus,
-}
-
-impl OKDSetup04WorkersInterpret {
-    pub fn new(score: OKDSetup04WorkersScore) -> Self {
-        let version = Version::from("1.0.0").unwrap();
-        Self {
-            version,
-            score,
-            status: InterpretStatus::QUEUED,
-        }
-    }
-
-    async fn render_and_reboot(&self) -> Result<(), InterpretError> {
-        info!("[Workers] Rendering per-MAC PXE for workers and rebooting");
-        Ok(())
-    }
-}
-
-#[async_trait]
-impl Interpret<HAClusterTopology> for OKDSetup04WorkersInterpret {
-    fn get_name(&self) -> InterpretName {
-        InterpretName::Custom("OKDSetup04Workers")
-    }
-
-    fn get_version(&self) -> Version {
-        self.version.clone()
-    }
-
-    fn get_status(&self) -> InterpretStatus {
-        self.status.clone()
-    }
-
-    fn get_children(&self) -> Vec<Id> {
-        vec![]
-    }
-
-    async fn execute(
-        &self,
-        _inventory: &Inventory,
-        _topology: &HAClusterTopology,
-    ) -> Result<Outcome, InterpretError> {
-        self.render_and_reboot().await?;
-        Ok(Outcome::success("Workers provisioned".into()))
-    }
-}
313  harmony/src/modules/okd/bootstrap_okd_node.rs  Normal file
@@ -0,0 +1,313 @@
+use async_trait::async_trait;
+use derive_new::new;
+use harmony_types::id::Id;
+use log::{debug, info};
+use serde::Serialize;
+
+use crate::{
+    data::Version,
+    hardware::PhysicalHost,
+    infra::inventory::InventoryRepositoryFactory,
+    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
+    inventory::{HostRole, Inventory},
+    modules::{
+        dhcp::DhcpHostBindingScore,
+        http::IPxeMacBootFileScore,
+        inventory::{DiscoverHostForRoleScore, HarmonyDiscoveryStrategy},
+        okd::{
+            okd_node::{BootstrapRole, ControlPlaneRole, OKDRoleProperties, WorkerRole},
+            templates::BootstrapIpxeTpl,
+        },
+    },
+    score::Score,
+    topology::{HAClusterTopology, HostBinding, LogicalHost},
+};
+
+#[derive(Debug, Clone, Serialize, new)]
+pub struct OKDNodeInstallationScore {
+    host_role: HostRole,
+    discovery_strategy: HarmonyDiscoveryStrategy,
+}
+
+impl Score<HAClusterTopology> for OKDNodeInstallationScore {
+    fn name(&self) -> String {
+        "OKDNodeScore".to_string()
+    }
+
+    fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
+        Box::new(OKDNodeInterpret::new(
+            self.host_role.clone(),
+            self.discovery_strategy.clone(),
+        ))
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct OKDNodeInterpret {
+    host_role: HostRole,
+    discovery_strategy: HarmonyDiscoveryStrategy,
+}
+
+impl OKDNodeInterpret {
+    pub fn new(host_role: HostRole, discovery_strategy: HarmonyDiscoveryStrategy) -> Self {
+        Self {
+            host_role,
+            discovery_strategy,
+        }
+    }
+
+    fn okd_role_properties(&self, role: &HostRole) -> &'static dyn OKDRoleProperties {
+        match role {
+            HostRole::Bootstrap => &BootstrapRole,
+            HostRole::ControlPlane => &ControlPlaneRole,
+            HostRole::Worker => &WorkerRole,
+        }
+    }
+
+    async fn get_nodes(
+        &self,
+        inventory: &Inventory,
+        topology: &HAClusterTopology,
+    ) -> Result<Vec<PhysicalHost>, InterpretError> {
+        let repo = InventoryRepositoryFactory::build().await?;
+
+        let mut hosts = repo.get_host_for_role(&self.host_role).await?;
+
+        let okd_host_properties = self.okd_role_properties(&self.host_role);
+
+        let required_hosts: i16 = okd_host_properties.required_hosts();
+
+        info!(
+            "Discovery of {} {} hosts in progress, current number {}",
+            required_hosts,
+            self.host_role,
+            hosts.len()
+        );
+        // This score triggers the discovery agent for a specific role.
+        DiscoverHostForRoleScore {
+            role: self.host_role.clone(),
+            number_desired_hosts: required_hosts,
+            discovery_strategy: self.discovery_strategy.clone(),
+        }
+        .interpret(inventory, topology)
+        .await?;
+
+        hosts = repo.get_host_for_role(&self.host_role).await?;
+
+        if hosts.len() < required_hosts.try_into().unwrap_or(0) {
+            Err(InterpretError::new(format!(
+                "OKD Requires at least {} {} hosts, but only found {}. Cannot proceed.",
+                required_hosts,
+                self.host_role,
+                hosts.len()
+            )))
+        } else {
+            // Take exactly the number of required hosts to ensure consistency.
+            Ok(hosts
+                .into_iter()
+                .take(required_hosts.try_into().unwrap())
+                .collect())
+        }
+    }
+
+    /// Configures DHCP host bindings for all nodes.
+    async fn configure_host_binding(
+        &self,
+        inventory: &Inventory,
+        topology: &HAClusterTopology,
+        nodes: &Vec<PhysicalHost>,
+    ) -> Result<(), InterpretError> {
+        info!(
+            "[{}] Configuring host bindings for {} plane nodes.",
+            self.host_role, self.host_role,
+        );
+
+        let host_properties = self.okd_role_properties(&self.host_role);
+
+        self.validate_host_node_match(nodes, host_properties.logical_hosts(topology))?;
+
+        let bindings: Vec<HostBinding> =
+            self.host_bindings(nodes, host_properties.logical_hosts(topology));
+
+        DhcpHostBindingScore {
+            host_binding: bindings,
+            domain: Some(topology.domain_name.clone()),
+        }
+        .interpret(inventory, topology)
+        .await?;
+
+        Ok(())
+    }
+
+    // Ensure the topology definition matches the number of physical nodes found.
+    fn validate_host_node_match(
+        &self,
+        nodes: &Vec<PhysicalHost>,
+        hosts: &Vec<LogicalHost>,
+    ) -> Result<(), InterpretError> {
+        if hosts.len() != nodes.len() {
+            return Err(InterpretError::new(format!(
+                "Mismatch between logical hosts defined in topology ({}) and physical nodes found ({}).",
+                hosts.len(),
+                nodes.len()
+            )));
+        }
+        Ok(())
+    }
+
+    // Create a binding for each physical host to its corresponding logical host.
+    fn host_bindings(
+        &self,
+        nodes: &Vec<PhysicalHost>,
+        hosts: &Vec<LogicalHost>,
+    ) -> Vec<HostBinding> {
+        hosts
+            .iter()
+            .zip(nodes.iter())
+            .map(|(logical_host, physical_host)| {
+                info!(
+                    "Creating binding: Logical Host '{}' -> Physical Host ID '{}'",
+                    logical_host.name, physical_host.id
+                );
+                HostBinding {
+                    logical_host: logical_host.clone(),
+                    physical_host: physical_host.clone(),
+                }
+            })
+            .collect()
+    }
+
+    /// Renders and deploys a per-MAC iPXE boot file for each node.
+    async fn configure_ipxe(
+        &self,
+        inventory: &Inventory,
+        topology: &HAClusterTopology,
+        nodes: &Vec<PhysicalHost>,
+    ) -> Result<(), InterpretError> {
+        info!(
+            "[{}] Rendering per-MAC iPXE configurations.",
+            self.host_role
+        );
+
+        let okd_role_properties = self.okd_role_properties(&self.host_role);
+        // The iPXE script content is the same for all control plane nodes,
+        // pointing to the 'master.ign' ignition file.
+        let content = BootstrapIpxeTpl {
+            http_ip: &topology.http_server.get_ip().to_string(),
+            scos_path: "scos",
+            ignition_http_path: "okd_ignition_files",
+            //TODO must be refactored to not only use /dev/sda
+            installation_device: "/dev/sda", // This might need to be configurable per-host in the future
+            ignition_file_name: okd_role_properties.ignition_file(),
+        }
+        .to_string();
+
+        debug!("[{}] iPXE content template:\n{content}", self.host_role);
+
+        // Create and apply an iPXE boot file for each node.
+        for node in nodes {
+            let mac_address = node.get_mac_address();
+            if mac_address.is_empty() {
+                return Err(InterpretError::new(format!(
+                    "Physical host with ID '{}' has no MAC addresses defined.",
|
||||||
|
node.id
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
info!(
|
||||||
|
"[{}] Applying iPXE config for node ID '{}' with MACs: {:?}",
|
||||||
|
self.host_role, node.id, mac_address
|
||||||
|
);
|
||||||
|
|
||||||
|
IPxeMacBootFileScore {
|
||||||
|
mac_address,
|
||||||
|
content: content.clone(),
|
||||||
|
}
|
||||||
|
.interpret(inventory, topology)
|
||||||
|
.await?;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Prompts the user to reboot the target control plane nodes.
|
||||||
|
async fn reboot_targets(&self, nodes: &Vec<PhysicalHost>) -> Result<(), InterpretError> {
|
||||||
|
let node_ids: Vec<String> = nodes.iter().map(|n| n.id.to_string()).collect();
|
||||||
|
info!(
|
||||||
|
"[{}] Requesting reboot for control plane nodes: {node_ids:?}",
|
||||||
|
self.host_role
|
||||||
|
);
|
||||||
|
|
||||||
|
let confirmation = inquire::Confirm::new(
|
||||||
|
&format!("Please reboot the {} {} nodes ({}) to apply their PXE configuration. Press enter when ready.", nodes.len(), self.host_role, node_ids.join(", ")),
|
||||||
|
)
|
||||||
|
.prompt()
|
||||||
|
.map_err(|e| InterpretError::new(format!("User prompt failed: {e}")))?;
|
||||||
|
|
||||||
|
if !confirmation {
|
||||||
|
return Err(InterpretError::new(
|
||||||
|
"User aborted the operation.".to_string(),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl Interpret<HAClusterTopology> for OKDNodeInterpret {
|
||||||
|
async fn execute(
|
||||||
|
&self,
|
||||||
|
inventory: &Inventory,
|
||||||
|
topology: &HAClusterTopology,
|
||||||
|
) -> Result<Outcome, InterpretError> {
|
||||||
|
// 1. Ensure we have the specfied number of physical hosts.
|
||||||
|
let nodes = self.get_nodes(inventory, topology).await?;
|
||||||
|
|
||||||
|
// 2. Create DHCP reservations for the nodes.
|
||||||
|
self.configure_host_binding(inventory, topology, &nodes)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
// 3. Create iPXE files for each node to boot from the ignition.
|
||||||
|
self.configure_ipxe(inventory, topology, &nodes).await?;
|
||||||
|
|
||||||
|
// 4. Reboot the nodes to start the OS installation.
|
||||||
|
self.reboot_targets(&nodes).await?;
|
||||||
|
// TODO: Implement a step to validate that the installation of the nodes is
|
||||||
|
// complete and for the cluster operators to become available.
|
||||||
|
//
|
||||||
|
// The OpenShift installer only provides two wait commands which currently need to be
|
||||||
|
// run manually:
|
||||||
|
// - `openshift-install wait-for bootstrap-complete`
|
||||||
|
// - `openshift-install wait-for install-complete`
|
||||||
|
//
|
||||||
|
// There is no installer command that waits specifically for worker node
|
||||||
|
// provisioning. Worker nodes join asynchronously (via ignition + CSR approval),
|
||||||
|
// and the cluster becomes fully functional only once all nodes are Ready and the
|
||||||
|
// cluster operators report Available=True.
|
||||||
|
info!(
|
||||||
|
"[{}] Provisioning initiated. Monitor the cluster convergence manually.",
|
||||||
|
self.host_role
|
||||||
|
);
|
||||||
|
|
||||||
|
Ok(Outcome::success(format!(
|
||||||
|
"{} provisioning has been successfully initiated.",
|
||||||
|
self.host_role
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_name(&self) -> InterpretName {
|
||||||
|
InterpretName::Custom("OKDNodeSetup".into())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_version(&self) -> Version {
|
||||||
|
todo!()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_status(&self) -> InterpretStatus {
|
||||||
|
todo!()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_children(&self) -> Vec<Id> {
|
||||||
|
todo!()
|
||||||
|
}
|
||||||
|
}
|
||||||
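The pair above follows Harmony's Score/Interpret split: `OKDNodeInstallationScore` is plain serializable data, and all side effects live in `OKDNodeInterpret::execute`. A minimal sketch of driving it for the control plane role follows; the discovery-strategy value and the surrounding inventory/topology setup are placeholders, not APIs confirmed by this diff:

// Sketch only: run the per-role installation score directly.
// `HarmonyDiscoveryStrategy::default()` is an assumed constructor; only the
// Score/Interpret calls themselves come from the code above.
async fn install_control_plane(
    inventory: &Inventory,
    topology: &HAClusterTopology,
) -> Result<Outcome, InterpretError> {
    OKDNodeInstallationScore::new(
        HostRole::ControlPlane,
        HarmonyDiscoveryStrategy::default(),
    )
    .interpret(inventory, topology)
    .await
}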
130
harmony/src/modules/okd/bootstrap_persist_network_bond.rs
Normal file
@@ -0,0 +1,130 @@
use crate::{
    data::Version,
    hardware::PhysicalHost,
    infra::inventory::InventoryRepositoryFactory,
    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
    inventory::{HostRole, Inventory},
    modules::okd::host_network::HostNetworkConfigurationScore,
    score::Score,
    topology::HAClusterTopology,
};
use async_trait::async_trait;
use derive_new::new;
use harmony_types::id::Id;
use log::info;
use serde::Serialize;

// -------------------------------------------------------------------------------------------------
// Persist Network Bond
// - Persist bonding via NMState
// - Persist port channels on the switch
// -------------------------------------------------------------------------------------------------

#[derive(Debug, Clone, Serialize, new)]
pub struct OKDSetupPersistNetworkBondScore {}

impl Score<HAClusterTopology> for OKDSetupPersistNetworkBondScore {
    fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
        Box::new(OKDSetupPersistNetworkBondInterpret::new())
    }

    fn name(&self) -> String {
        "OKDSetupPersistNetworkBondScore".to_string()
    }
}

#[derive(Debug, Clone)]
pub struct OKDSetupPersistNetworkBondInterpret {
    version: Version,
    status: InterpretStatus,
}

impl OKDSetupPersistNetworkBondInterpret {
    pub fn new() -> Self {
        let version = Version::from("1.0.0").unwrap();
        Self {
            version,
            status: InterpretStatus::QUEUED,
        }
    }

    /// Ensures that three physical hosts are available for the ControlPlane role,
    /// returning an error if not enough hosts have been discovered.
    async fn get_nodes(
        &self,
        _inventory: &Inventory,
        _topology: &HAClusterTopology,
    ) -> Result<Vec<PhysicalHost>, InterpretError> {
        const REQUIRED_HOSTS: usize = 3;
        let repo = InventoryRepositoryFactory::build().await?;
        let control_plane_hosts = repo.get_host_for_role(&HostRole::ControlPlane).await?;

        if control_plane_hosts.len() < REQUIRED_HOSTS {
            Err(InterpretError::new(format!(
                "OKD requires at least {} control plane hosts, but only found {}. Cannot proceed.",
                REQUIRED_HOSTS,
                control_plane_hosts.len()
            )))
        } else {
            // Take exactly the number of required hosts to ensure consistency.
            Ok(control_plane_hosts
                .into_iter()
                .take(REQUIRED_HOSTS)
                .collect())
        }
    }

    async fn persist_network_bond(
        &self,
        inventory: &Inventory,
        topology: &HAClusterTopology,
        hosts: &Vec<PhysicalHost>,
    ) -> Result<(), InterpretError> {
        info!("Ensuring persistent bonding");

        let score = HostNetworkConfigurationScore {
            hosts: hosts.clone(),
        };
        score.interpret(inventory, topology).await?;

        Ok(())
    }
}

#[async_trait]
impl Interpret<HAClusterTopology> for OKDSetupPersistNetworkBondInterpret {
    fn get_name(&self) -> InterpretName {
        InterpretName::Custom("OKDSetupPersistNetworkBondInterpret")
    }

    fn get_version(&self) -> Version {
        self.version.clone()
    }

    fn get_status(&self) -> InterpretStatus {
        self.status.clone()
    }

    fn get_children(&self) -> Vec<Id> {
        vec![]
    }

    async fn execute(
        &self,
        inventory: &Inventory,
        topology: &HAClusterTopology,
    ) -> Result<Outcome, InterpretError> {
        let nodes = self.get_nodes(inventory, topology).await?;

        let res = self.persist_network_bond(inventory, topology, &nodes).await;

        match res {
            Ok(_) => Ok(Outcome::success(
                "Network bond successfully persisted".into(),
            )),
            Err(_) => Err(InterpretError::new(
                "Failed to persist network bond".to_string(),
            )),
        }
    }
}
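One design note on `execute` above: the final `match` flattens whatever error `persist_network_bond` returned into a generic message, which hides the root cause. A hedged sketch of the alternative that forwards the source error (this assumes `InterpretError` implements `Display`, which this diff does not show):

// Sketch: propagate the underlying failure instead of discarding it.
let nodes = self.get_nodes(inventory, topology).await?;
self.persist_network_bond(inventory, topology, &nodes)
    .await
    .map_err(|e| InterpretError::new(format!("Failed to persist network bond: {e}")))?;
Ok(Outcome::success("Network bond successfully persisted".into()))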
@@ -1,41 +1 @@
-use kube::CustomResource;
-use schemars::JsonSchema;
-use serde::{Deserialize, Serialize};
-
 pub mod nmstate;
-
-#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
-#[kube(
-    group = "operators.coreos.com",
-    version = "v1",
-    kind = "OperatorGroup",
-    namespaced
-)]
-#[serde(rename_all = "camelCase")]
-pub struct OperatorGroupSpec {
-    pub target_namespaces: Vec<String>,
-}
-
-#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
-#[kube(
-    group = "operators.coreos.com",
-    version = "v1alpha1",
-    kind = "Subscription",
-    namespaced
-)]
-#[serde(rename_all = "camelCase")]
-pub struct SubscriptionSpec {
-    pub name: String,
-    pub source: String,
-    pub source_namespace: String,
-    pub channel: Option<String>,
-    pub install_plan_approval: Option<InstallPlanApproval>,
-}
-
-#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
-pub enum InstallPlanApproval {
-    #[serde(rename = "Automatic")]
-    Automatic,
-    #[serde(rename = "Manual")]
-    Manual,
-}
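For reference, while these OLM types lived here, the `CustomResource` derive generated wrapper types (`OperatorGroup`, `Subscription`) whose instances serialize to `operators.coreos.com` custom resources. A sketch of building one such resource; every field value is illustrative, and `Subscription::new` is the constructor kube's derive generates:

// Illustrative values only: subscribe to an operator from a catalog source.
let spec = SubscriptionSpec {
    name: "cert-manager".into(),            // assumed operator package name
    source: "operatorhubio-catalog".into(), // assumed CatalogSource name
    source_namespace: "olm".into(),
    channel: Some("stable".into()),
    install_plan_approval: Some(InstallPlanApproval::Automatic),
};
let subscription = Subscription::new("cert-manager", spec);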
@@ -1,14 +1,22 @@
 use std::collections::BTreeMap;

-use kube::CustomResource;
+use k8s_openapi::{ClusterResourceScope, Resource};
+use kube::{CustomResource, api::ObjectMeta};
 use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};
 use serde_json::Value;

 #[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
-#[kube(group = "nmstate.io", version = "v1", kind = "NMState", namespaced)]
+#[kube(
+    group = "nmstate.io",
+    version = "v1",
+    kind = "NMState",
+    plural = "nmstates",
+    namespaced = false
+)]
 #[serde(rename_all = "camelCase")]
 pub struct NMStateSpec {
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub probe_configuration: Option<ProbeConfig>,
 }
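Because this hunk makes `NMState` cluster-scoped (`namespaced = false`, explicit `plural`), clients must address it with `Api::all` rather than `Api::namespaced`. A minimal sketch with kube-rs; the singleton name `nmstate` is conventional for the handler deployment but is an assumption here:

use kube::{Api, Client, api::PostParams};

// Sketch: create the cluster-scoped NMState resource that deploys the handler.
async fn deploy_nmstate(client: Client) -> Result<(), kube::Error> {
    let api: Api<NMState> = Api::all(client); // cluster-scoped after this change
    let cr = NMState::new("nmstate", NMStateSpec { probe_configuration: None });
    api.create(&PostParams::default(), &cr).await?;
    Ok(())
}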
@@ -40,55 +48,350 @@ pub struct ProbeDns
     group = "nmstate.io",
     version = "v1",
     kind = "NodeNetworkConfigurationPolicy",
-    namespaced
+    namespaced = false
 )]
 #[serde(rename_all = "camelCase")]
 pub struct NodeNetworkConfigurationPolicySpec {
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub node_selector: Option<BTreeMap<String, String>>,
-    pub desired_state: DesiredStateSpec,
+    pub desired_state: NetworkState,
+}
+
+// Currently, kube-rs derive doesn't support resources without a `spec` field, so we have
+// to implement it ourselves.
+//
+// Ref:
+// - https://github.com/kube-rs/kube/issues/1763
+// - https://github.com/kube-rs/kube/discussions/1762
+#[derive(Deserialize, Serialize, Clone, Debug)]
+#[serde(rename_all = "camelCase")]
+pub struct NodeNetworkState {
+    metadata: ObjectMeta,
+    pub status: NodeNetworkStateStatus,
+}
+
+impl Resource for NodeNetworkState {
+    const API_VERSION: &'static str = "nmstate.io/v1beta1";
+    const GROUP: &'static str = "nmstate.io";
+    const VERSION: &'static str = "v1beta1";
+    const KIND: &'static str = "NodeNetworkState";
+    const URL_PATH_SEGMENT: &'static str = "nodenetworkstates";
+    type Scope = ClusterResourceScope;
+}
+
+impl k8s_openapi::Metadata for NodeNetworkState {
+    type Ty = ObjectMeta;
+
+    fn metadata(&self) -> &Self::Ty {
+        &self.metadata
+    }
+
+    fn metadata_mut(&mut self) -> &mut Self::Ty {
+        &mut self.metadata
+    }
 }

 #[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
+#[serde(rename_all = "camelCase")]
+pub struct NodeNetworkStateStatus {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub current_state: Option<NetworkState>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub handler_nmstate_version: Option<String>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub host_network_manager_version: Option<String>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub last_successful_update_time: Option<String>,
+}
+
+/// The NetworkState is the top-level struct, representing the entire
+/// desired or current network state.
+#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 #[serde(rename_all = "kebab-case")]
-pub struct DesiredStateSpec {
-    pub interfaces: Vec<InterfaceSpec>,
+#[serde(deny_unknown_fields)]
+pub struct NetworkState {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub hostname: Option<HostNameState>,
+    #[serde(rename = "dns-resolver", skip_serializing_if = "Option::is_none")]
+    pub dns: Option<DnsState>,
+    #[serde(rename = "route-rules", skip_serializing_if = "Option::is_none")]
+    pub rules: Option<RouteRuleState>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub routes: Option<RouteState>,
+    #[serde(default, skip_serializing_if = "Vec::is_empty")]
+    pub interfaces: Vec<Interface>,
+    #[serde(rename = "ovs-db", skip_serializing_if = "Option::is_none")]
+    pub ovsdb: Option<OvsDbGlobalConfig>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub ovn: Option<OvnConfiguration>,
 }

 #[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 #[serde(rename_all = "kebab-case")]
-pub struct InterfaceSpec {
-    pub name: String,
-    pub description: Option<String>,
-    pub r#type: String,
-    pub state: String,
-    pub mac_address: Option<String>,
+pub struct HostNameState {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub running: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub config: Option<String>,
+}
+
+#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
+#[serde(rename_all = "kebab-case")]
+pub struct DnsState {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub running: Option<DnsResolverConfig>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub config: Option<DnsResolverConfig>,
+}
+
+#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
+#[serde(rename_all = "kebab-case")]
+pub struct DnsResolverConfig {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub search: Option<Vec<String>>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub server: Option<Vec<String>>,
+}
+
+#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
+#[serde(rename_all = "kebab-case")]
+pub struct RouteRuleState {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub config: Option<Vec<RouteRule>>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub running: Option<Vec<RouteRule>>,
+}
+
+#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
+#[serde(rename_all = "kebab-case")]
+pub struct RouteState {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub config: Option<Vec<Route>>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub running: Option<Vec<Route>>,
+}
+
+#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
+#[serde(rename_all = "kebab-case")]
+pub struct RouteRule {
+    #[serde(rename = "ip-from", skip_serializing_if = "Option::is_none")]
+    pub ip_from: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub priority: Option<u32>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub route_table: Option<u32>,
+}
+
+#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
+#[serde(rename_all = "kebab-case")]
+pub struct Route {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub destination: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub metric: Option<u32>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub next_hop_address: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub next_hop_interface: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub table_id: Option<u32>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub mtu: Option<u32>,
+}
+
+#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
+#[serde(rename_all = "kebab-case")]
+pub struct OvsDbGlobalConfig {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub external_ids: Option<BTreeMap<String, String>>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub other_config: Option<BTreeMap<String, String>>,
+}
+
+#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
+#[serde(rename_all = "kebab-case")]
+pub struct OvnConfiguration {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub bridge_mappings: Option<Vec<OvnBridgeMapping>>,
+}
+
+#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
+#[serde(rename_all = "kebab-case")]
+pub struct OvnBridgeMapping {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub localnet: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub bridge: Option<String>,
+}
+
+#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
+#[serde(untagged)]
+#[serde(rename_all = "kebab-case")]
+pub enum StpSpec {
+    Bool(bool),
+    Options(StpOptions),
+}
+
+#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
+#[serde(rename_all = "kebab-case")]
+pub struct LldpState {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub enabled: Option<bool>,
+}
+
+#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
+#[serde(rename_all = "kebab-case")]
+pub struct OvsDb {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub external_ids: Option<BTreeMap<String, String>>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub other_config: Option<BTreeMap<String, String>>,
+}
+
+#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
+#[serde(rename_all = "kebab-case")]
+pub struct PatchState {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub peer: Option<String>,
+}
+
+#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
+#[serde(rename_all = "kebab-case")]
+pub struct Interface {
+    pub name: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub description: Option<String>,
+    pub r#type: InterfaceType,
+    pub state: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub mac_address: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub copy_mac_from: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub mtu: Option<u32>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub controller: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub ipv4: Option<IpStackSpec>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub ipv6: Option<IpStackSpec>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub ethernet: Option<EthernetSpec>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub link_aggregation: Option<BondSpec>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub vlan: Option<VlanSpec>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub vxlan: Option<VxlanSpec>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub mac_vtap: Option<MacVtapSpec>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub mac_vlan: Option<MacVlanSpec>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub infiniband: Option<InfinibandSpec>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub linux_bridge: Option<LinuxBridgeSpec>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(alias = "bridge")]
     pub ovs_bridge: Option<OvsBridgeSpec>,
-    pub ethtool: Option<EthtoolSpec>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub ethtool: Option<Value>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub accept_all_mac_addresses: Option<bool>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub identifier: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub lldp: Option<LldpState>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub permanent_mac_address: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub max_mtu: Option<u32>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub min_mtu: Option<u32>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub mptcp: Option<Value>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub profile_name: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub wait_ip: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub ovs_db: Option<OvsDb>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub driver: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub patch: Option<PatchState>,
+}
+
+#[derive(Deserialize, Serialize, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, JsonSchema)]
+#[serde(rename_all = "kebab-case")]
+pub enum InterfaceType {
+    #[serde(rename = "unknown")]
+    Unknown,
+    #[serde(rename = "dummy")]
+    Dummy,
+    #[serde(rename = "loopback")]
+    Loopback,
+    #[serde(rename = "linux-bridge")]
+    LinuxBridge,
+    #[serde(rename = "ovs-bridge")]
+    OvsBridge,
+    #[serde(rename = "ovs-interface")]
+    OvsInterface,
+    #[serde(rename = "bond")]
+    Bond,
+    #[serde(rename = "ipvlan")]
+    IpVlan,
+    #[serde(rename = "vlan")]
+    Vlan,
+    #[serde(rename = "vxlan")]
+    Vxlan,
+    #[serde(rename = "mac-vlan")]
+    Macvlan,
+    #[serde(rename = "mac-vtap")]
+    Macvtap,
+    #[serde(rename = "ethernet")]
+    Ethernet,
+    #[serde(rename = "infiniband")]
+    Infiniband,
+    #[serde(rename = "vrf")]
+    Vrf,
+    #[serde(rename = "veth")]
+    Veth,
+    #[serde(rename = "ipsec")]
+    Ipsec,
+    #[serde(rename = "hsr")]
+    Hrs,
+}
+
+impl Default for InterfaceType {
+    fn default() -> Self {
+        Self::Loopback
+    }
 }

 #[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 #[serde(rename_all = "kebab-case")]
 pub struct IpStackSpec {
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub enabled: Option<bool>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub dhcp: Option<bool>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub autoconf: Option<bool>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub address: Option<Vec<IpAddressSpec>>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub auto_dns: Option<bool>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub auto_gateway: Option<bool>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub auto_routes: Option<bool>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub dhcp_client_id: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub dhcp_duid: Option<String>,
 }

@@ -102,8 +405,11 @@ pub struct IpAddressSpec {
 #[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 #[serde(rename_all = "kebab-case")]
 pub struct EthernetSpec {
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub speed: Option<u32>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub duplex: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub auto_negotiation: Option<bool>,
 }

@@ -111,7 +417,9 @@ pub struct EthernetSpec {
 #[serde(rename_all = "kebab-case")]
 pub struct BondSpec {
     pub mode: String,
+    #[serde(alias = "port")]
     pub ports: Vec<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub options: Option<BTreeMap<String, Value>>,
 }

@@ -120,6 +428,7 @@ pub struct BondSpec {
 pub struct VlanSpec {
     pub base_iface: String,
     pub id: u16,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub protocol: Option<String>,
 }

@@ -129,8 +438,11 @@ pub struct VxlanSpec {
     pub base_iface: String,
     pub id: u32,
     pub remote: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub local: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub learning: Option<bool>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub destination_port: Option<u16>,
 }

@@ -139,6 +451,7 @@ pub struct VxlanSpec {
 pub struct MacVtapSpec {
     pub base_iface: String,
     pub mode: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub promiscuous: Option<bool>,
 }

@@ -147,6 +460,7 @@ pub struct MacVtapSpec {
 pub struct MacVlanSpec {
     pub base_iface: String,
     pub mode: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub promiscuous: Option<bool>,
 }

@@ -161,25 +475,35 @@ pub struct InfinibandSpec {
 #[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 #[serde(rename_all = "kebab-case")]
 pub struct LinuxBridgeSpec {
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub options: Option<LinuxBridgeOptions>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub ports: Option<Vec<LinuxBridgePort>>,
 }

 #[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 #[serde(rename_all = "kebab-case")]
 pub struct LinuxBridgeOptions {
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub mac_ageing_time: Option<u32>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub multicast_snooping: Option<bool>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub stp: Option<StpOptions>,
 }

 #[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 #[serde(rename_all = "kebab-case")]
 pub struct StpOptions {
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub enabled: Option<bool>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub forward_delay: Option<u16>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub hello_time: Option<u16>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub max_age: Option<u16>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub priority: Option<u16>,
 }

@@ -187,15 +511,20 @@ pub struct StpOptions {
 #[serde(rename_all = "kebab-case")]
 pub struct LinuxBridgePort {
     pub name: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub vlan: Option<LinuxBridgePortVlan>,
 }

 #[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 #[serde(rename_all = "kebab-case")]
 pub struct LinuxBridgePortVlan {
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub mode: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub trunk_tags: Option<Vec<VlanTag>>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub tag: Option<u16>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub enable_native: Option<bool>,
 }

@@ -203,6 +532,7 @@ pub struct LinuxBridgePortVlan {
 #[serde(rename_all = "kebab-case")]
 pub struct VlanTag {
     pub id: u16,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub id_range: Option<VlanIdRange>,
 }

@@ -216,36 +546,35 @@ pub struct VlanIdRange {
 #[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 #[serde(rename_all = "kebab-case")]
 pub struct OvsBridgeSpec {
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub options: Option<OvsBridgeOptions>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub ports: Option<Vec<OvsPortSpec>>,
 }

 #[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 #[serde(rename_all = "kebab-case")]
 pub struct OvsBridgeOptions {
-    pub stp: Option<bool>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub stp: Option<StpSpec>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub rstp: Option<bool>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub mcast_snooping_enable: Option<bool>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub datapath: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub fail_mode: Option<String>,
 }

 #[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 #[serde(rename_all = "kebab-case")]
 pub struct OvsPortSpec {
     pub name: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub link_aggregation: Option<BondSpec>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub vlan: Option<LinuxBridgePortVlan>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub r#type: Option<String>,
 }
-
-#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
-#[serde(rename_all = "kebab-case")]
-pub struct EthtoolSpec {
-    // TODO: Properly describe this spec (https://nmstate.io/devel/yaml_api.html#ethtool)
-}
-
-#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
-#[serde(rename_all = "kebab-case")]
-pub struct EthtoolFecSpec {
-    pub auto: Option<bool>,
-    pub mode: Option<String>,
-}
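To make the new schema concrete, this is how a LACP bond over two member NICs would be expressed with the types above, mirroring nmstate's YAML API. The interface names and bond mode are illustrative values, and the `..Default::default()` spread relies on the `Default` derives shown in this hunk:

// Illustrative: desired state for a bond named "bond0" over eno1/eno2.
let desired = NetworkState {
    interfaces: vec![Interface {
        name: "bond0".into(),
        r#type: InterfaceType::Bond,
        state: "up".into(),
        ipv4: Some(IpStackSpec {
            enabled: Some(true),
            dhcp: Some(true),
            ..Default::default()
        }),
        link_aggregation: Some(BondSpec {
            mode: "802.3ad".into(), // LACP; illustrative choice
            ports: vec!["eno1".into(), "eno2".into()],
            options: None,
        }),
        ..Default::default()
    }],
    ..Default::default()
};

// Wrapped in a policy, this is what gets applied cluster-wide:
let policy = NodeNetworkConfigurationPolicySpec {
    node_selector: None,
    desired_state: desired,
};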
@@ -1,6 +1,6 @@
 use async_trait::async_trait;
 use harmony_types::id::Id;
-use log::{debug, info};
+use log::{info, warn};
 use serde::Serialize;

 use crate::{
@@ -9,7 +9,7 @@ use crate::{
     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
     inventory::Inventory,
     score::Score,
-    topology::{HostNetworkConfig, NetworkInterface, Switch, SwitchPort, Topology},
+    topology::{HostNetworkConfig, NetworkInterface, NetworkManager, Switch, SwitchPort, Topology},
 };

 #[derive(Debug, Clone, Serialize)]
@@ -17,7 +17,7 @@ pub struct HostNetworkConfigurationScore {
     pub hosts: Vec<PhysicalHost>,
 }

-impl<T: Topology + Switch> Score<T> for HostNetworkConfigurationScore {
+impl<T: Topology + NetworkManager + Switch> Score<T> for HostNetworkConfigurationScore {
     fn name(&self) -> String {
         "HostNetworkConfigurationScore".into()
     }
@@ -35,34 +35,91 @@ pub struct HostNetworkConfigurationInterpret {
 }

 impl HostNetworkConfigurationInterpret {
-    async fn configure_network_for_host<T: Topology + Switch>(
+    async fn configure_network_for_host<T: Topology + NetworkManager + Switch>(
         &self,
         topology: &T,
         host: &PhysicalHost,
-    ) -> Result<(), InterpretError> {
-        let switch_ports = self.collect_switch_ports_for_host(topology, host).await?;
-        if !switch_ports.is_empty() {
-            topology
-                .configure_host_network(host, HostNetworkConfig { switch_ports })
-                .await
-                .map_err(|e| InterpretError::new(format!("Failed to configure host: {e}")))?;
+        current_host: &usize,
+        total_hosts: &usize,
+    ) -> Result<HostNetworkConfig, InterpretError> {
+        if host.network.is_empty() {
+            info!("[Host {current_host}/{total_hosts}] No interfaces to configure, skipping");
+            return Ok(HostNetworkConfig {
+                host_id: host.id.clone(),
+                switch_ports: vec![],
+            });
+        }
+        if host.network.len() == 1 {
+            info!("[Host {current_host}/{total_hosts}] Only one interface to configure, skipping");
+            return Ok(HostNetworkConfig {
+                host_id: host.id.clone(),
+                switch_ports: vec![],
+            });
         }

-        Ok(())
+        let switch_ports = self
+            .collect_switch_ports_for_host(topology, host, current_host, total_hosts)
+            .await?;
+
+        let config = HostNetworkConfig {
+            host_id: host.id.clone(),
+            switch_ports,
+        };
+
+        if config.switch_ports.len() > 1 {
+            info!(
+                "[Host {current_host}/{total_hosts}] Found {} ports for {} interfaces",
+                config.switch_ports.len(),
+                host.network.len()
+            );
+
+            info!("[Host {current_host}/{total_hosts}] Configuring host network...");
+            topology.configure_bond(&config).await.map_err(|e| {
+                InterpretError::new(format!("Failed to configure host network: {e}"))
+            })?;
+            topology
+                .configure_port_channel(&config)
+                .await
+                .map_err(|e| {
+                    InterpretError::new(format!("Failed to configure host network: {e}"))
+                })?;
+        } else if config.switch_ports.is_empty() {
+            info!(
+                "[Host {current_host}/{total_hosts}] No ports found for {} interfaces, skipping",
+                host.network.len()
+            );
+        } else {
+            warn!(
+                "[Host {current_host}/{total_hosts}] Found a single port for {} interfaces, skipping",
+                host.network.len()
+            );
+        }
+
+        Ok(config)
     }

     async fn collect_switch_ports_for_host<T: Topology + Switch>(
         &self,
         topology: &T,
         host: &PhysicalHost,
+        current_host: &usize,
+        total_hosts: &usize,
     ) -> Result<Vec<SwitchPort>, InterpretError> {
         let mut switch_ports = vec![];
+
+        if host.network.is_empty() {
+            return Ok(switch_ports);
+        }
+
+        info!("[Host {current_host}/{total_hosts}] Collecting ports on switch...");
         for network_interface in &host.network {
             let mac_address = network_interface.mac_address;
+
             match topology.get_port_for_mac_address(&mac_address).await {
                 Ok(Some(port)) => {
+                    info!(
+                        "[Host {current_host}/{total_hosts}] Found port '{port}' for '{mac_address}'"
+                    );
                     switch_ports.push(SwitchPort {
                         interface: NetworkInterface {
                             name: network_interface.name.clone(),
@@ -73,7 +130,7 @@ impl HostNetworkConfigurationInterpret {
                         port,
                     });
                 }
-                Ok(None) => debug!("No port found for host '{}', skipping", host.id),
+                Ok(None) => {}
                 Err(e) => {
                     return Err(InterpretError::new(format!(
                         "Failed to get port for host '{}': {}",
@@ -85,10 +142,42 @@ impl HostNetworkConfigurationInterpret {

         Ok(switch_ports)
     }
+
+    fn format_host_configuration(&self, configs: Vec<HostNetworkConfig>) -> Vec<String> {
+        let mut report = vec![
+            "Network Configuration Report".to_string(),
+            "------------------------------------------------------------------".to_string(),
+        ];
+
+        for config in configs {
+            if config.switch_ports.is_empty() {
+                report.push(format!(
+                    "⏭️ Host {}: SKIPPED (No matching switch ports found)",
+                    config.host_id
+                ));
+            } else {
+                let mappings: Vec<String> = config
+                    .switch_ports
+                    .iter()
+                    .map(|p| format!("[{} -> {}]", p.interface.name, p.port))
+                    .collect();
+
+                report.push(format!(
+                    "✅ Host {}: Bonded {} port(s) {}",
+                    config.host_id,
+                    config.switch_ports.len(),
+                    mappings.join(", ")
+                ));
+            }
+        }
+        report
+            .push("------------------------------------------------------------------".to_string());
+        report
+    }
 }

 #[async_trait]
-impl<T: Topology + Switch> Interpret<T> for HostNetworkConfigurationInterpret {
+impl<T: Topology + NetworkManager + Switch> Interpret<T> for HostNetworkConfigurationInterpret {
     fn get_name(&self) -> InterpretName {
         InterpretName::Custom("HostNetworkConfigurationInterpret")
     }
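For illustration, with two bonded ports found on one host and none on another, the report built by `format_host_configuration` above would render roughly as follows. The host IDs are invented, and the exact port text depends on `PortLocation`'s `Display` impl, which this diff does not show:

Network Configuration Report
------------------------------------------------------------------
✅ Host host-1: Bonded 2 port(s) [interface-1 -> 1/0/42], [interface-2 -> 2/0/42]
⏭️ Host host-2: SKIPPED (No matching switch ports found)
------------------------------------------------------------------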
@@ -114,27 +203,45 @@ impl<T: Topology + Switch> Interpret<T> for HostNetworkConfigurationInterpret {
|
|||||||
return Ok(Outcome::noop("No hosts to configure".into()));
|
return Ok(Outcome::noop("No hosts to configure".into()));
|
||||||
}
|
}
|
||||||
|
|
||||||
info!(
|
let host_count = self.score.hosts.len();
|
||||||
"Started network configuration for {} host(s)...",
|
info!("Started network configuration for {host_count} host(s)...",);
|
||||||
self.score.hosts.len()
|
|
||||||
);
|
|
||||||
|
|
||||||
|
info!("Setting up NetworkManager...",);
|
||||||
|
topology
|
||||||
|
.ensure_network_manager_installed()
|
||||||
|
.await
|
||||||
|
.map_err(|e| InterpretError::new(format!("NetworkManager setup failed: {e}")))?;
|
||||||
|
|
||||||
|
info!("Setting up switch with sane defaults...");
|
||||||
topology
|
topology
|
||||||
.setup_switch()
|
.setup_switch()
|
||||||
.await
|
.await
|
||||||
.map_err(|e| InterpretError::new(format!("Switch setup failed: {e}")))?;
|
.map_err(|e| InterpretError::new(format!("Switch setup failed: {e}")))?;
|
||||||
|
info!("Switch ready");
|
||||||
|
|
||||||
|
let mut current_host = 1;
|
||||||
|
let mut host_configurations = vec![];
|
||||||
|
|
||||||
let mut configured_host_count = 0;
|
|
||||||
for host in &self.score.hosts {
|
for host in &self.score.hosts {
|
||||||
self.configure_network_for_host(topology, host).await?;
|
let host_configuration = self
|
||||||
configured_host_count += 1;
|
.configure_network_for_host(topology, host, ¤t_host, &host_count)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
host_configurations.push(host_configuration);
|
||||||
|
current_host += 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
if configured_host_count > 0 {
|
if current_host > 1 {
|
||||||
Ok(Outcome::success(format!(
|
let details = self.format_host_configuration(host_configurations);
|
||||||
"Configured {configured_host_count}/{} host(s)",
|
|
||||||
self.score.hosts.len()
|
Ok(Outcome::success_with_details(
|
||||||
)))
|
format!(
|
||||||
|
"Configured {}/{} host(s)",
|
||||||
|
current_host - 1,
|
||||||
|
self.score.hosts.len()
|
||||||
|
),
|
||||||
|
details,
|
||||||
|
))
|
||||||
} else {
|
} else {
|
||||||
Ok(Outcome::noop("No hosts configured".into()))
|
Ok(Outcome::noop("No hosts configured".into()))
|
||||||
}
|
}
|
||||||
@@ -144,13 +251,15 @@ impl<T: Topology + Switch> Interpret<T> for HostNetworkConfigurationInterpret {
|
|||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use assertor::*;
|
use assertor::*;
|
||||||
|
use brocade::PortOperatingMode;
|
||||||
use harmony_types::{net::MacAddress, switch::PortLocation};
|
use harmony_types::{net::MacAddress, switch::PortLocation};
|
||||||
use lazy_static::lazy_static;
|
use lazy_static::lazy_static;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
hardware::HostCategory,
|
hardware::HostCategory,
|
||||||
topology::{
|
topology::{
|
||||||
HostNetworkConfig, PreparationError, PreparationOutcome, SwitchError, SwitchPort,
|
HostNetworkConfig, NetworkError, PortConfig, PreparationError, PreparationOutcome,
|
||||||
|
SwitchError, SwitchPort,
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
use std::{
|
use std::{
|
||||||
@@ -175,6 +284,18 @@ mod tests {
|
|||||||
speed_mbps: None,
|
speed_mbps: None,
|
||||||
mtu: 1,
|
mtu: 1,
|
||||||
};
|
};
|
||||||
|
pub static ref YET_ANOTHER_EXISTING_INTERFACE: NetworkInterface = NetworkInterface {
|
||||||
|
mac_address: MacAddress::try_from("AA:BB:CC:DD:EE:F3".to_string()).unwrap(),
|
||||||
|
name: "interface-3".into(),
|
||||||
|
speed_mbps: None,
|
||||||
|
mtu: 1,
|
||||||
|
};
|
||||||
|
pub static ref LAST_EXISTING_INTERFACE: NetworkInterface = NetworkInterface {
|
||||||
|
mac_address: MacAddress::try_from("AA:BB:CC:DD:EE:F4".to_string()).unwrap(),
|
||||||
|
name: "interface-4".into(),
|
||||||
|
speed_mbps: None,
|
||||||
|
mtu: 1,
|
||||||
|
};
|
||||||
pub static ref UNKNOWN_INTERFACE: NetworkInterface = NetworkInterface {
|
pub static ref UNKNOWN_INTERFACE: NetworkInterface = NetworkInterface {
|
||||||
mac_address: MacAddress::try_from("11:22:33:44:55:61".to_string()).unwrap(),
|
mac_address: MacAddress::try_from("11:22:33:44:55:61".to_string()).unwrap(),
|
||||||
name: "unknown-interface".into(),
|
name: "unknown-interface".into(),
|
||||||
@@ -183,6 +304,8 @@ mod tests {
|
|||||||
};
|
};
|
||||||
pub static ref PORT: PortLocation = PortLocation(1, 0, 42);
|
pub static ref PORT: PortLocation = PortLocation(1, 0, 42);
|
||||||
pub static ref ANOTHER_PORT: PortLocation = PortLocation(2, 0, 42);
|
pub static ref ANOTHER_PORT: PortLocation = PortLocation(2, 0, 42);
|
||||||
|
pub static ref YET_ANOTHER_PORT: PortLocation = PortLocation(1, 0, 45);
|
||||||
|
pub static ref LAST_PORT: PortLocation = PortLocation(2, 0, 45);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
@@ -198,27 +321,33 @@ mod tests {
     }

     #[tokio::test]
-    async fn host_with_one_mac_address_should_create_bond_with_one_interface() {
+    async fn should_setup_network_manager() {
         let host = given_host(&HOST_ID, vec![EXISTING_INTERFACE.clone()]);
         let score = given_score(vec![host]);
         let topology = TopologyWithSwitch::new();

         let _ = score.interpret(&Inventory::empty(), &topology).await;

-        let configured_host_networks = topology.configured_host_networks.lock().unwrap();
-        assert_that!(*configured_host_networks).contains_exactly(vec![(
-            HOST_ID.clone(),
-            HostNetworkConfig {
-                switch_ports: vec![SwitchPort {
-                    interface: EXISTING_INTERFACE.clone(),
-                    port: PORT.clone(),
-                }],
-            },
-        )]);
+        let network_manager_setup = topology.network_manager_setup.lock().unwrap();
+        assert_that!(*network_manager_setup).is_true();
     }

     #[tokio::test]
-    async fn host_with_multiple_mac_addresses_should_create_one_bond_with_all_interfaces() {
+    async fn host_with_one_mac_address_should_skip_host_configuration() {
+        let host = given_host(&HOST_ID, vec![EXISTING_INTERFACE.clone()]);
+        let score = given_score(vec![host]);
+        let topology = TopologyWithSwitch::new();
+
+        let _ = score.interpret(&Inventory::empty(), &topology).await;
+
+        let config = topology.configured_bonds.lock().unwrap();
+        assert_that!(*config).is_empty();
+        let config = topology.configured_port_channels.lock().unwrap();
+        assert_that!(*config).is_empty();
+    }
+
+    #[tokio::test]
+    async fn host_with_multiple_mac_addresses_should_configure_one_bond_with_all_interfaces() {
         let score = given_score(vec![given_host(
             &HOST_ID,
             vec![
@@ -230,10 +359,11 @@ mod tests {

         let _ = score.interpret(&Inventory::empty(), &topology).await;

-        let configured_host_networks = topology.configured_host_networks.lock().unwrap();
-        assert_that!(*configured_host_networks).contains_exactly(vec![(
+        let config = topology.configured_bonds.lock().unwrap();
+        assert_that!(*config).contains_exactly(vec![(
             HOST_ID.clone(),
             HostNetworkConfig {
+                host_id: HOST_ID.clone(),
                 switch_ports: vec![
                     SwitchPort {
                         interface: EXISTING_INTERFACE.clone(),
@@ -249,47 +379,183 @@ mod tests {
     }

     #[tokio::test]
-    async fn multiple_hosts_should_create_one_bond_per_host() {
+    async fn host_with_multiple_mac_addresses_should_configure_one_port_channel_with_all_interfaces()
+    {
+        let score = given_score(vec![given_host(
+            &HOST_ID,
+            vec![
+                EXISTING_INTERFACE.clone(),
+                ANOTHER_EXISTING_INTERFACE.clone(),
+            ],
+        )]);
+        let topology = TopologyWithSwitch::new();
+
+        let _ = score.interpret(&Inventory::empty(), &topology).await;
+
+        let config = topology.configured_port_channels.lock().unwrap();
+        assert_that!(*config).contains_exactly(vec![(
+            HOST_ID.clone(),
+            HostNetworkConfig {
+                host_id: HOST_ID.clone(),
+                switch_ports: vec![
+                    SwitchPort {
+                        interface: EXISTING_INTERFACE.clone(),
+                        port: PORT.clone(),
+                    },
+                    SwitchPort {
+                        interface: ANOTHER_EXISTING_INTERFACE.clone(),
+                        port: ANOTHER_PORT.clone(),
+                    },
+                ],
+            },
+        )]);
+    }
+
+    #[tokio::test]
+    async fn multiple_hosts_should_configure_one_bond_per_host() {
         let score = given_score(vec![
-            given_host(&HOST_ID, vec![EXISTING_INTERFACE.clone()]),
-            given_host(&ANOTHER_HOST_ID, vec![ANOTHER_EXISTING_INTERFACE.clone()]),
+            given_host(
+                &HOST_ID,
+                vec![
+                    EXISTING_INTERFACE.clone(),
+                    ANOTHER_EXISTING_INTERFACE.clone(),
+                ],
+            ),
+            given_host(
+                &ANOTHER_HOST_ID,
+                vec![
+                    YET_ANOTHER_EXISTING_INTERFACE.clone(),
+                    LAST_EXISTING_INTERFACE.clone(),
+                ],
+            ),
         ]);
         let topology = TopologyWithSwitch::new();

         let _ = score.interpret(&Inventory::empty(), &topology).await;

-        let configured_host_networks = topology.configured_host_networks.lock().unwrap();
-        assert_that!(*configured_host_networks).contains_exactly(vec![
+        let config = topology.configured_bonds.lock().unwrap();
+        assert_that!(*config).contains_exactly(vec![
             (
                 HOST_ID.clone(),
                 HostNetworkConfig {
-                    switch_ports: vec![SwitchPort {
-                        interface: EXISTING_INTERFACE.clone(),
-                        port: PORT.clone(),
-                    }],
+                    host_id: HOST_ID.clone(),
+                    switch_ports: vec![
+                        SwitchPort {
+                            interface: EXISTING_INTERFACE.clone(),
+                            port: PORT.clone(),
+                        },
+                        SwitchPort {
+                            interface: ANOTHER_EXISTING_INTERFACE.clone(),
+                            port: ANOTHER_PORT.clone(),
+                        },
+                    ],
                 },
             ),
             (
                 ANOTHER_HOST_ID.clone(),
                 HostNetworkConfig {
-                    switch_ports: vec![SwitchPort {
-                        interface: ANOTHER_EXISTING_INTERFACE.clone(),
-                        port: ANOTHER_PORT.clone(),
-                    }],
+                    host_id: ANOTHER_HOST_ID.clone(),
+                    switch_ports: vec![
+                        SwitchPort {
+                            interface: YET_ANOTHER_EXISTING_INTERFACE.clone(),
+                            port: YET_ANOTHER_PORT.clone(),
+                        },
+                        SwitchPort {
+                            interface: LAST_EXISTING_INTERFACE.clone(),
+                            port: LAST_PORT.clone(),
+                        },
+                    ],
                 },
             ),
         ]);
     }

     #[tokio::test]
-    async fn port_not_found_for_mac_address_should_not_configure_interface() {
+    async fn multiple_hosts_should_configure_one_port_channel_per_host() {
+        let score = given_score(vec![
+            given_host(
+                &HOST_ID,
+                vec![
+                    EXISTING_INTERFACE.clone(),
+                    ANOTHER_EXISTING_INTERFACE.clone(),
+                ],
+            ),
+            given_host(
+                &ANOTHER_HOST_ID,
+                vec![
+                    YET_ANOTHER_EXISTING_INTERFACE.clone(),
+                    LAST_EXISTING_INTERFACE.clone(),
+                ],
+            ),
+        ]);
+        let topology = TopologyWithSwitch::new();
+
+        let _ = score.interpret(&Inventory::empty(), &topology).await;
+
+        let config = topology.configured_port_channels.lock().unwrap();
+        assert_that!(*config).contains_exactly(vec![
+            (
+                HOST_ID.clone(),
+                HostNetworkConfig {
+                    host_id: HOST_ID.clone(),
+                    switch_ports: vec![
+                        SwitchPort {
+                            interface: EXISTING_INTERFACE.clone(),
+                            port: PORT.clone(),
+                        },
+                        SwitchPort {
+                            interface: ANOTHER_EXISTING_INTERFACE.clone(),
+                            port: ANOTHER_PORT.clone(),
+                        },
+                    ],
+                },
+            ),
+            (
+                ANOTHER_HOST_ID.clone(),
+                HostNetworkConfig {
+                    host_id: ANOTHER_HOST_ID.clone(),
+                    switch_ports: vec![
+                        SwitchPort {
+                            interface: YET_ANOTHER_EXISTING_INTERFACE.clone(),
+                            port: YET_ANOTHER_PORT.clone(),
+                        },
+                        SwitchPort {
+                            interface: LAST_EXISTING_INTERFACE.clone(),
+                            port: LAST_PORT.clone(),
+                        },
+                    ],
+                },
+            ),
+        ]);
+    }
+
+    #[tokio::test]
+    async fn port_not_found_for_mac_address_should_not_configure_host() {
         let score = given_score(vec![given_host(&HOST_ID, vec![UNKNOWN_INTERFACE.clone()])]);
         let topology = TopologyWithSwitch::new_port_not_found();

         let _ = score.interpret(&Inventory::empty(), &topology).await;

-        let configured_host_networks = topology.configured_host_networks.lock().unwrap();
-        assert_that!(*configured_host_networks).is_empty();
+        let config = topology.configured_port_channels.lock().unwrap();
+        assert_that!(*config).is_empty();
+        let config = topology.configured_bonds.lock().unwrap();
+        assert_that!(*config).is_empty();
+    }
+
+    #[tokio::test]
+    async fn only_one_port_found_for_multiple_mac_addresses_should_not_configure_host() {
+        let score = given_score(vec![given_host(
+            &HOST_ID,
+            vec![EXISTING_INTERFACE.clone(), UNKNOWN_INTERFACE.clone()],
+        )]);
+        let topology = TopologyWithSwitch::new_single_port_found();
+
+        let _ = score.interpret(&Inventory::empty(), &topology).await;
+
+        let config = topology.configured_port_channels.lock().unwrap();
+        assert_that!(*config).is_empty();
+        let config = topology.configured_bonds.lock().unwrap();
+        assert_that!(*config).is_empty();
     }

     fn given_score(hosts: Vec<PhysicalHost>) -> HostNetworkConfigurationScore {
@@ -326,26 +592,48 @@ mod tests {
         }
     }

+    #[derive(Debug)]
     struct TopologyWithSwitch {
         available_ports: Arc<Mutex<Vec<PortLocation>>>,
-        configured_host_networks: Arc<Mutex<Vec<(Id, HostNetworkConfig)>>>,
+        configured_port_channels: Arc<Mutex<Vec<(Id, HostNetworkConfig)>>>,
         switch_setup: Arc<Mutex<bool>>,
+        network_manager_setup: Arc<Mutex<bool>>,
+        configured_bonds: Arc<Mutex<Vec<(Id, HostNetworkConfig)>>>,
     }

     impl TopologyWithSwitch {
         fn new() -> Self {
             Self {
-                available_ports: Arc::new(Mutex::new(vec![PORT.clone(), ANOTHER_PORT.clone()])),
-                configured_host_networks: Arc::new(Mutex::new(vec![])),
+                available_ports: Arc::new(Mutex::new(vec![
+                    PORT.clone(),
+                    ANOTHER_PORT.clone(),
+                    YET_ANOTHER_PORT.clone(),
+                    LAST_PORT.clone(),
+                ])),
+                configured_port_channels: Arc::new(Mutex::new(vec![])),
                 switch_setup: Arc::new(Mutex::new(false)),
+                network_manager_setup: Arc::new(Mutex::new(false)),
+                configured_bonds: Arc::new(Mutex::new(vec![])),
             }
         }

         fn new_port_not_found() -> Self {
             Self {
                 available_ports: Arc::new(Mutex::new(vec![])),
-                configured_host_networks: Arc::new(Mutex::new(vec![])),
+                configured_port_channels: Arc::new(Mutex::new(vec![])),
                 switch_setup: Arc::new(Mutex::new(false)),
+                network_manager_setup: Arc::new(Mutex::new(false)),
+                configured_bonds: Arc::new(Mutex::new(vec![])),
+            }
+        }
+
+        fn new_single_port_found() -> Self {
+            Self {
+                available_ports: Arc::new(Mutex::new(vec![PORT.clone()])),
+                configured_port_channels: Arc::new(Mutex::new(vec![])),
+                switch_setup: Arc::new(Mutex::new(false)),
+                network_manager_setup: Arc::new(Mutex::new(false)),
+                configured_bonds: Arc::new(Mutex::new(vec![])),
             }
         }
     }
@@ -361,6 +649,22 @@ mod tests {
         }
     }

+    #[async_trait]
+    impl NetworkManager for TopologyWithSwitch {
+        async fn ensure_network_manager_installed(&self) -> Result<(), NetworkError> {
+            let mut network_manager_installed = self.network_manager_setup.lock().unwrap();
+            *network_manager_installed = true;
+            Ok(())
+        }
+
+        async fn configure_bond(&self, config: &HostNetworkConfig) -> Result<(), NetworkError> {
+            let mut configured_bonds = self.configured_bonds.lock().unwrap();
+            configured_bonds.push((config.host_id.clone(), config.clone()));
+
+            Ok(())
+        }
+    }
+
     #[async_trait]
     impl Switch for TopologyWithSwitch {
         async fn setup_switch(&self) -> Result<(), SwitchError> {
@@ -380,15 +684,23 @@ mod tests {
             Ok(Some(ports.remove(0)))
         }

-        async fn configure_host_network(
+        async fn configure_port_channel(
             &self,
-            host: &PhysicalHost,
-            config: HostNetworkConfig,
+            config: &HostNetworkConfig,
         ) -> Result<(), SwitchError> {
-            let mut configured_host_networks = self.configured_host_networks.lock().unwrap();
-            configured_host_networks.push((host.id.clone(), config.clone()));
+            let mut configured_port_channels = self.configured_port_channels.lock().unwrap();
+            configured_port_channels.push((config.host_id.clone(), config.clone()));

             Ok(())
         }
+
+        async fn clear_port_channel(&self, ids: &Vec<Id>) -> Result<(), SwitchError> {
+            todo!()
+        }
+
+        async fn configure_interface(
+            &self,
+            port_config: &Vec<PortConfig>,
+        ) -> Result<(), SwitchError> {
+            todo!()
+        }
     }
 }
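The test double above is a classic recording spy: each trait method writes into an `Arc<Mutex<_>>` field so the test can inspect what was called after `interpret` returns, and the `new_*` constructors vary only the pre-seeded switch ports. A minimal, self-contained sketch of the same pattern, assuming the `async-trait` and `tokio` dev-dependencies these tests already use (`Configurator` and `RecordingConfigurator` are hypothetical names, not part of the Harmony codebase):

use std::sync::{Arc, Mutex};

use async_trait::async_trait;

#[async_trait]
trait Configurator {
    async fn configure(&self, host_id: String) -> Result<(), String>;
}

#[derive(Default)]
struct RecordingConfigurator {
    // Shared, interior-mutable state: the test keeps a handle to it
    // while the code under test drives the trait methods.
    configured: Arc<Mutex<Vec<String>>>,
}

#[async_trait]
impl Configurator for RecordingConfigurator {
    async fn configure(&self, host_id: String) -> Result<(), String> {
        // Record the call instead of touching real hardware.
        self.configured.lock().unwrap().push(host_id);
        Ok(())
    }
}

#[tokio::test]
async fn records_every_configured_host() {
    let spy = RecordingConfigurator::default();
    spy.configure("host-1".into()).await.unwrap();
    assert_eq!(*spy.configured.lock().unwrap(), vec!["host-1".to_string()]);
}

Wrapping the recorded state in `Arc<Mutex<...>>` is what lets `TopologyWithSwitch` take `&self` in every trait method yet still mutate its call log, which is why the struct fields above are not plain `Vec`s.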
@@ -48,10 +48,13 @@
 //! - internal_domain: Internal cluster domain (e.g., cluster.local or harmony.mcd).

 use crate::{
-    modules::okd::{
-        OKDSetup01InventoryScore, OKDSetup02BootstrapScore, OKDSetup03ControlPlaneScore,
-        OKDSetup04WorkersScore, OKDSetup05SanityCheckScore,
-        bootstrap_06_installation_report::OKDSetup06InstallationReportScore,
+    modules::{
+        inventory::HarmonyDiscoveryStrategy,
+        okd::{
+            OKDSetup01InventoryScore, OKDSetup02BootstrapScore, OKDSetup03ControlPlaneScore,
+            OKDSetup04WorkersScore, OKDSetup05SanityCheckScore, OKDSetupPersistNetworkBondScore,
+            bootstrap_06_installation_report::OKDSetup06InstallationReportScore,
+        },
     },
     score::Score,
     topology::HAClusterTopology,
@@ -60,12 +63,19 @@ use crate::{
 pub struct OKDInstallationPipeline;

 impl OKDInstallationPipeline {
-    pub async fn get_all_scores() -> Vec<Box<dyn Score<HAClusterTopology>>> {
+    pub async fn get_all_scores(
+        discovery_strategy: HarmonyDiscoveryStrategy,
+    ) -> Vec<Box<dyn Score<HAClusterTopology>>> {
         vec![
             Box::new(OKDSetup01InventoryScore::new()),
             Box::new(OKDSetup02BootstrapScore::new()),
-            Box::new(OKDSetup03ControlPlaneScore::new()),
-            Box::new(OKDSetup04WorkersScore::new()),
+            Box::new(OKDSetup03ControlPlaneScore {
+                discovery_strategy: discovery_strategy.clone(),
+            }),
+            Box::new(OKDSetupPersistNetworkBondScore::new()),
+            Box::new(OKDSetup04WorkersScore {
+                discovery_strategy: discovery_strategy.clone(),
+            }),
             Box::new(OKDSetup05SanityCheckScore::new()),
             Box::new(OKDSetup06InstallationReportScore::new()),
         ]
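For context, a call site for the new signature might look like the sketch below. `HarmonyDiscoveryStrategy::default()` is an assumed constructor used purely for illustration; `Score::name()` is the trait method shown later in this diff:

// Hypothetical call site (sketch): one discovery strategy is threaded
// through the whole pipeline instead of each score picking its own.
let scores = OKDInstallationPipeline::get_all_scores(
    HarmonyDiscoveryStrategy::default(), // assumed constructor, for illustration
)
.await;
for score in &scores {
    println!("pipeline step: {}", score.name());
}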
@@ -6,11 +6,14 @@ mod bootstrap_05_sanity_check;
 mod bootstrap_06_installation_report;
 pub mod bootstrap_dhcp;
 pub mod bootstrap_load_balancer;
+pub mod bootstrap_okd_node;
+mod bootstrap_persist_network_bond;
 pub mod dhcp;
 pub mod dns;
 pub mod installation;
 pub mod ipxe;
 pub mod load_balancer;
+pub mod okd_node;
 pub mod templates;
 pub mod upgrade;
 pub use bootstrap_01_prepare::*;
@@ -19,5 +22,6 @@ pub use bootstrap_03_control_plane::*;
 pub use bootstrap_04_workers::*;
 pub use bootstrap_05_sanity_check::*;
 pub use bootstrap_06_installation_report::*;
+pub use bootstrap_persist_network_bond::*;
 pub mod crd;
 pub mod host_network;
harmony/src/modules/okd/okd_node.rs (new file, 54 lines)
@@ -0,0 +1,54 @@
+use crate::topology::{HAClusterTopology, LogicalHost};
+
+pub trait OKDRoleProperties {
+    fn ignition_file(&self) -> &'static str;
+    fn required_hosts(&self) -> i16;
+    fn logical_hosts<'a>(&self, t: &'a HAClusterTopology) -> &'a Vec<LogicalHost>;
+}
+
+pub struct BootstrapRole;
+pub struct ControlPlaneRole;
+pub struct WorkerRole;
+pub struct StorageRole;
+
+impl OKDRoleProperties for BootstrapRole {
+    fn ignition_file(&self) -> &'static str {
+        "bootstrap.ign"
+    }
+
+    fn required_hosts(&self) -> i16 {
+        1
+    }
+
+    fn logical_hosts<'a>(&self, t: &'a HAClusterTopology) -> &'a Vec<LogicalHost> {
+        todo!()
+    }
+}
+
+impl OKDRoleProperties for ControlPlaneRole {
+    fn ignition_file(&self) -> &'static str {
+        "master.ign"
+    }
+
+    fn required_hosts(&self) -> i16 {
+        3
+    }
+
+    fn logical_hosts<'a>(&self, t: &'a HAClusterTopology) -> &'a Vec<LogicalHost> {
+        &t.control_plane
+    }
+}
+
+impl OKDRoleProperties for WorkerRole {
+    fn ignition_file(&self) -> &'static str {
+        "worker.ign"
+    }
+
+    fn required_hosts(&self) -> i16 {
+        2
+    }
+
+    fn logical_hosts<'a>(&self, t: &'a HAClusterTopology) -> &'a Vec<LogicalHost> {
+        &t.workers
+    }
+}
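Because `OKDRoleProperties` only takes `&self` (the lifetime parameter on `logical_hosts` does not break object safety), roles can be handled uniformly behind `&dyn OKDRoleProperties`. A sketch of such a consumer; the `describe` helper is hypothetical, not part of this diff:

// Sketch: consume any role through a trait object.
fn describe(role: &dyn OKDRoleProperties) -> String {
    format!(
        "{} host(s) ignited from {}",
        role.required_hosts(),
        role.ignition_file()
    )
}

// describe(&BootstrapRole)    => "1 host(s) ignited from bootstrap.ign"
// describe(&ControlPlaneRole) => "3 host(s) ignited from master.ign"
// describe(&WorkerRole)       => "2 host(s) ignited from worker.ign"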
@@ -1,3 +1,4 @@
+pub mod node_exporter;
 mod shell;
 mod upgrade;
 pub use shell::*;
harmony/src/modules/opnsense/node_exporter.rs (new file, 70 lines)
@@ -0,0 +1,70 @@
+use async_trait::async_trait;
+use harmony_types::id::Id;
+use log::info;
+use serde::Serialize;
+
+use crate::{
+    data::Version,
+    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
+    inventory::Inventory,
+    score::Score,
+    topology::{Topology, node_exporter::NodeExporter},
+};
+
+#[derive(Debug, Clone, Serialize)]
+pub struct NodeExporterScore {}
+
+impl<T: Topology + NodeExporter> Score<T> for NodeExporterScore {
+    fn name(&self) -> String {
+        "NodeExporterScore".to_string()
+    }
+
+    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
+        Box::new(NodeExporterInterpret {})
+    }
+}
+
+#[derive(Debug)]
+pub struct NodeExporterInterpret {}
+
+#[async_trait]
+impl<T: Topology + NodeExporter> Interpret<T> for NodeExporterInterpret {
+    async fn execute(
+        &self,
+        _inventory: &Inventory,
+        node_exporter: &T,
+    ) -> Result<Outcome, InterpretError> {
+        info!(
+            "Making sure node exporter is initialized: {:?}",
+            node_exporter.ensure_initialized().await?
+        );
+
+        info!("Applying Node Exporter configuration");
+
+        node_exporter.commit_config().await?;
+
+        info!("Reloading and restarting Node Exporter");
+
+        node_exporter.reload_restart().await?;
+
+        Ok(Outcome::success(format!(
+            "NodeExporter successfully configured"
+        )))
+    }
+
+    fn get_name(&self) -> InterpretName {
+        InterpretName::Custom("NodeExporter")
+    }
+
+    fn get_version(&self) -> Version {
+        todo!()
+    }
+
+    fn get_status(&self) -> InterpretStatus {
+        todo!()
+    }
+
+    fn get_children(&self) -> Vec<Id> {
+        todo!()
+    }
+}
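The interpret drives the topology through three async calls, so the `NodeExporter` capability trait it requires presumably has roughly the following shape. This is a sketch inferred from the call sites, not the actual trait definition: the return types are assumptions based on the `?` operator and the `{:?}` logging.

// Assumed trait shape (sketch). Errors must convert into InterpretError
// for `?` to work above, and ensure_initialized() must return something
// Debug-printable.
#[async_trait]
pub trait NodeExporter {
    async fn ensure_initialized(&self) -> Result<Outcome, InterpretError>; // assumed return type
    async fn commit_config(&self) -> Result<(), InterpretError>;
    async fn reload_restart(&self) -> Result<(), InterpretError>;
}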
Some files were not shown because too many files have changed in this diff.