Compare commits

9 commits — master...feat/insta

| Author | SHA1 | Date |
|---|---|---|
|  | c2fa4f1869 |  |
|  | ee278ac817 |  |
|  | 09a06f136e |  |
|  | 5f147fa672 |  |
|  | 9ba939bde1 |  |
|  | 44bf21718c |  |
|  | 5ab58f0253 |  |
|  | 5af13800b7 |  |
|  | 8126b233d8 |  |
Cargo.lock (generated, 15 lines changed)

```diff
@@ -6049,21 +6049,6 @@ version = "0.5.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683"
 
-[[package]]
-name = "test-score"
-version = "0.1.0"
-dependencies = [
- "base64 0.22.1",
- "env_logger",
- "harmony",
- "harmony_cli",
- "harmony_macros",
- "harmony_types",
- "log",
- "tokio",
- "url",
-]
-
 [[package]]
 name = "thiserror"
 version = "1.0.69"
```
@@ -1,114 +0,0 @@ (file deleted)

# Architecture Decision Record: Higher-Order Topologies

**Initial Author:** Jean-Gabriel Gill-Couture
**Initial Date:** 2025-12-08
**Last Updated Date:** 2025-12-08

## Status

Implemented

## Context

Harmony models infrastructure as **Topologies** (deployment targets like `K8sAnywhereTopology`, `LinuxHostTopology`) implementing **Capabilities** (technology traits like `PostgreSQL`, `Docker`).

**Higher-Order Topologies** (e.g., `FailoverTopology<T>`) compose and orchestrate capabilities *across* multiple underlying topologies (e.g., a primary and a replica `T`).

A naive design requires a manual `impl Capability for HigherOrderTopology<T>` *per T, per capability*, causing:
- **Impl explosion**: N topologies × M capabilities = N×M boilerplate impls.
- **ISP violation**: topologies are forced to implement unrelated capabilities.
- **Maintenance hell**: a new topology needs impls for *all* orchestrated capabilities; a new capability needs impls for *all* topologies and higher-order types.
- **Barrier to extension**: users can't easily add topologies without `todo!()`s or panics.

This makes scaling Harmony impractical as the ecosystem grows.
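To make the explosion concrete, here is a sketch of the rejected manual approach (illustrative only; method bodies are elided, and the type names match the examples below):

````rust
// Every (concrete topology × capability) pair needs its own hand-written impl:
impl PostgreSQL for FailoverTopology<K8sAnywhereTopology> { /* orchestration, copy #1 */ }
impl PostgreSQL for FailoverTopology<LinuxHostTopology> { /* orchestration, copy #2 */ }
impl Docker for FailoverTopology<K8sAnywhereTopology> { /* orchestration, copy #3 */ }
impl Docker for FailoverTopology<LinuxHostTopology> { /* orchestration, copy #4 */ }
// ...N topologies × M capabilities of these, all kept in sync by hand.
````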
## Decision

Use **blanket trait impls** on higher-order topologies to *automatically* derive orchestration:

````rust
/// Higher-Order Topology: orchestrates capabilities across sub-topologies.
pub struct FailoverTopology<T> {
    /// Primary sub-topology.
    primary: T,
    /// Replica sub-topology.
    replica: T,
}

/// Automatically provides PostgreSQL failover for *any* `T: PostgreSQL`.
/// Delegates queries to the primary; orchestrates deploys across both.
#[async_trait]
impl<T: PostgreSQL> PostgreSQL for FailoverTopology<T> {
    async fn deploy(&self, config: &PostgreSQLConfig) -> Result<String, String> {
        // Deploy primary; extract certs/endpoint;
        // deploy replica with pg_basebackup + TLS passthrough.
        // (Full impl logged/elaborated.)
    }

    // Delegate queries to the primary.
    async fn get_replication_certs(&self, cluster_name: &str) -> Result<ReplicationCerts, String> {
        self.primary.get_replication_certs(cluster_name).await
    }
    // ...
}

/// Similarly for other capabilities.
#[async_trait]
impl<T: Docker> Docker for FailoverTopology<T> {
    // Failover Docker orchestration.
}
````

**Key properties:**
- **Auto-derivation**: `FailoverTopology<K8sAnywhere>` gets `PostgreSQL` iff `K8sAnywhere: PostgreSQL`.
- **No boilerplate**: one blanket impl per capability *per higher-order type*.
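Extension is then free on the topology side. A minimal sketch (the `AwsTopology` name is hypothetical, not part of Harmony) of how a new deployment target gains failover automatically:

````rust
/// Hypothetical new topology; it only implements the capability it actually has.
#[derive(Clone)]
pub struct AwsTopology;

#[async_trait]
impl PostgreSQL for AwsTopology {
    async fn deploy(&self, _config: &PostgreSQLConfig) -> Result<String, String> {
        Ok("AWS PostgreSQL deployed".to_string())
    }

    async fn get_replication_certs(&self, _cluster_name: &str) -> Result<ReplicationCerts, String> {
        Ok(ReplicationCerts)
    }
}

// No further code is needed: the blanket impl above already yields
// `FailoverTopology<AwsTopology>: PostgreSQL`, while `FailoverTopology<AwsTopology>`
// still does *not* implement `Docker`, so misuse stays a compile error.
````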
## Rationale

- **Composition via generics**: the Rust trait solver auto-selects impls; zero runtime cost.
- **Compile-time safety**: a missing `T: Capability` bound is a compile error (no panics).
- **Scalable**: O(capabilities) impls per higher-order type; a new `T` works automatically.
- **ISP-respecting**: capabilities only surface if the sub-topology provides them.
- **Centralized logic**: orchestration (e.g., cert propagation) lives in one place.

**Example usage:**

````rust
// ✅ Works: K8sAnywhere: PostgreSQL → Failover provides failover PG
let pg_failover: FailoverTopology<K8sAnywhereTopology> = ...;
pg_failover.deploy(&config).await;

// ✅ Works: LinuxHost: Docker → Failover provides failover Docker
let docker_failover: FailoverTopology<LinuxHostTopology> = ...;
docker_failover.deploy_docker().await;

// ❌ Compile fail: K8sAnywhere !: Docker
let invalid: FailoverTopology<K8sAnywhereTopology>;
invalid.deploy_docker(); // `T: Docker` bound unsatisfied
````

## Consequences

**Pros:**
- **Extensible**: a new topology `AWSTopology: PostgreSQL` instantly yields `Failover<AWSTopology>: PostgreSQL`.
- **Lean**: no useless impls (e.g., no `K8sAnywhere: Docker`).
- **Observable**: logs trace every step.

**Cons:**
- **Monomorphization**: generics generate code per `T` (mitigated: few `T`s in practice).
- **Delegation opacity**: relies on rustdoc/logs to reveal internals.

## Alternatives considered

| Approach | Pros | Cons |
|----------|------|------|
| **Manual per-T impls**<br>`impl PG for Failover<K8s> {..}`<br>`impl PG for Failover<Linux> {..}` | Explicit control | N×M explosion; violates ISP; hard to extend. |
| **Dynamic trait objects**<br>`Box<dyn AnyCapability>` | Runtime flexibility | Perf hit; type erasure; error-prone dispatch. |
| **Mega-topology trait**<br>All-in-one `OrchestratedTopology` | Simple wiring | Monolithic; poor composition. |
| **Registry dispatch**<br>Runtime capability lookup | Decoupled | Complex; no compile-time safety; perf/debug overhead. |

**Selected**: blanket impls leverage Rust generics for safe, zero-cost composition.
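For contrast, a minimal sketch of the rejected registry/trait-object route (the `CapabilityRegistry` type is hypothetical, not Harmony code), showing where the compile-time guarantee is lost:

````rust
use std::any::Any;
use std::collections::HashMap;

// Hypothetical runtime capability registry (illustrative only).
struct CapabilityRegistry {
    caps: HashMap<&'static str, Box<dyn Any + Send + Sync>>,
}

impl CapabilityRegistry {
    fn get<T: 'static>(&self, name: &'static str) -> Option<&T> {
        // A typo in `name` or a mismatched `T` is only discovered at runtime,
        // whereas the blanket-impl design turns both into compile errors.
        self.caps.get(name)?.downcast_ref::<T>()
    }
}
````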
## Additional Notes

- Applies to `MultisiteTopology<T>`, `ShardedTopology<T>`, etc.
- `FailoverTopology` in `failover.rs` is the first implementation.
@@ -1,153 +0,0 @@ (file deleted)

```rust
//! Example of Higher-Order Topologies in Harmony.
//! Demonstrates how `FailoverTopology<T>` automatically provides failover for *any* capability
//! supported by a sub-topology `T` via blanket trait impls.
//!
//! Key insight: no manual impls per `T` or per capability -- scales effortlessly.
//! Users can:
//! - Write a new `Topology` (implement capabilities on a struct).
//! - Compose with `FailoverTopology` (gets capabilities if `T` has them).
//! - Compilation fails if a capability is missing (safety).

use async_trait::async_trait;
use tokio;

/// Capability trait: deploy and manage PostgreSQL.
#[async_trait]
pub trait PostgreSQL {
    async fn deploy(&self, config: &PostgreSQLConfig) -> Result<String, String>;
    async fn get_replication_certs(&self, cluster_name: &str) -> Result<ReplicationCerts, String>;
}

/// Capability trait: deploy Docker.
#[async_trait]
pub trait Docker {
    async fn deploy_docker(&self) -> Result<String, String>;
}

/// Configuration for PostgreSQL deployments.
#[derive(Clone)]
pub struct PostgreSQLConfig;

/// Replication certificates.
#[derive(Clone)]
pub struct ReplicationCerts;

/// Concrete topology: Kubernetes Anywhere (supports PostgreSQL).
#[derive(Clone)]
pub struct K8sAnywhereTopology;

#[async_trait]
impl PostgreSQL for K8sAnywhereTopology {
    async fn deploy(&self, _config: &PostgreSQLConfig) -> Result<String, String> {
        // Real impl: use a k8s helm chart, operator, etc.
        Ok("K8sAnywhere PostgreSQL deployed".to_string())
    }

    async fn get_replication_certs(&self, _cluster_name: &str) -> Result<ReplicationCerts, String> {
        Ok(ReplicationCerts)
    }
}

/// Concrete topology: Linux Host (supports Docker).
#[derive(Clone)]
pub struct LinuxHostTopology;

#[async_trait]
impl Docker for LinuxHostTopology {
    async fn deploy_docker(&self) -> Result<String, String> {
        // Real impl: install/configure Docker on the host.
        Ok("LinuxHost Docker deployed".to_string())
    }
}

/// Higher-Order Topology: composes multiple sub-topologies (primary + replica).
/// Automatically derives *all* capabilities of `T` with failover orchestration.
///
/// - If `T: PostgreSQL`, then `FailoverTopology<T>: PostgreSQL` (blanket impl).
/// - Same for `Docker`, etc. No boilerplate!
/// - Compile-time safe: a missing `T: Capability` bound is an error.
#[derive(Clone)]
pub struct FailoverTopology<T> {
    /// Primary sub-topology.
    pub primary: T,
    /// Replica sub-topology.
    pub replica: T,
}

/// Blanket impl: failover PostgreSQL if `T` provides PostgreSQL.
/// Delegates reads to the primary; deploys to both.
#[async_trait]
impl<T: PostgreSQL + Send + Sync + Clone> PostgreSQL for FailoverTopology<T> {
    async fn deploy(&self, config: &PostgreSQLConfig) -> Result<String, String> {
        // Orchestrate: deploy primary first, then replica (e.g., via pg_basebackup).
        let primary_result = self.primary.deploy(config).await?;
        let replica_result = self.replica.deploy(config).await?;
        Ok(format!("Failover PG deployed: {} | {}", primary_result, replica_result))
    }

    async fn get_replication_certs(&self, cluster_name: &str) -> Result<ReplicationCerts, String> {
        // Delegate to the primary (the replica follows).
        self.primary.get_replication_certs(cluster_name).await
    }
}

/// Blanket impl: failover Docker if `T` provides Docker.
#[async_trait]
impl<T: Docker + Send + Sync + Clone> Docker for FailoverTopology<T> {
    async fn deploy_docker(&self) -> Result<String, String> {
        // Orchestrate across primary + replica.
        let primary_result = self.primary.deploy_docker().await?;
        let replica_result = self.replica.deploy_docker().await?;
        Ok(format!("Failover Docker deployed: {} | {}", primary_result, replica_result))
    }
}

#[tokio::main]
async fn main() {
    let config = PostgreSQLConfig;

    println!("=== ✅ PostgreSQL Failover (K8sAnywhere supports PG) ===");
    let pg_failover = FailoverTopology {
        primary: K8sAnywhereTopology,
        replica: K8sAnywhereTopology,
    };
    let result = pg_failover.deploy(&config).await.unwrap();
    println!("Result: {}", result);

    println!("\n=== ✅ Docker Failover (LinuxHost supports Docker) ===");
    let docker_failover = FailoverTopology {
        primary: LinuxHostTopology,
        replica: LinuxHostTopology,
    };
    let result = docker_failover.deploy_docker().await.unwrap();
    println!("Result: {}", result);

    println!("\n=== ❌ Would fail to compile (K8sAnywhere !: Docker) ===");
    // let invalid = FailoverTopology {
    //     primary: K8sAnywhereTopology,
    //     replica: K8sAnywhereTopology,
    // };
    // invalid.deploy_docker().await.unwrap(); // Error: `K8sAnywhereTopology: Docker` not satisfied!
    //
    // Very clear error message:
    // error[E0599]: the method `deploy_docker` exists for struct `FailoverTopology<K8sAnywhereTopology>`, but its trait bounds were not satisfied
    //   --> src/main.rs:90:9
    //    |
    // 4  | pub struct FailoverTopology<T> {
    //    | ------------------------------ method `deploy_docker` not found for this struct because it doesn't satisfy `FailoverTopology<K8sAnywhereTopology>: Docker`
    // ...
    // 37 | struct K8sAnywhereTopology;
    //    | -------------------------- doesn't satisfy `K8sAnywhereTopology: Docker`
    // ...
    // 90 |     invalid.deploy_docker(); // `T: Docker` bound unsatisfied
    //    |             ^^^^^^^^^^^^^ method cannot be called on `FailoverTopology<K8sAnywhereTopology>` due to unsatisfied trait bounds
    //    |
    // note: trait bound `K8sAnywhereTopology: Docker` was not satisfied
    //   --> src/main.rs:61:9
    //    |
    // 61 | impl<T: Docker + Send + Sync> Docker for FailoverTopology<T> {
    //    |         ^^^^^^                ------ -------------------
    //    |         |
    //    |         unsatisfied trait bound introduced here
    // note: the trait `Docker` must be implemented
}
```
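The commented-out compile-fail above could be pinned down as a regression test; a minimal sketch using the `trybuild` crate (the test and UI-file paths are hypothetical):

```rust
// tests/compile_fail.rs -- assumes `trybuild` is added as a dev-dependency.
#[test]
fn failover_requires_capability_on_t() {
    let t = trybuild::TestCases::new();
    // The UI test calls `deploy_docker` on a `FailoverTopology<K8sAnywhereTopology>`
    // and must fail with the `K8sAnywhereTopology: Docker` bound error shown above.
    t.compile_fail("tests/ui/failover_missing_docker.rs");
}
```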
```diff
@@ -1,6 +1,6 @@
 use std::{
     net::{IpAddr, Ipv4Addr},
-    sync::{Arc, OnceLock},
+    sync::Arc,
 };
 
 use brocade::BrocadeOptions;
@@ -106,8 +106,8 @@ async fn main() {
             name: "wk2".to_string(),
         },
     ],
+    node_exporter: opnsense.clone(),
     switch_client: switch_client.clone(),
-    network_manager: OnceLock::new(),
 };
 
 let inventory = Inventory {
```
```diff
@@ -9,10 +9,7 @@ use harmony::{
 use harmony_macros::{ip, ipv4};
 use harmony_secret::{Secret, SecretManager};
 use serde::{Deserialize, Serialize};
-use std::{
-    net::IpAddr,
-    sync::{Arc, OnceLock},
-};
+use std::{net::IpAddr, sync::Arc};
 
 #[derive(Secret, Serialize, Deserialize, Debug, PartialEq)]
 struct OPNSenseFirewallConfig {
@@ -83,8 +80,8 @@ pub async fn get_topology() -> HAClusterTopology {
             name: "bootstrap".to_string(),
         },
         workers: vec![],
+        node_exporter: opnsense.clone(),
         switch_client: switch_client.clone(),
-        network_manager: OnceLock::new(),
     }
 }
```
```diff
@@ -10,10 +10,7 @@ use harmony::{
 use harmony_macros::{ip, ipv4};
 use harmony_secret::{Secret, SecretManager};
 use serde::{Deserialize, Serialize};
-use std::{
-    net::IpAddr,
-    sync::{Arc, OnceLock},
-};
+use std::{net::IpAddr, sync::Arc};
 
 pub async fn get_topology() -> HAClusterTopology {
     let firewall = harmony::topology::LogicalHost {
@@ -78,8 +75,8 @@ pub async fn get_topology() -> HAClusterTopology {
             name: "cp0".to_string(),
         },
         workers: vec![],
+        node_exporter: opnsense.clone(),
         switch_client: switch_client.clone(),
-        network_manager: OnceLock::new(),
     }
 }
```
```diff
@@ -1,6 +1,6 @@
 use std::{
     net::{IpAddr, Ipv4Addr},
-    sync::{Arc, OnceLock},
+    sync::Arc,
 };
 
 use brocade::BrocadeOptions;
@@ -78,8 +78,8 @@ async fn main() {
             name: "cp0".to_string(),
         },
         workers: vec![],
+        node_exporter: opnsense.clone(),
         switch_client: switch_client.clone(),
-        network_manager: OnceLock::new(),
     };
 
     let inventory = Inventory {
```
examples/opnsense_node_exporter/Cargo.toml (new file, +21)

```toml
[package]
name = "example-opnsense-node-exporter"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true

[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
harmony_secret = { path = "../../harmony_secret" }
harmony_secret_derive = { path = "../../harmony_secret_derive" }
cidr = { workspace = true }
tokio = { workspace = true }
harmony_macros = { path = "../../harmony_macros" }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
serde.workspace = true
async-trait.workspace = true
```
examples/opnsense_node_exporter/src/main.rs (new file, +79)

```rust
use std::{
    net::{IpAddr, Ipv4Addr},
    sync::Arc,
};

use async_trait::async_trait;
use cidr::Ipv4Cidr;
use harmony::{
    executors::ExecutorError,
    hardware::{HostCategory, Location, PhysicalHost, SwitchGroup},
    infra::opnsense::OPNSenseManagementInterface,
    inventory::Inventory,
    modules::opnsense::node_exporter::NodeExporterScore,
    topology::{
        HAClusterTopology, LogicalHost, PreparationError, PreparationOutcome, Topology,
        UnmanagedRouter, node_exporter::NodeExporter,
    },
};
use harmony_macros::{ip, ipv4, mac_address};

struct OpnSenseTopology {
    node_exporter: Arc<dyn NodeExporter>,
}

#[async_trait]
impl Topology for OpnSenseTopology {
    async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
        Ok(PreparationOutcome::Success {
            details: "Success".to_string(),
        })
    }

    fn name(&self) -> &str {
        "OpnsenseTopology"
    }
}

#[async_trait]
impl NodeExporter for OpnSenseTopology {
    async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
        self.node_exporter.ensure_initialized().await
    }

    async fn commit_config(&self) -> Result<(), ExecutorError> {
        self.node_exporter.commit_config().await
    }

    async fn reload_restart(&self) -> Result<(), ExecutorError> {
        self.node_exporter.reload_restart().await
    }
}

#[tokio::main]
async fn main() {
    let firewall = harmony::topology::LogicalHost {
        ip: ip!("192.168.1.1"),
        name: String::from("fw0"),
    };

    let opnsense = Arc::new(
        harmony::infra::opnsense::OPNSenseFirewall::new(firewall, None, "root", "opnsense").await,
    );

    let topology = OpnSenseTopology {
        node_exporter: opnsense.clone(),
    };

    let inventory = Inventory::empty();

    let node_exporter_score = NodeExporterScore {};

    harmony_cli::run(
        inventory,
        topology,
        vec![Box::new(node_exporter_score)],
        None,
    )
    .await
    .unwrap();
}
```
```diff
@@ -1,6 +1,4 @@
 mod repository;
-use std::fmt;
-
 pub use repository::*;
 
 #[derive(Debug, new, Clone)]
@@ -71,14 +69,5 @@ pub enum HostRole {
     Bootstrap,
     ControlPlane,
     Worker,
-}
-
-impl fmt::Display for HostRole {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        match self {
-            HostRole::Bootstrap => write!(f, "Bootstrap"),
-            HostRole::ControlPlane => write!(f, "ControlPlane"),
-            HostRole::Worker => write!(f, "Worker"),
-        }
-    }
+    Storage,
 }
```
```diff
@@ -1,25 +1,32 @@
 use async_trait::async_trait;
 use harmony_macros::ip;
 use harmony_types::{
-    id::Id,
     net::{MacAddress, Url},
     switch::PortLocation,
 };
+use kube::api::ObjectMeta;
 use log::debug;
 use log::info;
 
-use crate::infra::network_manager::OpenShiftNmStateNetworkManager;
 use crate::topology::PxeOptions;
-use crate::{data::FileContent, executors::ExecutorError};
+use crate::{data::FileContent, modules::okd::crd::nmstate::NMState};
+use crate::{
+    executors::ExecutorError, modules::okd::crd::nmstate::NodeNetworkConfigurationPolicySpec,
+};
+use crate::{
+    modules::okd::crd::nmstate::{self, NodeNetworkConfigurationPolicy},
+    topology::node_exporter::NodeExporter,
+};
 
 use super::{
     DHCPStaticEntry, DhcpServer, DnsRecord, DnsRecordType, DnsServer, Firewall, HostNetworkConfig,
-    HttpServer, IpAddress, K8sclient, LoadBalancer, LoadBalancerService, LogicalHost, NetworkError,
-    NetworkManager, PreparationError, PreparationOutcome, Router, Switch, SwitchClient,
-    SwitchError, TftpServer, Topology, k8s::K8sClient,
+    HttpServer, IpAddress, K8sclient, LoadBalancer, LoadBalancerService, LogicalHost,
+    PreparationError, PreparationOutcome, Router, Switch, SwitchClient, SwitchError, TftpServer,
+    Topology, k8s::K8sClient,
 };
 
-use std::sync::{Arc, OnceLock};
+use std::collections::BTreeMap;
+use std::sync::Arc;
 
 #[derive(Debug, Clone)]
 pub struct HAClusterTopology {
```
```diff
@@ -31,12 +38,12 @@ pub struct HAClusterTopology {
     pub tftp_server: Arc<dyn TftpServer>,
     pub http_server: Arc<dyn HttpServer>,
     pub dns_server: Arc<dyn DnsServer>,
+    pub node_exporter: Arc<dyn NodeExporter>,
     pub switch_client: Arc<dyn SwitchClient>,
     pub bootstrap_host: LogicalHost,
     pub control_plane: Vec<LogicalHost>,
     pub workers: Vec<LogicalHost>,
     pub kubeconfig: Option<String>,
-    pub network_manager: OnceLock<Arc<dyn NetworkManager>>,
 }
 
 #[async_trait]
@@ -60,7 +67,7 @@ impl K8sclient for HAClusterTopology {
                 K8sClient::try_default().await.map_err(|e| e.to_string())?,
             )),
             Some(kubeconfig) => {
-                let Some(client) = K8sClient::from_kubeconfig(kubeconfig).await else {
+                let Some(client) = K8sClient::from_kubeconfig(&kubeconfig).await else {
                     return Err("Failed to create k8s client".to_string());
                 };
                 Ok(Arc::new(client))
```
```diff
@@ -90,12 +97,191 @@ impl HAClusterTopology {
             .to_string()
     }
 
-    pub async fn network_manager(&self) -> &dyn NetworkManager {
-        let k8s_client = self.k8s_client().await.unwrap();
-
-        self.network_manager
-            .get_or_init(|| Arc::new(OpenShiftNmStateNetworkManager::new(k8s_client.clone())))
-            .as_ref()
+    async fn ensure_nmstate_operator_installed(&self) -> Result<(), String> {
+        let k8s_client = self.k8s_client().await?;
+
+        debug!("Installing NMState controller...");
+        k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/nmstate.io_nmstates.yaml").unwrap(), Some("nmstate"))
+            .await
+            .map_err(|e| e.to_string())?;
+
+        debug!("Creating NMState namespace...");
+        k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/namespace.yaml").unwrap(), Some("nmstate"))
+            .await
+            .map_err(|e| e.to_string())?;
+
+        debug!("Creating NMState service account...");
+        k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/service_account.yaml").unwrap(), Some("nmstate"))
+            .await
+            .map_err(|e| e.to_string())?;
+
+        debug!("Creating NMState role...");
+        k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/role.yaml").unwrap(), Some("nmstate"))
+            .await
+            .map_err(|e| e.to_string())?;
+
+        debug!("Creating NMState role binding...");
+        k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/role_binding.yaml").unwrap(), Some("nmstate"))
+            .await
+            .map_err(|e| e.to_string())?;
+
+        debug!("Creating NMState operator...");
+        k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/operator.yaml").unwrap(), Some("nmstate"))
+            .await
+            .map_err(|e| e.to_string())?;
+
+        k8s_client
+            .wait_until_deployment_ready("nmstate-operator", Some("nmstate"), None)
+            .await?;
+
+        let nmstate = NMState {
+            metadata: ObjectMeta {
+                name: Some("nmstate".to_string()),
+                ..Default::default()
+            },
+            ..Default::default()
+        };
+        debug!("Creating NMState: {nmstate:#?}");
+        k8s_client
+            .apply(&nmstate, None)
+            .await
+            .map_err(|e| e.to_string())?;
+
+        Ok(())
+    }
+
+    fn get_next_bond_id(&self) -> u8 {
+        42 // FIXME: Find a better way to declare the bond id
+    }
+
+    async fn configure_bond(&self, config: &HostNetworkConfig) -> Result<(), SwitchError> {
+        self.ensure_nmstate_operator_installed()
+            .await
+            .map_err(|e| {
+                SwitchError::new(format!(
+                    "Can't configure bond, NMState operator not available: {e}"
+                ))
+            })?;
+
+        let bond_config = self.create_bond_configuration(config);
+        debug!(
+            "Applying NMState bond config for host {}: {bond_config:#?}",
+            config.host_id
+        );
+        self.k8s_client()
+            .await
+            .unwrap()
+            .apply(&bond_config, None)
+            .await
+            .map_err(|e| SwitchError::new(format!("Failed to configure bond: {e}")))?;
+
+        Ok(())
+    }
+
+    fn create_bond_configuration(
+        &self,
+        config: &HostNetworkConfig,
+    ) -> NodeNetworkConfigurationPolicy {
+        let host_name = &config.host_id;
+        let bond_id = self.get_next_bond_id();
+        let bond_name = format!("bond{bond_id}");
+
+        info!("Configuring bond '{bond_name}' for host '{host_name}'...");
+
+        let mut bond_mtu: Option<u32> = None;
+        let mut copy_mac_from: Option<String> = None;
+        let mut bond_ports = Vec::new();
+        let mut interfaces: Vec<nmstate::InterfaceSpec> = Vec::new();
+
+        for switch_port in &config.switch_ports {
+            let interface_name = switch_port.interface.name.clone();
+
+            interfaces.push(nmstate::InterfaceSpec {
+                name: interface_name.clone(),
+                description: Some(format!("Member of bond {bond_name}")),
+                r#type: "ethernet".to_string(),
+                state: "up".to_string(),
+                mtu: Some(switch_port.interface.mtu),
+                mac_address: Some(switch_port.interface.mac_address.to_string()),
+                ipv4: Some(nmstate::IpStackSpec {
+                    enabled: Some(false),
+                    ..Default::default()
+                }),
+                ipv6: Some(nmstate::IpStackSpec {
+                    enabled: Some(false),
+                    ..Default::default()
+                }),
+                link_aggregation: None,
+                ..Default::default()
+            });
+
+            bond_ports.push(interface_name.clone());
+
+            // Use the first port's details for the bond mtu and mac address
+            if bond_mtu.is_none() {
+                bond_mtu = Some(switch_port.interface.mtu);
+            }
+            if copy_mac_from.is_none() {
+                copy_mac_from = Some(interface_name);
+            }
+        }
+
+        interfaces.push(nmstate::InterfaceSpec {
+            name: bond_name.clone(),
+            description: Some(format!("Network bond for host {host_name}")),
+            r#type: "bond".to_string(),
+            state: "up".to_string(),
+            copy_mac_from,
+            ipv4: Some(nmstate::IpStackSpec {
+                dhcp: Some(true),
+                enabled: Some(true),
+                ..Default::default()
+            }),
+            ipv6: Some(nmstate::IpStackSpec {
+                dhcp: Some(true),
+                autoconf: Some(true),
+                enabled: Some(true),
+                ..Default::default()
+            }),
+            link_aggregation: Some(nmstate::BondSpec {
+                mode: "802.3ad".to_string(),
+                ports: bond_ports,
+                ..Default::default()
+            }),
+            ..Default::default()
+        });
+
+        NodeNetworkConfigurationPolicy {
+            metadata: ObjectMeta {
+                name: Some(format!("{host_name}-bond-config")),
+                ..Default::default()
+            },
+            spec: NodeNetworkConfigurationPolicySpec {
+                node_selector: Some(BTreeMap::from([(
+                    "kubernetes.io/hostname".to_string(),
+                    host_name.to_string(),
+                )])),
+                desired_state: nmstate::DesiredStateSpec { interfaces },
+            },
+        }
+    }
+
+    async fn configure_port_channel(&self, config: &HostNetworkConfig) -> Result<(), SwitchError> {
+        debug!("Configuring port channel: {config:#?}");
+        let switch_ports = config.switch_ports.iter().map(|s| s.port.clone()).collect();
+
+        self.switch_client
+            .configure_port_channel(&format!("Harmony_{}", config.host_id), switch_ports)
+            .await
+            .map_err(|e| SwitchError::new(format!("Failed to configure switch: {e}")))?;
+
+        Ok(())
     }
 
     pub fn autoload() -> Self {
```
```diff
@@ -115,11 +301,11 @@ impl HAClusterTopology {
             tftp_server: dummy_infra.clone(),
             http_server: dummy_infra.clone(),
             dns_server: dummy_infra.clone(),
+            node_exporter: dummy_infra.clone(),
             switch_client: dummy_infra.clone(),
             bootstrap_host: dummy_host,
             control_plane: vec![],
             workers: vec![],
-            network_manager: OnceLock::new(),
         }
     }
 }
```
```diff
@@ -277,40 +463,38 @@ impl HttpServer for HAClusterTopology {
 #[async_trait]
 impl Switch for HAClusterTopology {
     async fn setup_switch(&self) -> Result<(), SwitchError> {
-        self.switch_client.setup().await.map(|_| ())
+        self.switch_client.setup().await?;
+        Ok(())
     }
 
     async fn get_port_for_mac_address(
         &self,
         mac_address: &MacAddress,
     ) -> Result<Option<PortLocation>, SwitchError> {
-        self.switch_client.find_port(mac_address).await
+        let port = self.switch_client.find_port(mac_address).await?;
+        Ok(port)
     }
 
-    async fn configure_port_channel(&self, config: &HostNetworkConfig) -> Result<(), SwitchError> {
-        debug!("Configuring port channel: {config:#?}");
-        let switch_ports = config.switch_ports.iter().map(|s| s.port.clone()).collect();
-
-        self.switch_client
-            .configure_port_channel(&format!("Harmony_{}", config.host_id), switch_ports)
-            .await
-            .map_err(|e| SwitchError::new(format!("Failed to configure port-channel: {e}")))?;
-
-        Ok(())
+    async fn configure_host_network(&self, config: &HostNetworkConfig) -> Result<(), SwitchError> {
+        self.configure_bond(config).await?;
+        self.configure_port_channel(config).await
     }
+
+    //TODO add snmp here
 }
 
 #[async_trait]
-impl NetworkManager for HAClusterTopology {
-    async fn ensure_network_manager_installed(&self) -> Result<(), NetworkError> {
-        self.network_manager()
-            .await
-            .ensure_network_manager_installed()
-            .await
+impl NodeExporter for HAClusterTopology {
+    async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
+        self.node_exporter.ensure_initialized().await
     }
 
-    async fn configure_bond(&self, config: &HostNetworkConfig) -> Result<(), NetworkError> {
-        self.network_manager().await.configure_bond(config).await
+    async fn commit_config(&self) -> Result<(), ExecutorError> {
+        self.node_exporter.commit_config().await
+    }
+
+    async fn reload_restart(&self) -> Result<(), ExecutorError> {
+        self.node_exporter.reload_restart().await
     }
 }
```
```diff
@@ -501,6 +685,21 @@ impl DnsServer for DummyInfra {
     }
 }
 
+#[async_trait]
+impl NodeExporter for DummyInfra {
+    async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
+        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
+    }
+
+    async fn commit_config(&self) -> Result<(), ExecutorError> {
+        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
+    }
+
+    async fn reload_restart(&self) -> Result<(), ExecutorError> {
+        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
+    }
+}
+
 #[async_trait]
 impl SwitchClient for DummyInfra {
     async fn setup(&self) -> Result<(), SwitchError> {
```
```diff
@@ -5,15 +5,13 @@ use k8s_openapi::{
     ClusterResourceScope, NamespaceResourceScope,
     api::{
         apps::v1::Deployment,
-        core::v1::{Node, Pod, ServiceAccount},
+        core::v1::{Pod, ServiceAccount},
     },
     apimachinery::pkg::version::Info,
 };
 use kube::{
     Client, Config, Discovery, Error, Resource,
-    api::{
-        Api, AttachParams, DeleteParams, ListParams, ObjectList, Patch, PatchParams, ResourceExt,
-    },
+    api::{Api, AttachParams, DeleteParams, ListParams, Patch, PatchParams, ResourceExt},
     config::{KubeConfigOptions, Kubeconfig},
     core::ErrorResponse,
     discovery::{ApiCapabilities, Scope},
@@ -25,7 +23,7 @@ use kube::{
     api::{ApiResource, GroupVersionKind},
     runtime::wait::await_condition,
 };
-use log::{debug, error, trace, warn};
+use log::{debug, error, info, trace, warn};
 use serde::{Serialize, de::DeserializeOwned};
 use serde_json::json;
 use similar::TextDiff;
```
```diff
@@ -566,58 +564,7 @@ impl K8sClient {
         Ok(())
     }
 
-    /// Gets a single named resource of a specific type `K`.
-    ///
-    /// This function uses the `ApplyStrategy` trait to correctly determine
-    /// whether to look in a specific namespace or in the entire cluster.
-    ///
-    /// Returns `Ok(None)` if the resource is not found (404).
-    pub async fn get_resource<K>(
-        &self,
-        name: &str,
-        namespace: Option<&str>,
-    ) -> Result<Option<K>, Error>
-    where
-        K: Resource + Clone + std::fmt::Debug + DeserializeOwned,
-        <K as Resource>::Scope: ApplyStrategy<K>,
-        <K as kube::Resource>::DynamicType: Default,
-    {
-        let api: Api<K> =
-            <<K as Resource>::Scope as ApplyStrategy<K>>::get_api(&self.client, namespace);
-
-        api.get_opt(name).await
-    }
-
-    /// Lists all resources of a specific type `K`.
-    ///
-    /// This function uses the `ApplyStrategy` trait to correctly determine
-    /// whether to list from a specific namespace or from the entire cluster.
-    pub async fn list_resources<K>(
-        &self,
-        namespace: Option<&str>,
-        list_params: Option<ListParams>,
-    ) -> Result<ObjectList<K>, Error>
-    where
-        K: Resource + Clone + std::fmt::Debug + DeserializeOwned,
-        <K as Resource>::Scope: ApplyStrategy<K>,
-        <K as kube::Resource>::DynamicType: Default,
-    {
-        let api: Api<K> =
-            <<K as Resource>::Scope as ApplyStrategy<K>>::get_api(&self.client, namespace);
-
-        let list_params = list_params.unwrap_or_default();
-        api.list(&list_params).await
-    }
-
-    /// Fetches a list of all Nodes in the cluster.
-    pub async fn get_nodes(
-        &self,
-        list_params: Option<ListParams>,
-    ) -> Result<ObjectList<Node>, Error> {
-        self.list_resources(None, list_params).await
-    }
-
-    pub async fn from_kubeconfig(path: &str) -> Option<K8sClient> {
+    pub(crate) async fn from_kubeconfig(path: &str) -> Option<K8sClient> {
         let k = match Kubeconfig::read_from(path) {
             Ok(k) => k,
             Err(e) => {
```
```diff
@@ -1,5 +1,6 @@
 mod ha_cluster;
 pub mod ingress;
+pub mod node_exporter;
 use harmony_types::net::IpAddress;
 mod host_binding;
 mod http;
```
```diff
@@ -15,7 +15,7 @@ use harmony_types::{
 };
 use serde::Serialize;
 
-use crate::executors::ExecutorError;
+use crate::{executors::ExecutorError, hardware::PhysicalHost};
 
 use super::{LogicalHost, k8s::K8sClient};
 
```
```diff
@@ -183,37 +183,6 @@ impl FromStr for DnsRecordType {
     }
 }
 
-#[async_trait]
-pub trait NetworkManager: Debug + Send + Sync {
-    async fn ensure_network_manager_installed(&self) -> Result<(), NetworkError>;
-    async fn configure_bond(&self, config: &HostNetworkConfig) -> Result<(), NetworkError>;
-}
-
-#[derive(Debug, Clone, new)]
-pub struct NetworkError {
-    msg: String,
-}
-
-impl fmt::Display for NetworkError {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        f.write_str(&self.msg)
-    }
-}
-
-impl Error for NetworkError {}
-
-impl From<kube::Error> for NetworkError {
-    fn from(value: kube::Error) -> Self {
-        NetworkError::new(value.to_string())
-    }
-}
-
-impl From<String> for NetworkError {
-    fn from(value: String) -> Self {
-        NetworkError::new(value)
-    }
-}
-
 #[async_trait]
 pub trait Switch: Send + Sync {
     async fn setup_switch(&self) -> Result<(), SwitchError>;
@@ -223,7 +192,7 @@ pub trait Switch: Send + Sync {
         mac_address: &MacAddress,
     ) -> Result<Option<PortLocation>, SwitchError>;
 
-    async fn configure_port_channel(&self, config: &HostNetworkConfig) -> Result<(), SwitchError>;
+    async fn configure_host_network(&self, config: &HostNetworkConfig) -> Result<(), SwitchError>;
 }
 
 #[derive(Clone, Debug, PartialEq)]
```
harmony/src/domain/topology/node_exporter.rs (new file, +17)

```rust
use async_trait::async_trait;

use crate::executors::ExecutorError;

#[async_trait]
pub trait NodeExporter: Send + Sync {
    async fn ensure_initialized(&self) -> Result<(), ExecutorError>;
    async fn commit_config(&self) -> Result<(), ExecutorError>;
    async fn reload_restart(&self) -> Result<(), ExecutorError>;
}

//TODO complete this impl
impl std::fmt::Debug for dyn NodeExporter {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_fmt(format_args!("NodeExporter ",))
    }
}
```
@@ -1,182 +0,0 @@ (file deleted)

```rust
use k8s_openapi::Resource as K8sResource;
use kube::api::{ApiResource, DynamicObject, GroupVersionKind};
use kube::core::TypeMeta;
use serde::Serialize;
use serde::de::DeserializeOwned;
use serde_json::Value;

/// Convert a typed Kubernetes resource `K` into a `DynamicObject`.
///
/// Requirements:
/// - `K` must be a k8s_openapi resource (provides static GVK via `Resource`).
/// - `K` must have standard Kubernetes shape (metadata + payload fields).
///
/// Notes:
/// - We set `types` (apiVersion/kind) and copy `metadata`.
/// - We place the remaining top-level fields into `obj.data` as JSON.
/// - Scope is not encoded on the object itself; you still need the corresponding
///   `DynamicResource` (derived from K::group/version/kind) when constructing an Api.
///
/// Example usage:
///     let dyn_obj = kube_resource_to_dynamic(secret)?;
///     let api: Api<DynamicObject> = Api::namespaced_with(client, "ns", &dr);
///     api.patch(&dyn_obj.name_any(), &PatchParams::apply("mgr"), &Patch::Apply(dyn_obj)).await?;
pub fn kube_resource_to_dynamic<K>(res: &K) -> Result<DynamicObject, String>
where
    K: K8sResource + Serialize + DeserializeOwned,
{
    // Serialize the typed resource to JSON so we can split metadata and payload
    let mut v = serde_json::to_value(res).map_err(|e| format!("Failed to serialize: {e}"))?;
    let obj = v
        .as_object_mut()
        .ok_or_else(|| "expected object JSON".to_string())?;

    // Extract and parse metadata into kube::core::ObjectMeta
    let metadata_value = obj
        .remove("metadata")
        .ok_or_else(|| "missing metadata".to_string())?;
    let metadata: kube::core::ObjectMeta = serde_json::from_value(metadata_value)
        .map_err(|e| format!("Failed to deserialize: {e}"))?;

    // Name is required for DynamicObject::new; prefer metadata.name
    let name = metadata
        .name
        .clone()
        .ok_or_else(|| "metadata.name is required".to_string())?;

    // Remaining fields (spec/status/data/etc.) become the dynamic payload
    let payload = Value::Object(obj.clone());

    // Construct the DynamicObject
    let mut dyn_obj = DynamicObject::new(
        &name,
        &ApiResource::from_gvk(&GroupVersionKind::gvk(K::GROUP, K::VERSION, K::KIND)),
    );
    dyn_obj.types = Some(TypeMeta {
        api_version: api_version_for::<K>(),
        kind: K::KIND.into(),
    });

    // Preserve namespace/labels/annotations/etc.
    dyn_obj.metadata = metadata;

    // Attach payload
    dyn_obj.data = payload;

    Ok(dyn_obj)
}

/// Helper: compute apiVersion string ("group/version" or "v1" for core).
fn api_version_for<K>() -> String
where
    K: K8sResource,
{
    let group = K::GROUP;
    let version = K::VERSION;
    if group.is_empty() {
        version.to_string() // core/v1 => "v1"
    } else {
        format!("{}/{}", group, version)
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use k8s_openapi::api::{
        apps::v1::{Deployment, DeploymentSpec},
        core::v1::{PodTemplateSpec, Secret},
    };
    use kube::api::ObjectMeta;
    use pretty_assertions::assert_eq;

    #[test]
    fn secret_to_dynamic_roundtrip() {
        // Create a sample Secret resource
        let mut secret = Secret {
            metadata: ObjectMeta {
                name: Some("my-secret".to_string()),
                ..Default::default()
            },
            type_: Some("kubernetes.io/service-account-token".to_string()),
            ..Default::default()
        };

        // Convert to DynamicResource
        let dynamic: DynamicObject =
            kube_resource_to_dynamic(&secret).expect("Failed to convert Secret to DynamicResource");

        // Serialize both the original and dynamic resources to Value
        let original_value = serde_json::to_value(&secret).expect("Failed to serialize Secret");
        let dynamic_value =
            serde_json::to_value(&dynamic).expect("Failed to serialize DynamicResource");

        // Assert that they are identical
        assert_eq!(original_value, dynamic_value);

        secret.metadata.namespace = Some("false".to_string());
        let modified_value = serde_json::to_value(&secret).expect("Failed to serialize Secret");
        assert_ne!(modified_value, dynamic_value);
    }

    #[test]
    fn deployment_to_dynamic_roundtrip() {
        // Create a sample Deployment with nested structures
        let deployment = Deployment {
            metadata: ObjectMeta {
                name: Some("my-deployment".to_string()),
                labels: Some({
                    let mut map = std::collections::BTreeMap::new();
                    map.insert("app".to_string(), "nginx".to_string());
                    map
                }),
                ..Default::default()
            },
            spec: Some(DeploymentSpec {
                replicas: Some(3),
                selector: Default::default(),
                template: PodTemplateSpec {
                    metadata: Some(ObjectMeta {
                        labels: Some({
                            let mut map = std::collections::BTreeMap::new();
                            map.insert("app".to_string(), "nginx".to_string());
                            map
                        }),
                        ..Default::default()
                    }),
                    spec: Some(Default::default()), // PodSpec with empty containers for simplicity
                },
                ..Default::default()
            }),
            ..Default::default()
        };

        let dynamic = kube_resource_to_dynamic(&deployment).expect("Failed to convert Deployment");

        let original_value = serde_json::to_value(&deployment).unwrap();
        let dynamic_value = serde_json::to_value(&dynamic).unwrap();

        assert_eq!(original_value, dynamic_value);

        assert_eq!(
            dynamic.data.get("spec").unwrap().get("replicas").unwrap(),
            3
        );
        assert_eq!(
            dynamic
                .data
                .get("spec")
                .unwrap()
                .get("template")
                .unwrap()
                .get("metadata")
                .unwrap()
                .get("labels")
                .unwrap()
                .get("app")
                .unwrap()
                .as_str()
                .unwrap(),
            "nginx".to_string()
        );
    }
}
```
```diff
@@ -3,7 +3,5 @@ pub mod executors;
 pub mod hp_ilo;
 pub mod intel_amt;
 pub mod inventory;
-pub mod kube;
-pub mod network_manager;
 pub mod opnsense;
 mod sqlx;
```
@@ -1,264 +0,0 @@
|
|||||||
use std::{
|
|
||||||
collections::{BTreeMap, HashSet},
|
|
||||||
sync::Arc,
|
|
||||||
};
|
|
||||||
|
|
||||||
use async_trait::async_trait;
|
|
||||||
use harmony_types::id::Id;
|
|
||||||
use k8s_openapi::api::core::v1::Node;
|
|
||||||
use kube::{
|
|
||||||
ResourceExt,
|
|
||||||
api::{ObjectList, ObjectMeta},
|
|
||||||
};
|
|
||||||
use log::{debug, info};
|
|
||||||
|
|
||||||
use crate::{
|
|
||||||
modules::okd::crd::nmstate,
|
|
||||||
topology::{HostNetworkConfig, NetworkError, NetworkManager, k8s::K8sClient},
|
|
||||||
};
|
|
||||||
|
|
||||||
/// TODO document properly the non-intuitive behavior or "roll forward only" of nmstate in general
|
|
||||||
/// It is documented in nmstate official doc, but worth mentionning here :
|
|
||||||
///
|
|
||||||
/// - You create a bond, nmstate will apply it
|
|
||||||
/// - You delete de bond from nmstate, it will NOT delete it
|
|
||||||
/// - To delete it you have to update it with configuration set to null
|
|
||||||
pub struct OpenShiftNmStateNetworkManager {
    k8s_client: Arc<K8sClient>,
}

impl std::fmt::Debug for OpenShiftNmStateNetworkManager {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("OpenShiftNmStateNetworkManager").finish()
    }
}

#[async_trait]
impl NetworkManager for OpenShiftNmStateNetworkManager {
    async fn ensure_network_manager_installed(&self) -> Result<(), NetworkError> {
        debug!("Installing NMState controller...");
        // TODO use operatorhub maybe?
        self.k8s_client
            .apply_url(
                url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/nmstate.io_nmstates.yaml").unwrap(),
                Some("nmstate"),
            )
            .await?;

        debug!("Creating NMState namespace...");
        self.k8s_client
            .apply_url(
                url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/namespace.yaml").unwrap(),
                Some("nmstate"),
            )
            .await?;

        debug!("Creating NMState service account...");
        self.k8s_client
            .apply_url(
                url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/service_account.yaml").unwrap(),
                Some("nmstate"),
            )
            .await?;

        debug!("Creating NMState role...");
        self.k8s_client
            .apply_url(
                url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/role.yaml").unwrap(),
                Some("nmstate"),
            )
            .await?;

        debug!("Creating NMState role binding...");
        self.k8s_client
            .apply_url(
                url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/role_binding.yaml").unwrap(),
                Some("nmstate"),
            )
            .await?;

        debug!("Creating NMState operator...");
        self.k8s_client
            .apply_url(
                url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/operator.yaml").unwrap(),
                Some("nmstate"),
            )
            .await?;

        self.k8s_client
            .wait_until_deployment_ready("nmstate-operator", Some("nmstate"), None)
            .await?;

        let nmstate = nmstate::NMState {
            metadata: ObjectMeta {
                name: Some("nmstate".to_string()),
                ..Default::default()
            },
            ..Default::default()
        };
        debug!(
            "Creating NMState:\n{}",
            serde_yaml::to_string(&nmstate).unwrap()
        );
        self.k8s_client.apply(&nmstate, None).await?;

        Ok(())
    }

    async fn configure_bond(&self, config: &HostNetworkConfig) -> Result<(), NetworkError> {
        let hostname = self.get_hostname(&config.host_id).await.map_err(|e| {
            NetworkError::new(format!(
                "Can't configure bond, can't get hostname for host '{}': {e}",
                config.host_id
            ))
        })?;
        let bond_id = self.get_next_bond_id(&hostname).await.map_err(|e| {
            NetworkError::new(format!(
                "Can't configure bond, can't get an available bond id for host '{}': {e}",
                config.host_id
            ))
        })?;
        let bond_config = self.create_bond_configuration(&hostname, &bond_id, config);

        debug!(
            "Applying NMState bond config for host {}:\n{}",
            config.host_id,
            serde_yaml::to_string(&bond_config).unwrap(),
        );
        self.k8s_client
            .apply(&bond_config, None)
            .await
            .map_err(|e| NetworkError::new(format!("Failed to configure bond: {e}")))?;

        Ok(())
    }
}

impl OpenShiftNmStateNetworkManager {
    pub fn new(k8s_client: Arc<K8sClient>) -> Self {
        Self { k8s_client }
    }

    fn create_bond_configuration(
        &self,
        host: &str,
        bond_name: &str,
        config: &HostNetworkConfig,
    ) -> nmstate::NodeNetworkConfigurationPolicy {
        info!("Configuring bond '{bond_name}' for host '{host}'...");

        let mut bond_mtu: Option<u32> = None;
        let mut copy_mac_from: Option<String> = None;
        let mut bond_ports = Vec::new();
        let mut interfaces: Vec<nmstate::Interface> = Vec::new();

        for switch_port in &config.switch_ports {
            let interface_name = switch_port.interface.name.clone();

            interfaces.push(nmstate::Interface {
                name: interface_name.clone(),
                description: Some(format!("Member of bond {bond_name}")),
                r#type: nmstate::InterfaceType::Ethernet,
                state: "up".to_string(),
                ipv4: Some(nmstate::IpStackSpec {
                    enabled: Some(false),
                    ..Default::default()
                }),
                ipv6: Some(nmstate::IpStackSpec {
                    enabled: Some(false),
                    ..Default::default()
                }),
                link_aggregation: None,
                ..Default::default()
            });

            bond_ports.push(interface_name.clone());

            // Use the first port's details for the bond mtu and mac address
            if bond_mtu.is_none() {
                bond_mtu = Some(switch_port.interface.mtu);
            }
            if copy_mac_from.is_none() {
                copy_mac_from = Some(interface_name);
            }
        }

        interfaces.push(nmstate::Interface {
            name: bond_name.to_string(),
            description: Some(format!("HARMONY - Network bond for host {host}")),
            r#type: nmstate::InterfaceType::Bond,
            state: "up".to_string(),
            copy_mac_from,
            ipv4: Some(nmstate::IpStackSpec {
                dhcp: Some(true),
                enabled: Some(true),
                ..Default::default()
            }),
            ipv6: Some(nmstate::IpStackSpec {
                dhcp: Some(true),
                autoconf: Some(true),
                enabled: Some(true),
                ..Default::default()
            }),
            link_aggregation: Some(nmstate::BondSpec {
                mode: "802.3ad".to_string(),
                ports: bond_ports,
                ..Default::default()
            }),
            ..Default::default()
        });

        nmstate::NodeNetworkConfigurationPolicy {
            metadata: ObjectMeta {
                name: Some(format!("{host}-bond-config")),
                ..Default::default()
            },
            spec: nmstate::NodeNetworkConfigurationPolicySpec {
                node_selector: Some(BTreeMap::from([(
                    "kubernetes.io/hostname".to_string(),
                    host.to_string(),
                )])),
                desired_state: nmstate::NetworkState {
                    interfaces,
                    ..Default::default()
                },
            },
        }
    }

    async fn get_hostname(&self, host_id: &Id) -> Result<String, String> {
        let nodes: ObjectList<Node> = self
            .k8s_client
            .list_resources(None, None)
            .await
            .map_err(|e| format!("Failed to list nodes: {e}"))?;

        let Some(node) = nodes.iter().find(|n| {
            n.status
                .as_ref()
                .and_then(|s| s.node_info.as_ref())
                .map(|i| i.system_uuid == host_id.to_string())
                .unwrap_or(false)
        }) else {
            return Err(format!("No node found for host '{host_id}'"));
        };

        node.labels()
            .get("kubernetes.io/hostname")
            .ok_or(format!(
                "Node '{host_id}' has no kubernetes.io/hostname label"
            ))
            .cloned()
    }

    async fn get_next_bond_id(&self, hostname: &str) -> Result<String, String> {
        let network_state: Option<nmstate::NodeNetworkState> = self
            .k8s_client
            .get_resource(hostname, None)
            .await
            .map_err(|e| format!("Failed to list nodes: {e}"))?;

        let interfaces = vec![];
        let existing_bonds: Vec<&nmstate::Interface> = network_state
            .as_ref()
            .and_then(|network_state| network_state.status.current_state.as_ref())
            .map_or(&interfaces, |current_state| &current_state.interfaces)
            .iter()
            .filter(|i| i.r#type == nmstate::InterfaceType::Bond)
            .collect();

        let used_ids: HashSet<u32> = existing_bonds
            .iter()
            .filter_map(|i| {
                i.name
                    .strip_prefix("bond")
                    .and_then(|id| id.parse::<u32>().ok())
            })
            .collect();

        let next_id = (0..).find(|id| !used_ids.contains(id)).unwrap();
        Ok(format!("bond{next_id}"))
    }
}
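Worth spelling out, given the lifecycle caveats documented at the top of this file: `configure_bond` only ever applies policies, so removing a bond needs its own explicit policy. Below is a minimal sketch using the same types as above, assuming nmstate's `state: absent` convention for interface removal; the function, the cleanup-policy name, and `bond_name` are illustrative, not part of this diff.

````rust
// Sketch only — assumes nmstate's `state: absent` convention; merely dropping
// the bond from the desired state leaves it in place on the host.
fn bond_cleanup_policy(host: &str, bond_name: &str) -> nmstate::NodeNetworkConfigurationPolicy {
    nmstate::NodeNetworkConfigurationPolicy {
        metadata: ObjectMeta {
            name: Some(format!("{host}-bond-cleanup")), // illustrative name
            ..Default::default()
        },
        spec: nmstate::NodeNetworkConfigurationPolicySpec {
            node_selector: Some(BTreeMap::from([(
                "kubernetes.io/hostname".to_string(),
                host.to_string(),
            )])),
            desired_state: nmstate::NetworkState {
                interfaces: vec![nmstate::Interface {
                    name: bond_name.to_string(),
                    r#type: nmstate::InterfaceType::Bond,
                    state: "absent".to_string(), // nmstate marker for removal
                    ..Default::default()
                }],
                ..Default::default()
            },
        },
    }
}
````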
@@ -4,6 +4,7 @@ mod firewall;
 mod http;
 mod load_balancer;
 mod management;
+pub mod node_exporter;
 mod tftp;
 use std::sync::Arc;

47
harmony/src/infra/opnsense/node_exporter.rs
Normal file
@@ -0,0 +1,47 @@
use async_trait::async_trait;
use log::debug;

use crate::{
    executors::ExecutorError, infra::opnsense::OPNSenseFirewall,
    topology::node_exporter::NodeExporter,
};

#[async_trait]
impl NodeExporter for OPNSenseFirewall {
    async fn ensure_initialized(&self) -> Result<(), ExecutorError> {
        let mut config = self.opnsense_config.write().await;
        let node_exporter = config.node_exporter();
        if let Some(config) = node_exporter.get_full_config() {
            debug!(
                "Node exporter available in opnsense config, assuming it is already installed. {config:?}"
            );
        } else {
            config
                .install_package("os-node_exporter")
                .await
                .map_err(|e| {
                    ExecutorError::UnexpectedError(format!(
                        "Executor failed when trying to install os-node_exporter package with error {e:?}"
                    ))
                })?;
        }

        config
            .node_exporter()
            .enable(true)
            .map_err(|e| ExecutorError::UnexpectedError(e.to_string()))?;
        Ok(())
    }

    async fn commit_config(&self) -> Result<(), ExecutorError> {
        OPNSenseFirewall::commit_config(self).await
    }

    async fn reload_restart(&self) -> Result<(), ExecutorError> {
        self.opnsense_config
            .write()
            .await
            .node_exporter()
            .reload_restart()
            .await
            .map_err(|e| ExecutorError::UnexpectedError(e.to_string()))
    }
}
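The three trait methods above are designed to be called in sequence. A hypothetical driver, assuming an already-constructed `OPNSenseFirewall` and the `NodeExporter` trait in scope:

````rust
// Sketch only — `firewall` is assumed to exist; not part of this diff.
async fn enable_node_exporter(firewall: &OPNSenseFirewall) -> Result<(), ExecutorError> {
    // Installs os-node_exporter if it is missing, then enables it in the config.
    firewall.ensure_initialized().await?;
    // Persists the staged OPNsense configuration change.
    firewall.commit_config().await?;
    // Restarts the service so the exporter starts serving metrics.
    firewall.reload_restart().await?;
    Ok(())
}
````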
@@ -74,11 +74,7 @@ impl<T: Topology> Interpret<T> for DiscoverHostForRoleInterpret {
         match ans {
             Ok(choice) => {
-                info!(
-                    "Selected {} as the {:?} node.",
-                    choice.summary(),
-                    self.score.role
-                );
+                info!("Selected {} as the bootstrap node.", choice.summary());
                 host_repo
                     .save_role_mapping(&self.score.role, &choice)
                     .await?;
@@ -94,7 +90,10 @@ impl<T: Topology> Interpret<T> for DiscoverHostForRoleInterpret {
                     "Failed to select node for role {:?} : {}",
                     self.score.role, e
                 );
-                return Err(InterpretError::new(format!("Could not select host : {e}")));
+                return Err(InterpretError::new(format!(
+                    "Could not select host : {}",
+                    e.to_string()
+                )));
             }
         }
     }
@@ -1,8 +1,20 @@
 use crate::{
-    interpret::Interpret, inventory::HostRole, modules::okd::bootstrap_okd_node::OKDNodeInterpret,
-    score::Score, topology::HAClusterTopology,
+    data::Version,
+    hardware::PhysicalHost,
+    infra::inventory::InventoryRepositoryFactory,
+    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
+    inventory::{HostRole, Inventory},
+    modules::{
+        dhcp::DhcpHostBindingScore, http::IPxeMacBootFileScore,
+        inventory::DiscoverHostForRoleScore, okd::templates::BootstrapIpxeTpl,
+    },
+    score::Score,
+    topology::{HAClusterTopology, HostBinding},
 };
+use async_trait::async_trait;
 use derive_new::new;
+use harmony_types::id::Id;
+use log::{debug, info};
 use serde::Serialize;

 // -------------------------------------------------------------------------------------------------
@@ -16,13 +28,226 @@ pub struct OKDSetup03ControlPlaneScore {}

 impl Score<HAClusterTopology> for OKDSetup03ControlPlaneScore {
     fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
-        // TODO: Implement a step to wait for the control plane nodes to join the cluster
-        // and for the cluster operators to become available. This would be similar to
-        // the `wait-for bootstrap-complete` command.
-        Box::new(OKDNodeInterpret::new(HostRole::ControlPlane))
+        Box::new(OKDSetup03ControlPlaneInterpret::new())
     }

     fn name(&self) -> String {
         "OKDSetup03ControlPlaneScore".to_string()
     }
 }
+
+#[derive(Debug, Clone)]
+pub struct OKDSetup03ControlPlaneInterpret {
+    version: Version,
+    status: InterpretStatus,
+}
+
+impl OKDSetup03ControlPlaneInterpret {
+    pub fn new() -> Self {
+        let version = Version::from("1.0.0").unwrap();
+        Self {
+            version,
+            status: InterpretStatus::QUEUED,
+        }
+    }
+
+    /// Ensures that three physical hosts are discovered and available for the ControlPlane role.
+    /// It will trigger discovery if not enough hosts are found.
+    async fn get_nodes(
+        &self,
+        inventory: &Inventory,
+        topology: &HAClusterTopology,
+    ) -> Result<Vec<PhysicalHost>, InterpretError> {
+        const REQUIRED_HOSTS: usize = 3;
+        let repo = InventoryRepositoryFactory::build().await?;
+        let mut control_plane_hosts = repo.get_host_for_role(&HostRole::ControlPlane).await?;
+
+        while control_plane_hosts.len() < REQUIRED_HOSTS {
+            info!(
+                "Discovery of {} control plane hosts in progress, current number {}",
+                REQUIRED_HOSTS,
+                control_plane_hosts.len()
+            );
+            // This score triggers the discovery agent for a specific role.
+            DiscoverHostForRoleScore {
+                role: HostRole::ControlPlane,
+            }
+            .interpret(inventory, topology)
+            .await?;
+            control_plane_hosts = repo.get_host_for_role(&HostRole::ControlPlane).await?;
+        }
+
+        if control_plane_hosts.len() < REQUIRED_HOSTS {
+            Err(InterpretError::new(format!(
+                "OKD Requires at least {} control plane hosts, but only found {}. Cannot proceed.",
+                REQUIRED_HOSTS,
+                control_plane_hosts.len()
+            )))
+        } else {
+            // Take exactly the number of required hosts to ensure consistency.
+            Ok(control_plane_hosts
+                .into_iter()
+                .take(REQUIRED_HOSTS)
+                .collect())
+        }
+    }
+
+    /// Configures DHCP host bindings for all control plane nodes.
+    async fn configure_host_binding(
+        &self,
+        inventory: &Inventory,
+        topology: &HAClusterTopology,
+        nodes: &Vec<PhysicalHost>,
+    ) -> Result<(), InterpretError> {
+        info!("[ControlPlane] Configuring host bindings for control plane nodes.");
+
+        // Ensure the topology definition matches the number of physical nodes found.
+        if topology.control_plane.len() != nodes.len() {
+            return Err(InterpretError::new(format!(
+                "Mismatch between logical control plane hosts defined in topology ({}) and physical nodes found ({}).",
+                topology.control_plane.len(),
+                nodes.len()
+            )));
+        }
+
+        // Create a binding for each physical host to its corresponding logical host.
+        let bindings: Vec<HostBinding> = topology
+            .control_plane
+            .iter()
+            .zip(nodes.iter())
+            .map(|(logical_host, physical_host)| {
+                info!(
+                    "Creating binding: Logical Host '{}' -> Physical Host ID '{}'",
+                    logical_host.name, physical_host.id
+                );
+                HostBinding {
+                    logical_host: logical_host.clone(),
+                    physical_host: physical_host.clone(),
+                }
+            })
+            .collect();
+
+        DhcpHostBindingScore {
+            host_binding: bindings,
+            domain: Some(topology.domain_name.clone()),
+        }
+        .interpret(inventory, topology)
+        .await?;
+
+        Ok(())
+    }
+
+    /// Renders and deploys a per-MAC iPXE boot file for each control plane node.
+    async fn configure_ipxe(
+        &self,
+        inventory: &Inventory,
+        topology: &HAClusterTopology,
+        nodes: &Vec<PhysicalHost>,
+    ) -> Result<(), InterpretError> {
+        info!("[ControlPlane] Rendering per-MAC iPXE configurations.");
+
+        // The iPXE script content is the same for all control plane nodes,
+        // pointing to the 'master.ign' ignition file.
+        let content = BootstrapIpxeTpl {
+            http_ip: &topology.http_server.get_ip().to_string(),
+            scos_path: "scos",
+            ignition_http_path: "okd_ignition_files",
+            installation_device: "/dev/sda", // This might need to be configurable per-host in the future
+            ignition_file_name: "master.ign", // Control plane nodes use the master ignition file
+        }
+        .to_string();
+
+        debug!("[ControlPlane] iPXE content template:\n{content}");
+
+        // Create and apply an iPXE boot file for each node.
+        for node in nodes {
+            let mac_address = node.get_mac_address();
+            if mac_address.is_empty() {
+                return Err(InterpretError::new(format!(
+                    "Physical host with ID '{}' has no MAC addresses defined.",
+                    node.id
+                )));
+            }
+            info!(
+                "[ControlPlane] Applying iPXE config for node ID '{}' with MACs: {:?}",
+                node.id, mac_address
+            );
+
+            IPxeMacBootFileScore {
+                mac_address,
+                content: content.clone(),
+            }
+            .interpret(inventory, topology)
+            .await?;
+        }
+
+        Ok(())
+    }
+
+    /// Prompts the user to reboot the target control plane nodes.
+    async fn reboot_targets(&self, nodes: &Vec<PhysicalHost>) -> Result<(), InterpretError> {
+        let node_ids: Vec<String> = nodes.iter().map(|n| n.id.to_string()).collect();
+        info!("[ControlPlane] Requesting reboot for control plane nodes: {node_ids:?}",);
+
+        let confirmation = inquire::Confirm::new(
+            &format!("Please reboot the {} control plane nodes ({}) to apply their PXE configuration. Press enter when ready.", nodes.len(), node_ids.join(", ")),
+        )
+        .prompt()
+        .map_err(|e| InterpretError::new(format!("User prompt failed: {e}")))?;
+
+        if !confirmation {
+            return Err(InterpretError::new(
+                "User aborted the operation.".to_string(),
+            ));
+        }
+
+        Ok(())
+    }
+}
+
+#[async_trait]
+impl Interpret<HAClusterTopology> for OKDSetup03ControlPlaneInterpret {
+    fn get_name(&self) -> InterpretName {
+        InterpretName::Custom("OKDSetup03ControlPlane")
+    }
+
+    fn get_version(&self) -> Version {
+        self.version.clone()
+    }
+
+    fn get_status(&self) -> InterpretStatus {
+        self.status.clone()
+    }
+
+    fn get_children(&self) -> Vec<Id> {
+        vec![]
+    }
+
+    async fn execute(
+        &self,
+        inventory: &Inventory,
+        topology: &HAClusterTopology,
+    ) -> Result<Outcome, InterpretError> {
+        // 1. Ensure we have 3 physical hosts for the control plane.
+        let nodes = self.get_nodes(inventory, topology).await?;
+
+        // 2. Create DHCP reservations for the control plane nodes.
+        self.configure_host_binding(inventory, topology, &nodes)
+            .await?;
+
+        // 3. Create iPXE files for each control plane node to boot from the master ignition.
+        self.configure_ipxe(inventory, topology, &nodes).await?;
+
+        // 4. Reboot the nodes to start the OS installation.
+        self.reboot_targets(&nodes).await?;
+
+        // TODO: Implement a step to wait for the control plane nodes to join the cluster
+        // and for the cluster operators to become available. This would be similar to
+        // the `wait-for bootstrap-complete` command.
+        info!("[ControlPlane] Provisioning initiated. Monitor the cluster convergence manually.");
+
+        Ok(Outcome::success(
+            "Control plane provisioning has been successfully initiated.".into(),
+        ))
+    }
+}
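For orientation, this is how the score above is typically driven; the `inventory` and `topology` values are assumed to exist already, and the wrapper function is illustrative rather than part of this diff:

````rust
// Sketch only — exercises the Score -> Interpret indirection added above.
async fn provision_control_plane(
    inventory: &Inventory,
    topology: &HAClusterTopology,
) -> Result<Outcome, InterpretError> {
    // The score is a plain descriptor; create_interpret() builds the executable
    // step, which discovers hosts, binds DHCP, renders iPXE, and prompts for reboot.
    OKDSetup03ControlPlaneScore {}
        .create_interpret()
        .execute(inventory, topology)
        .await
}
````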
@@ -1,9 +1,15 @@
+use async_trait::async_trait;
 use derive_new::new;
+use harmony_types::id::Id;
+use log::info;
 use serde::Serialize;

 use crate::{
-    interpret::Interpret, inventory::HostRole, modules::okd::bootstrap_okd_node::OKDNodeInterpret,
-    score::Score, topology::HAClusterTopology,
+    data::Version,
+    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
+    inventory::Inventory,
+    score::Score,
+    topology::HAClusterTopology,
 };

 // -------------------------------------------------------------------------------------------------
@@ -17,10 +23,61 @@ pub struct OKDSetup04WorkersScore {}

 impl Score<HAClusterTopology> for OKDSetup04WorkersScore {
     fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
-        Box::new(OKDNodeInterpret::new(HostRole::Worker))
+        Box::new(OKDSetup04WorkersInterpret::new(self.clone()))
     }

     fn name(&self) -> String {
         "OKDSetup04WorkersScore".to_string()
     }
 }
+
+#[derive(Debug, Clone)]
+pub struct OKDSetup04WorkersInterpret {
+    score: OKDSetup04WorkersScore,
+    version: Version,
+    status: InterpretStatus,
+}
+
+impl OKDSetup04WorkersInterpret {
+    pub fn new(score: OKDSetup04WorkersScore) -> Self {
+        let version = Version::from("1.0.0").unwrap();
+        Self {
+            version,
+            score,
+            status: InterpretStatus::QUEUED,
+        }
+    }
+
+    async fn render_and_reboot(&self) -> Result<(), InterpretError> {
+        info!("[Workers] Rendering per-MAC PXE for workers and rebooting");
+        Ok(())
+    }
+}
+
+#[async_trait]
+impl Interpret<HAClusterTopology> for OKDSetup04WorkersInterpret {
+    fn get_name(&self) -> InterpretName {
+        InterpretName::Custom("OKDSetup04Workers")
+    }
+
+    fn get_version(&self) -> Version {
+        self.version.clone()
+    }
+
+    fn get_status(&self) -> InterpretStatus {
+        self.status.clone()
+    }
+
+    fn get_children(&self) -> Vec<Id> {
+        vec![]
+    }
+
+    async fn execute(
+        &self,
+        _inventory: &Inventory,
+        _topology: &HAClusterTopology,
+    ) -> Result<Outcome, InterpretError> {
+        self.render_and_reboot().await?;
+        Ok(Outcome::success("Workers provisioned".into()))
+    }
+}
@@ -1,303 +0,0 @@
use async_trait::async_trait;
use derive_new::new;
use harmony_types::id::Id;
use log::{debug, info};
use serde::Serialize;

use crate::{
    data::Version,
    hardware::PhysicalHost,
    infra::inventory::InventoryRepositoryFactory,
    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
    inventory::{HostRole, Inventory},
    modules::{
        dhcp::DhcpHostBindingScore,
        http::IPxeMacBootFileScore,
        inventory::DiscoverHostForRoleScore,
        okd::{
            okd_node::{
                BootstrapRole, ControlPlaneRole, OKDRoleProperties, StorageRole, WorkerRole,
            },
            templates::BootstrapIpxeTpl,
        },
    },
    score::Score,
    topology::{HAClusterTopology, HostBinding, LogicalHost},
};

#[derive(Debug, Clone, Serialize, new)]
pub struct OKDNodeInstallationScore {
    host_role: HostRole,
}

impl Score<HAClusterTopology> for OKDNodeInstallationScore {
    fn name(&self) -> String {
        "OKDNodeScore".to_string()
    }

    fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
        Box::new(OKDNodeInterpret::new(self.host_role.clone()))
    }
}

#[derive(Debug, Clone)]
pub struct OKDNodeInterpret {
    host_role: HostRole,
}

impl OKDNodeInterpret {
    pub fn new(host_role: HostRole) -> Self {
        Self { host_role }
    }

    fn okd_role_properties(&self, role: &HostRole) -> &'static dyn OKDRoleProperties {
        match role {
            HostRole::Bootstrap => &BootstrapRole,
            HostRole::ControlPlane => &ControlPlaneRole,
            HostRole::Worker => &WorkerRole,
        }
    }

    async fn get_nodes(
        &self,
        inventory: &Inventory,
        topology: &HAClusterTopology,
    ) -> Result<Vec<PhysicalHost>, InterpretError> {
        let repo = InventoryRepositoryFactory::build().await?;

        let mut hosts = repo.get_host_for_role(&self.host_role).await?;

        let okd_host_properties = self.okd_role_properties(&self.host_role);

        let required_hosts: usize = okd_host_properties.required_hosts();

        while hosts.len() < required_hosts {
            info!(
                "Discovery of {} {} hosts in progress, current number {}",
                required_hosts,
                self.host_role,
                hosts.len()
            );
            // This score triggers the discovery agent for a specific role.
            DiscoverHostForRoleScore {
                role: self.host_role.clone(),
            }
            .interpret(inventory, topology)
            .await?;
            hosts = repo.get_host_for_role(&self.host_role).await?;
        }

        if hosts.len() < required_hosts {
            Err(InterpretError::new(format!(
                "OKD Requires at least {} {} hosts, but only found {}. Cannot proceed.",
                required_hosts,
                self.host_role,
                hosts.len()
            )))
        } else {
            // Take exactly the number of required hosts to ensure consistency.
            Ok(hosts.into_iter().take(required_hosts).collect())
        }
    }

    /// Configures DHCP host bindings for all nodes.
    async fn configure_host_binding(
        &self,
        inventory: &Inventory,
        topology: &HAClusterTopology,
        nodes: &Vec<PhysicalHost>,
    ) -> Result<(), InterpretError> {
        info!(
            "[{}] Configuring host bindings for {} plane nodes.",
            self.host_role, self.host_role,
        );

        let host_properties = self.okd_role_properties(&self.host_role);

        self.validate_host_node_match(nodes, host_properties.logical_hosts(topology))?;

        let bindings: Vec<HostBinding> =
            self.host_bindings(nodes, host_properties.logical_hosts(topology));

        DhcpHostBindingScore {
            host_binding: bindings,
            domain: Some(topology.domain_name.clone()),
        }
        .interpret(inventory, topology)
        .await?;

        Ok(())
    }

    // Ensure the topology definition matches the number of physical nodes found.
    fn validate_host_node_match(
        &self,
        nodes: &Vec<PhysicalHost>,
        hosts: &Vec<LogicalHost>,
    ) -> Result<(), InterpretError> {
        if hosts.len() != nodes.len() {
            return Err(InterpretError::new(format!(
                "Mismatch between logical hosts defined in topology ({}) and physical nodes found ({}).",
                hosts.len(),
                nodes.len()
            )));
        }
        Ok(())
    }

    // Create a binding for each physical host to its corresponding logical host.
    fn host_bindings(
        &self,
        nodes: &Vec<PhysicalHost>,
        hosts: &Vec<LogicalHost>,
    ) -> Vec<HostBinding> {
        hosts
            .iter()
            .zip(nodes.iter())
            .map(|(logical_host, physical_host)| {
                info!(
                    "Creating binding: Logical Host '{}' -> Physical Host ID '{}'",
                    logical_host.name, physical_host.id
                );
                HostBinding {
                    logical_host: logical_host.clone(),
                    physical_host: physical_host.clone(),
                }
            })
            .collect()
    }

    /// Renders and deploys a per-MAC iPXE boot file for each node.
    async fn configure_ipxe(
        &self,
        inventory: &Inventory,
        topology: &HAClusterTopology,
        nodes: &Vec<PhysicalHost>,
    ) -> Result<(), InterpretError> {
        info!(
            "[{}] Rendering per-MAC iPXE configurations.",
            self.host_role
        );

        let okd_role_properties = self.okd_role_properties(&self.host_role);
        // The iPXE script content is the same for all control plane nodes,
        // pointing to the 'master.ign' ignition file.
        let content = BootstrapIpxeTpl {
            http_ip: &topology.http_server.get_ip().to_string(),
            scos_path: "scos",
            ignition_http_path: "okd_ignition_files",
            //TODO must be refactored to not only use /dev/sda
            installation_device: "/dev/sda", // This might need to be configurable per-host in the future
            ignition_file_name: okd_role_properties.ignition_file(),
        }
        .to_string();

        debug!("[{}] iPXE content template:\n{content}", self.host_role);

        // Create and apply an iPXE boot file for each node.
        for node in nodes {
            let mac_address = node.get_mac_address();
            if mac_address.is_empty() {
                return Err(InterpretError::new(format!(
                    "Physical host with ID '{}' has no MAC addresses defined.",
                    node.id
                )));
            }
            info!(
                "[{}] Applying iPXE config for node ID '{}' with MACs: {:?}",
                self.host_role, node.id, mac_address
            );

            IPxeMacBootFileScore {
                mac_address,
                content: content.clone(),
            }
            .interpret(inventory, topology)
            .await?;
        }

        Ok(())
    }

    /// Prompts the user to reboot the target control plane nodes.
    async fn reboot_targets(&self, nodes: &Vec<PhysicalHost>) -> Result<(), InterpretError> {
        let node_ids: Vec<String> = nodes.iter().map(|n| n.id.to_string()).collect();
        info!(
            "[{}] Requesting reboot for control plane nodes: {node_ids:?}",
            self.host_role
        );

        let confirmation = inquire::Confirm::new(
            &format!("Please reboot the {} {} nodes ({}) to apply their PXE configuration. Press enter when ready.", nodes.len(), self.host_role, node_ids.join(", ")),
        )
        .prompt()
        .map_err(|e| InterpretError::new(format!("User prompt failed: {e}")))?;

        if !confirmation {
            return Err(InterpretError::new(
                "User aborted the operation.".to_string(),
            ));
        }

        Ok(())
    }
}

#[async_trait]
impl Interpret<HAClusterTopology> for OKDNodeInterpret {
    async fn execute(
        &self,
        inventory: &Inventory,
        topology: &HAClusterTopology,
    ) -> Result<Outcome, InterpretError> {
        // 1. Ensure we have the specified number of physical hosts.
        let nodes = self.get_nodes(inventory, topology).await?;

        // 2. Create DHCP reservations for the nodes.
        self.configure_host_binding(inventory, topology, &nodes)
            .await?;

        // 3. Create iPXE files for each node to boot from the ignition.
        self.configure_ipxe(inventory, topology, &nodes).await?;

        // 4. Reboot the nodes to start the OS installation.
        self.reboot_targets(&nodes).await?;
        // TODO: Implement a step to validate that the installation of the nodes is
        // complete and for the cluster operators to become available.
        //
        // The OpenShift installer only provides two wait commands which currently need to be
        // run manually:
        // - `openshift-install wait-for bootstrap-complete`
        // - `openshift-install wait-for install-complete`
        //
        // There is no installer command that waits specifically for worker node
        // provisioning. Worker nodes join asynchronously (via ignition + CSR approval),
        // and the cluster becomes fully functional only once all nodes are Ready and the
        // cluster operators report Available=True.
        info!(
            "[{}] Provisioning initiated. Monitor the cluster convergence manually.",
            self.host_role
        );

        Ok(Outcome::success(format!(
            "{} provisioning has been successfully initiated.",
            self.host_role
        )))
    }

    fn get_name(&self) -> InterpretName {
        InterpretName::Custom("OKDNodeSetup".into())
    }

    fn get_version(&self) -> Version {
        todo!()
    }

    fn get_status(&self) -> InterpretStatus {
        todo!()
    }

    fn get_children(&self) -> Vec<Id> {
        todo!()
    }
}
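The deleted interpret above relied on trait-object dispatch (`okd_role_properties`) to keep a single code path for every role. A reconstructed sketch of that pattern follows; the trait's signatures are inferred from the call sites in the deleted file, since `okd_node.rs` itself is not part of this diff:

````rust
// Illustrative reconstruction — names and signatures are inferred, not verbatim.
trait RoleProperties {
    fn required_hosts(&self) -> usize;
    fn ignition_file(&self) -> &'static str;
}

struct ControlPlane;
struct Worker;

impl RoleProperties for ControlPlane {
    fn required_hosts(&self) -> usize { 3 } // OKD expects three control plane nodes
    fn ignition_file(&self) -> &'static str { "master.ign" }
}

impl RoleProperties for Worker {
    fn required_hosts(&self) -> usize { 1 } // assumption: at least one worker
    fn ignition_file(&self) -> &'static str { "worker.ign" }
}

// One generic code path, mirroring okd_role_properties() in the deleted file.
fn properties(role: &str) -> &'static dyn RoleProperties {
    match role {
        "control-plane" => &ControlPlane,
        _ => &Worker,
    }
}
````

The per-role interprets added earlier in this compare trade this indirection for explicit, role-specific implementations.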
@@ -1,7 +1,6 @@
 use std::collections::BTreeMap;

-use k8s_openapi::{ClusterResourceScope, Resource};
-use kube::{CustomResource, api::ObjectMeta};
+use kube::CustomResource;
 use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};
 use serde_json::Value;
@@ -48,223 +47,28 @@ pub struct ProbeDns {
 group = "nmstate.io",
 version = "v1",
 kind = "NodeNetworkConfigurationPolicy",
-namespaced = false
+namespaced
 )]
 #[serde(rename_all = "camelCase")]
 pub struct NodeNetworkConfigurationPolicySpec {
     #[serde(skip_serializing_if = "Option::is_none")]
     pub node_selector: Option<BTreeMap<String, String>>,
-    pub desired_state: NetworkState,
+    pub desired_state: DesiredStateSpec,
-}
-
-// Currently, kube-rs derive doesn't support resources without a `spec` field, so we have
-// to implement it ourselves.
-//
-// Ref:
-// - https://github.com/kube-rs/kube/issues/1763
-// - https://github.com/kube-rs/kube/discussions/1762
-#[derive(Deserialize, Serialize, Clone, Debug)]
-#[serde(rename_all = "camelCase")]
-pub struct NodeNetworkState {
-    metadata: ObjectMeta,
-    pub status: NodeNetworkStateStatus,
-}
-
-impl Resource for NodeNetworkState {
-    const API_VERSION: &'static str = "nmstate.io/v1beta1";
-    const GROUP: &'static str = "nmstate.io";
-    const VERSION: &'static str = "v1beta1";
-    const KIND: &'static str = "NodeNetworkState";
-    const URL_PATH_SEGMENT: &'static str = "nodenetworkstates";
-    type Scope = ClusterResourceScope;
-}
-
-impl k8s_openapi::Metadata for NodeNetworkState {
-    type Ty = ObjectMeta;
-
-    fn metadata(&self) -> &Self::Ty {
-        &self.metadata
-    }
-
-    fn metadata_mut(&mut self) -> &mut Self::Ty {
-        &mut self.metadata
-    }
-}
-
-#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct NodeNetworkStateStatus {
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub current_state: Option<NetworkState>,
-
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub handler_nmstate_version: Option<String>,
-
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub host_network_manager_version: Option<String>,
-
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub last_successful_update_time: Option<String>,
-}
-
-/// The NetworkState is the top-level struct, representing the entire
-/// desired or current network state.
-#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
-#[serde(rename_all = "kebab-case")]
-#[serde(deny_unknown_fields)]
-pub struct NetworkState {
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub hostname: Option<HostNameState>,
-    #[serde(rename = "dns-resolver", skip_serializing_if = "Option::is_none")]
-    pub dns: Option<DnsState>,
-    #[serde(rename = "route-rules", skip_serializing_if = "Option::is_none")]
-    pub rules: Option<RouteRuleState>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub routes: Option<RouteState>,
-    #[serde(default, skip_serializing_if = "Vec::is_empty")]
-    pub interfaces: Vec<Interface>,
-    #[serde(rename = "ovs-db", skip_serializing_if = "Option::is_none")]
-    pub ovsdb: Option<OvsDbGlobalConfig>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub ovn: Option<OvnConfiguration>,
-}
-
-#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
-#[serde(rename_all = "kebab-case")]
-pub struct HostNameState {
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub running: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub config: Option<String>,
-}
-
-#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
-#[serde(rename_all = "kebab-case")]
-pub struct DnsState {
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub running: Option<DnsResolverConfig>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub config: Option<DnsResolverConfig>,
-}
-
-#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
-#[serde(rename_all = "kebab-case")]
-pub struct DnsResolverConfig {
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub search: Option<Vec<String>>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub server: Option<Vec<String>>,
-}
-
-#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
-#[serde(rename_all = "kebab-case")]
-pub struct RouteRuleState {
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub config: Option<Vec<RouteRule>>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub running: Option<Vec<RouteRule>>,
-}
-
-#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
-#[serde(rename_all = "kebab-case")]
-pub struct RouteState {
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub config: Option<Vec<Route>>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub running: Option<Vec<Route>>,
-}
-
-#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
-#[serde(rename_all = "kebab-case")]
-pub struct RouteRule {
-    #[serde(rename = "ip-from", skip_serializing_if = "Option::is_none")]
-    pub ip_from: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub priority: Option<u32>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub route_table: Option<u32>,
-}
-
-#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
-#[serde(rename_all = "kebab-case")]
-pub struct Route {
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub destination: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub metric: Option<u32>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub next_hop_address: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub next_hop_interface: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub table_id: Option<u32>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub mtu: Option<u32>,
-}
-
-#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
-#[serde(rename_all = "kebab-case")]
-pub struct OvsDbGlobalConfig {
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub external_ids: Option<BTreeMap<String, String>>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub other_config: Option<BTreeMap<String, String>>,
-}
-
-#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
-#[serde(rename_all = "kebab-case")]
-pub struct OvnConfiguration {
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub bridge_mappings: Option<Vec<OvnBridgeMapping>>,
-}
-
-#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
-#[serde(rename_all = "kebab-case")]
-pub struct OvnBridgeMapping {
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub localnet: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub bridge: Option<String>,
-}
-
-#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
-#[serde(untagged)]
-#[serde(rename_all = "kebab-case")]
-pub enum StpSpec {
-    Bool(bool),
-    Options(StpOptions),
 }

 #[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
 #[serde(rename_all = "kebab-case")]
-pub struct LldpState {
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub enabled: Option<bool>,
-}
-
-#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
-#[serde(rename_all = "kebab-case")]
-pub struct OvsDb {
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub external_ids: Option<BTreeMap<String, String>>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub other_config: Option<BTreeMap<String, String>>,
-}
-
-#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
-#[serde(rename_all = "kebab-case")]
-pub struct PatchState {
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub peer: Option<String>,
+pub struct DesiredStateSpec {
+    pub interfaces: Vec<InterfaceSpec>,
 }

 #[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
 #[serde(rename_all = "kebab-case")]
-pub struct Interface {
+pub struct InterfaceSpec {
     pub name: String,
     #[serde(skip_serializing_if = "Option::is_none")]
     pub description: Option<String>,
-    pub r#type: InterfaceType,
+    pub r#type: String,
     pub state: String,
     #[serde(skip_serializing_if = "Option::is_none")]
     pub mac_address: Option<String>,
@@ -295,81 +99,9 @@ pub struct Interface {
     #[serde(skip_serializing_if = "Option::is_none")]
     pub linux_bridge: Option<LinuxBridgeSpec>,
     #[serde(skip_serializing_if = "Option::is_none")]
-    #[serde(alias = "bridge")]
     pub ovs_bridge: Option<OvsBridgeSpec>,
     #[serde(skip_serializing_if = "Option::is_none")]
-    pub ethtool: Option<Value>,
+    pub ethtool: Option<EthtoolSpec>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub accept_all_mac_addresses: Option<bool>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub identifier: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub lldp: Option<LldpState>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub permanent_mac_address: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub max_mtu: Option<u32>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub min_mtu: Option<u32>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub mptcp: Option<Value>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub profile_name: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub wait_ip: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub ovs_db: Option<OvsDb>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub driver: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub patch: Option<PatchState>,
-}
-
-#[derive(Deserialize, Serialize, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, JsonSchema)]
-#[serde(rename_all = "kebab-case")]
-pub enum InterfaceType {
-    #[serde(rename = "unknown")]
-    Unknown,
-    #[serde(rename = "dummy")]
-    Dummy,
-    #[serde(rename = "loopback")]
-    Loopback,
-    #[serde(rename = "linux-bridge")]
-    LinuxBridge,
-    #[serde(rename = "ovs-bridge")]
-    OvsBridge,
-    #[serde(rename = "ovs-interface")]
-    OvsInterface,
-    #[serde(rename = "bond")]
-    Bond,
-    #[serde(rename = "ipvlan")]
-    IpVlan,
-    #[serde(rename = "vlan")]
-    Vlan,
-    #[serde(rename = "vxlan")]
-    Vxlan,
-    #[serde(rename = "mac-vlan")]
-    Macvlan,
-    #[serde(rename = "mac-vtap")]
-    Macvtap,
-    #[serde(rename = "ethernet")]
-    Ethernet,
-    #[serde(rename = "infiniband")]
-    Infiniband,
-    #[serde(rename = "vrf")]
-    Vrf,
-    #[serde(rename = "veth")]
-    Veth,
-    #[serde(rename = "ipsec")]
-    Ipsec,
-    #[serde(rename = "hsr")]
-    Hrs,
-}
-
-impl Default for InterfaceType {
-    fn default() -> Self {
-        Self::Loopback
-    }
 }

 #[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
@@ -417,7 +149,6 @@ pub struct EthernetSpec {
 #[serde(rename_all = "kebab-case")]
 pub struct BondSpec {
     pub mode: String,
-    #[serde(alias = "port")]
     pub ports: Vec<String>,
     #[serde(skip_serializing_if = "Option::is_none")]
     pub options: Option<BTreeMap<String, Value>>,
@@ -556,15 +287,11 @@ pub struct OvsBridgeSpec {
 #[serde(rename_all = "kebab-case")]
 pub struct OvsBridgeOptions {
     #[serde(skip_serializing_if = "Option::is_none")]
-    pub stp: Option<StpSpec>,
+    pub stp: Option<bool>,
     #[serde(skip_serializing_if = "Option::is_none")]
     pub rstp: Option<bool>,
     #[serde(skip_serializing_if = "Option::is_none")]
     pub mcast_snooping_enable: Option<bool>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub datapath: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub fail_mode: Option<String>,
 }

 #[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
@@ -578,3 +305,18 @@ pub struct OvsPortSpec {
     #[serde(skip_serializing_if = "Option::is_none")]
     pub r#type: Option<String>,
 }
+
+#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
+#[serde(rename_all = "kebab-case")]
+pub struct EthtoolSpec {
+    // TODO: Properly describe this spec (https://nmstate.io/devel/yaml_api.html#ethtool)
+}
+
+#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
+#[serde(rename_all = "kebab-case")]
+pub struct EthtoolFecSpec {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub auto: Option<bool>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub mode: Option<String>,
+}
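With the slimmed-down types on the new side of this diff, a desired state is built directly from `InterfaceSpec` values and `r#type` becomes a plain string. A minimal construction sketch; the hostname and bond name are illustrative:

````rust
// Sketch only — constructs the new spec types introduced in this diff.
use std::collections::BTreeMap;

fn example_policy_spec() -> NodeNetworkConfigurationPolicySpec {
    NodeNetworkConfigurationPolicySpec {
        node_selector: Some(BTreeMap::from([(
            "kubernetes.io/hostname".to_string(),
            "worker-0".to_string(), // illustrative hostname
        )])),
        desired_state: DesiredStateSpec {
            interfaces: vec![InterfaceSpec {
                name: "bond0".to_string(),  // illustrative bond name
                r#type: "bond".to_string(), // r#type is now a plain String, not an enum
                state: "up".to_string(),
                ..Default::default()
            }],
        },
    }
}
````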
@@ -1,6 +1,6 @@
 use async_trait::async_trait;
 use harmony_types::id::Id;
-use log::{info, warn};
+use log::{debug, info};
 use serde::Serialize;

 use crate::{
@@ -9,7 +9,7 @@ use crate::{
     interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
     inventory::Inventory,
     score::Score,
-    topology::{HostNetworkConfig, NetworkInterface, NetworkManager, Switch, SwitchPort, Topology},
+    topology::{HostNetworkConfig, NetworkInterface, Switch, SwitchPort, Topology},
 };

 #[derive(Debug, Clone, Serialize)]
@@ -17,7 +17,7 @@ pub struct HostNetworkConfigurationScore {
     pub hosts: Vec<PhysicalHost>,
 }

-impl<T: Topology + NetworkManager + Switch> Score<T> for HostNetworkConfigurationScore {
+impl<T: Topology + Switch> Score<T> for HostNetworkConfigurationScore {
     fn name(&self) -> String {
         "HostNetworkConfigurationScore".into()
     }
@@ -35,7 +35,7 @@ pub struct HostNetworkConfigurationInterpret {
 }

 impl HostNetworkConfigurationInterpret {
-    async fn configure_network_for_host<T: Topology + NetworkManager + Switch>(
+    async fn configure_network_for_host<T: Topology + Switch>(
         &self,
         topology: &T,
         host: &PhysicalHost,
@@ -49,13 +49,6 @@ impl HostNetworkConfigurationInterpret {
                 switch_ports: vec![],
             });
         }
-        if host.network.len() == 1 {
-            info!("[Host {current_host}/{total_hosts}] Only one interface to configure, skipping");
-            return Ok(HostNetworkConfig {
-                host_id: host.id.clone(),
-                switch_ports: vec![],
-            });
-        }

         let switch_ports = self
             .collect_switch_ports_for_host(topology, host, current_host, total_hosts)
@@ -66,7 +59,7 @@ impl HostNetworkConfigurationInterpret {
             switch_ports,
         };

-        if config.switch_ports.len() > 1 {
+        if !config.switch_ports.is_empty() {
             info!(
                 "[Host {current_host}/{total_hosts}] Found {} ports for {} interfaces",
                 config.switch_ports.len(),
@@ -74,25 +67,15 @@ impl HostNetworkConfigurationInterpret {
             );

             info!("[Host {current_host}/{total_hosts}] Configuring host network...");
-            topology.configure_bond(&config).await.map_err(|e| {
-                InterpretError::new(format!("Failed to configure host network: {e}"))
-            })?;
             topology
-                .configure_port_channel(&config)
+                .configure_host_network(&config)
                 .await
-                .map_err(|e| {
-                    InterpretError::new(format!("Failed to configure host network: {e}"))
-                })?;
-        } else if config.switch_ports.is_empty() {
+                .map_err(|e| InterpretError::new(format!("Failed to configure host: {e}")))?;
+        } else {
             info!(
                 "[Host {current_host}/{total_hosts}] No ports found for {} interfaces, skipping",
                 host.network.len()
             );
-        } else {
-            warn!(
-                "[Host {current_host}/{total_hosts}] Found a single port for {} interfaces, skipping",
-                host.network.len()
-            );
         }

         Ok(config)
@@ -130,7 +113,7 @@ impl HostNetworkConfigurationInterpret {
                     port,
                 });
             }
-            Ok(None) => {}
+            Ok(None) => debug!("No port found for '{mac_address}', skipping"),
             Err(e) => {
                 return Err(InterpretError::new(format!(
                     "Failed to get port for host '{}': {}",
@@ -150,6 +133,15 @@ impl HostNetworkConfigurationInterpret {
         ];

         for config in configs {
+            let host = self
+                .score
+                .hosts
+                .iter()
+                .find(|h| h.id == config.host_id)
+                .unwrap();
+
+            println!("[Host] {host}");
+
             if config.switch_ports.is_empty() {
                 report.push(format!(
                     "⏭️ Host {}: SKIPPED (No matching switch ports found)",
@@ -177,7 +169,7 @@ impl HostNetworkConfigurationInterpret {
 }

 #[async_trait]
-impl<T: Topology + NetworkManager + Switch> Interpret<T> for HostNetworkConfigurationInterpret {
+impl<T: Topology + Switch> Interpret<T> for HostNetworkConfigurationInterpret {
     fn get_name(&self) -> InterpretName {
         InterpretName::Custom("HostNetworkConfigurationInterpret")
     }
@@ -206,12 +198,6 @@ impl<T: Topology + NetworkManager + Switch> Interpret<T> for HostNetworkConfigur
         let host_count = self.score.hosts.len();
         info!("Started network configuration for {host_count} host(s)...",);

-        info!("Setting up NetworkManager...",);
-        topology
-            .ensure_network_manager_installed()
-            .await
-            .map_err(|e| InterpretError::new(format!("NetworkManager setup failed: {e}")))?;
-
         info!("Setting up switch with sane defaults...");
         topology
             .setup_switch()
@@ -230,7 +216,6 @@ impl<T: Topology + NetworkManager + Switch> Interpret<T> for HostNetworkConfigur
             host_configurations.push(host_configuration);
             current_host += 1;
         }
-
         if current_host > 1 {
             let details = self.format_host_configuration(host_configurations);

@@ -257,8 +242,7 @@ mod tests {
     use crate::{
         hardware::HostCategory,
         topology::{
-            HostNetworkConfig, NetworkError, PreparationError, PreparationOutcome, SwitchError,
-            SwitchPort,
+            HostNetworkConfig, PreparationError, PreparationOutcome, SwitchError, SwitchPort,
         },
     };
     use std::{
@@ -283,18 +267,6 @@ mod tests {
             speed_mbps: None,
             mtu: 1,
         };
-        pub static ref YET_ANOTHER_EXISTING_INTERFACE: NetworkInterface = NetworkInterface {
-            mac_address: MacAddress::try_from("AA:BB:CC:DD:EE:F3".to_string()).unwrap(),
-            name: "interface-3".into(),
-            speed_mbps: None,
-            mtu: 1,
-        };
-        pub static ref LAST_EXISTING_INTERFACE: NetworkInterface = NetworkInterface {
-            mac_address: MacAddress::try_from("AA:BB:CC:DD:EE:F4".to_string()).unwrap(),
-            name: "interface-4".into(),
-            speed_mbps: None,
-            mtu: 1,
-        };
         pub static ref UNKNOWN_INTERFACE: NetworkInterface = NetworkInterface {
             mac_address: MacAddress::try_from("11:22:33:44:55:61".to_string()).unwrap(),
             name: "unknown-interface".into(),
@@ -303,8 +275,6 @@ mod tests {
         };
         pub static ref PORT: PortLocation = PortLocation(1, 0, 42);
         pub static ref ANOTHER_PORT: PortLocation = PortLocation(2, 0, 42);
-        pub static ref YET_ANOTHER_PORT: PortLocation = PortLocation(1, 0, 45);
-        pub static ref LAST_PORT: PortLocation = PortLocation(2, 0, 45);
     }

     #[tokio::test]
@@ -320,33 +290,28 @@ mod tests {
     }

     #[tokio::test]
     async fn should_setup_network_manager() {
|
async fn host_with_one_mac_address_should_create_bond_with_one_interface() {
|
||||||
let host = given_host(&HOST_ID, vec![EXISTING_INTERFACE.clone()]);
|
let host = given_host(&HOST_ID, vec![EXISTING_INTERFACE.clone()]);
|
||||||
let score = given_score(vec![host]);
|
let score = given_score(vec![host]);
|
||||||
let topology = TopologyWithSwitch::new();
|
let topology = TopologyWithSwitch::new();
|
||||||
|
|
||||||
let _ = score.interpret(&Inventory::empty(), &topology).await;
|
let _ = score.interpret(&Inventory::empty(), &topology).await;
|
||||||
|
|
||||||
let network_manager_setup = topology.network_manager_setup.lock().unwrap();
|
let configured_host_networks = topology.configured_host_networks.lock().unwrap();
|
||||||
assert_that!(*network_manager_setup).is_true();
|
assert_that!(*configured_host_networks).contains_exactly(vec![(
|
||||||
|
HOST_ID.clone(),
|
||||||
|
HostNetworkConfig {
|
||||||
|
host_id: HOST_ID.clone(),
|
||||||
|
switch_ports: vec![SwitchPort {
|
||||||
|
interface: EXISTING_INTERFACE.clone(),
|
||||||
|
port: PORT.clone(),
|
||||||
|
}],
|
||||||
|
},
|
||||||
|
)]);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn host_with_one_mac_address_should_skip_host_configuration() {
|
async fn host_with_multiple_mac_addresses_should_create_one_bond_with_all_interfaces() {
|
||||||
let host = given_host(&HOST_ID, vec![EXISTING_INTERFACE.clone()]);
|
|
||||||
let score = given_score(vec![host]);
|
|
||||||
let topology = TopologyWithSwitch::new();
|
|
||||||
|
|
||||||
let _ = score.interpret(&Inventory::empty(), &topology).await;
|
|
||||||
|
|
||||||
let config = topology.configured_bonds.lock().unwrap();
|
|
||||||
assert_that!(*config).is_empty();
|
|
||||||
let config = topology.configured_port_channels.lock().unwrap();
|
|
||||||
assert_that!(*config).is_empty();
|
|
||||||
}
|
|
||||||
|
|
||||||
#[tokio::test]
|
|
||||||
async fn host_with_multiple_mac_addresses_should_configure_one_bond_with_all_interfaces() {
|
|
||||||
let score = given_score(vec![given_host(
|
let score = given_score(vec![given_host(
|
||||||
&HOST_ID,
|
&HOST_ID,
|
||||||
vec![
|
vec![
|
||||||
@@ -358,8 +323,8 @@ mod tests {
|
|||||||
|
|
||||||
let _ = score.interpret(&Inventory::empty(), &topology).await;
|
let _ = score.interpret(&Inventory::empty(), &topology).await;
|
||||||
|
|
||||||
let config = topology.configured_bonds.lock().unwrap();
|
let configured_host_networks = topology.configured_host_networks.lock().unwrap();
|
||||||
assert_that!(*config).contains_exactly(vec![(
|
assert_that!(*configured_host_networks).contains_exactly(vec![(
|
||||||
HOST_ID.clone(),
|
HOST_ID.clone(),
|
||||||
HostNetworkConfig {
|
HostNetworkConfig {
|
||||||
host_id: HOST_ID.clone(),
|
host_id: HOST_ID.clone(),
|
||||||
@@ -378,183 +343,49 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn host_with_multiple_mac_addresses_should_configure_one_port_channel_with_all_interfaces()
|
async fn multiple_hosts_should_create_one_bond_per_host() {
|
||||||
{
|
|
||||||
let score = given_score(vec![given_host(
|
|
||||||
&HOST_ID,
|
|
||||||
vec![
|
|
||||||
EXISTING_INTERFACE.clone(),
|
|
||||||
ANOTHER_EXISTING_INTERFACE.clone(),
|
|
||||||
],
|
|
||||||
)]);
|
|
||||||
let topology = TopologyWithSwitch::new();
|
|
||||||
|
|
||||||
let _ = score.interpret(&Inventory::empty(), &topology).await;
|
|
||||||
|
|
||||||
let config = topology.configured_port_channels.lock().unwrap();
|
|
||||||
assert_that!(*config).contains_exactly(vec![(
|
|
||||||
HOST_ID.clone(),
|
|
||||||
HostNetworkConfig {
|
|
||||||
host_id: HOST_ID.clone(),
|
|
||||||
switch_ports: vec![
|
|
||||||
SwitchPort {
|
|
||||||
interface: EXISTING_INTERFACE.clone(),
|
|
||||||
port: PORT.clone(),
|
|
||||||
},
|
|
||||||
SwitchPort {
|
|
||||||
interface: ANOTHER_EXISTING_INTERFACE.clone(),
|
|
||||||
port: ANOTHER_PORT.clone(),
|
|
||||||
},
|
|
||||||
],
|
|
||||||
},
|
|
||||||
)]);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[tokio::test]
|
|
||||||
async fn multiple_hosts_should_configure_one_bond_per_host() {
|
|
||||||
let score = given_score(vec![
|
let score = given_score(vec![
|
||||||
given_host(
|
given_host(&HOST_ID, vec![EXISTING_INTERFACE.clone()]),
|
||||||
&HOST_ID,
|
given_host(&ANOTHER_HOST_ID, vec![ANOTHER_EXISTING_INTERFACE.clone()]),
|
||||||
vec![
|
|
||||||
EXISTING_INTERFACE.clone(),
|
|
||||||
ANOTHER_EXISTING_INTERFACE.clone(),
|
|
||||||
],
|
|
||||||
),
|
|
||||||
given_host(
|
|
||||||
&ANOTHER_HOST_ID,
|
|
||||||
vec![
|
|
||||||
YET_ANOTHER_EXISTING_INTERFACE.clone(),
|
|
||||||
LAST_EXISTING_INTERFACE.clone(),
|
|
||||||
],
|
|
||||||
),
|
|
||||||
]);
|
]);
|
||||||
let topology = TopologyWithSwitch::new();
|
let topology = TopologyWithSwitch::new();
|
||||||
|
|
||||||
let _ = score.interpret(&Inventory::empty(), &topology).await;
|
let _ = score.interpret(&Inventory::empty(), &topology).await;
|
||||||
|
|
||||||
let config = topology.configured_bonds.lock().unwrap();
|
let configured_host_networks = topology.configured_host_networks.lock().unwrap();
|
||||||
assert_that!(*config).contains_exactly(vec![
|
assert_that!(*configured_host_networks).contains_exactly(vec![
|
||||||
(
|
(
|
||||||
HOST_ID.clone(),
|
HOST_ID.clone(),
|
||||||
HostNetworkConfig {
|
HostNetworkConfig {
|
||||||
host_id: HOST_ID.clone(),
|
host_id: HOST_ID.clone(),
|
||||||
switch_ports: vec![
|
switch_ports: vec![SwitchPort {
|
||||||
SwitchPort {
|
|
||||||
interface: EXISTING_INTERFACE.clone(),
|
interface: EXISTING_INTERFACE.clone(),
|
||||||
port: PORT.clone(),
|
port: PORT.clone(),
|
||||||
},
|
}],
|
||||||
SwitchPort {
|
|
||||||
interface: ANOTHER_EXISTING_INTERFACE.clone(),
|
|
||||||
port: ANOTHER_PORT.clone(),
|
|
||||||
},
|
|
||||||
],
|
|
||||||
},
|
},
|
||||||
),
|
),
|
||||||
(
|
(
|
||||||
ANOTHER_HOST_ID.clone(),
|
ANOTHER_HOST_ID.clone(),
|
||||||
HostNetworkConfig {
|
HostNetworkConfig {
|
||||||
host_id: ANOTHER_HOST_ID.clone(),
|
host_id: ANOTHER_HOST_ID.clone(),
|
||||||
switch_ports: vec![
|
switch_ports: vec![SwitchPort {
|
||||||
SwitchPort {
|
|
||||||
interface: YET_ANOTHER_EXISTING_INTERFACE.clone(),
|
|
||||||
port: YET_ANOTHER_PORT.clone(),
|
|
||||||
},
|
|
||||||
SwitchPort {
|
|
||||||
interface: LAST_EXISTING_INTERFACE.clone(),
|
|
||||||
port: LAST_PORT.clone(),
|
|
||||||
},
|
|
||||||
],
|
|
||||||
},
|
|
||||||
),
|
|
||||||
]);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[tokio::test]
|
|
||||||
async fn multiple_hosts_should_configure_one_port_channel_per_host() {
|
|
||||||
let score = given_score(vec![
|
|
||||||
given_host(
|
|
||||||
&HOST_ID,
|
|
||||||
vec![
|
|
||||||
EXISTING_INTERFACE.clone(),
|
|
||||||
ANOTHER_EXISTING_INTERFACE.clone(),
|
|
||||||
],
|
|
||||||
),
|
|
||||||
given_host(
|
|
||||||
&ANOTHER_HOST_ID,
|
|
||||||
vec![
|
|
||||||
YET_ANOTHER_EXISTING_INTERFACE.clone(),
|
|
||||||
LAST_EXISTING_INTERFACE.clone(),
|
|
||||||
],
|
|
||||||
),
|
|
||||||
]);
|
|
||||||
let topology = TopologyWithSwitch::new();
|
|
||||||
|
|
||||||
let _ = score.interpret(&Inventory::empty(), &topology).await;
|
|
||||||
|
|
||||||
let config = topology.configured_port_channels.lock().unwrap();
|
|
||||||
assert_that!(*config).contains_exactly(vec![
|
|
||||||
(
|
|
||||||
HOST_ID.clone(),
|
|
||||||
HostNetworkConfig {
|
|
||||||
host_id: HOST_ID.clone(),
|
|
||||||
switch_ports: vec![
|
|
||||||
SwitchPort {
|
|
||||||
interface: EXISTING_INTERFACE.clone(),
|
|
||||||
port: PORT.clone(),
|
|
||||||
},
|
|
||||||
SwitchPort {
|
|
||||||
interface: ANOTHER_EXISTING_INTERFACE.clone(),
|
interface: ANOTHER_EXISTING_INTERFACE.clone(),
|
||||||
port: ANOTHER_PORT.clone(),
|
port: ANOTHER_PORT.clone(),
|
||||||
},
|
}],
|
||||||
],
|
|
||||||
},
|
|
||||||
),
|
|
||||||
(
|
|
||||||
ANOTHER_HOST_ID.clone(),
|
|
||||||
HostNetworkConfig {
|
|
||||||
host_id: ANOTHER_HOST_ID.clone(),
|
|
||||||
switch_ports: vec![
|
|
||||||
SwitchPort {
|
|
||||||
interface: YET_ANOTHER_EXISTING_INTERFACE.clone(),
|
|
||||||
port: YET_ANOTHER_PORT.clone(),
|
|
||||||
},
|
|
||||||
SwitchPort {
|
|
||||||
interface: LAST_EXISTING_INTERFACE.clone(),
|
|
||||||
port: LAST_PORT.clone(),
|
|
||||||
},
|
|
||||||
],
|
|
||||||
},
|
},
|
||||||
),
|
),
|
||||||
]);
|
]);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn port_not_found_for_mac_address_should_not_configure_host() {
|
async fn port_not_found_for_mac_address_should_not_configure_interface() {
|
||||||
let score = given_score(vec![given_host(&HOST_ID, vec![UNKNOWN_INTERFACE.clone()])]);
|
let score = given_score(vec![given_host(&HOST_ID, vec![UNKNOWN_INTERFACE.clone()])]);
|
||||||
let topology = TopologyWithSwitch::new_port_not_found();
|
let topology = TopologyWithSwitch::new_port_not_found();
|
||||||
|
|
||||||
let _ = score.interpret(&Inventory::empty(), &topology).await;
|
let _ = score.interpret(&Inventory::empty(), &topology).await;
|
||||||
|
|
||||||
let config = topology.configured_port_channels.lock().unwrap();
|
let configured_host_networks = topology.configured_host_networks.lock().unwrap();
|
||||||
assert_that!(*config).is_empty();
|
assert_that!(*configured_host_networks).is_empty();
|
||||||
let config = topology.configured_bonds.lock().unwrap();
|
|
||||||
assert_that!(*config).is_empty();
|
|
||||||
}
|
|
||||||
|
|
||||||
#[tokio::test]
|
|
||||||
async fn only_one_port_found_for_multiple_mac_addresses_should_not_configure_host() {
|
|
||||||
let score = given_score(vec![given_host(
|
|
||||||
&HOST_ID,
|
|
||||||
vec![EXISTING_INTERFACE.clone(), UNKNOWN_INTERFACE.clone()],
|
|
||||||
)]);
|
|
||||||
let topology = TopologyWithSwitch::new_single_port_found();
|
|
||||||
|
|
||||||
let _ = score.interpret(&Inventory::empty(), &topology).await;
|
|
||||||
|
|
||||||
let config = topology.configured_port_channels.lock().unwrap();
|
|
||||||
assert_that!(*config).is_empty();
|
|
||||||
let config = topology.configured_bonds.lock().unwrap();
|
|
||||||
assert_that!(*config).is_empty();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn given_score(hosts: Vec<PhysicalHost>) -> HostNetworkConfigurationScore {
|
fn given_score(hosts: Vec<PhysicalHost>) -> HostNetworkConfigurationScore {
|
||||||
@@ -591,48 +422,26 @@ mod tests {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug)]
|
|
||||||
struct TopologyWithSwitch {
|
struct TopologyWithSwitch {
|
||||||
available_ports: Arc<Mutex<Vec<PortLocation>>>,
|
available_ports: Arc<Mutex<Vec<PortLocation>>>,
|
||||||
configured_port_channels: Arc<Mutex<Vec<(Id, HostNetworkConfig)>>>,
|
configured_host_networks: Arc<Mutex<Vec<(Id, HostNetworkConfig)>>>,
|
||||||
switch_setup: Arc<Mutex<bool>>,
|
switch_setup: Arc<Mutex<bool>>,
|
||||||
network_manager_setup: Arc<Mutex<bool>>,
|
|
||||||
configured_bonds: Arc<Mutex<Vec<(Id, HostNetworkConfig)>>>,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl TopologyWithSwitch {
|
impl TopologyWithSwitch {
|
||||||
fn new() -> Self {
|
fn new() -> Self {
|
||||||
Self {
|
Self {
|
||||||
available_ports: Arc::new(Mutex::new(vec![
|
available_ports: Arc::new(Mutex::new(vec![PORT.clone(), ANOTHER_PORT.clone()])),
|
||||||
PORT.clone(),
|
configured_host_networks: Arc::new(Mutex::new(vec![])),
|
||||||
ANOTHER_PORT.clone(),
|
|
||||||
YET_ANOTHER_PORT.clone(),
|
|
||||||
LAST_PORT.clone(),
|
|
||||||
])),
|
|
||||||
configured_port_channels: Arc::new(Mutex::new(vec![])),
|
|
||||||
switch_setup: Arc::new(Mutex::new(false)),
|
switch_setup: Arc::new(Mutex::new(false)),
|
||||||
network_manager_setup: Arc::new(Mutex::new(false)),
|
|
||||||
configured_bonds: Arc::new(Mutex::new(vec![])),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn new_port_not_found() -> Self {
|
fn new_port_not_found() -> Self {
|
||||||
Self {
|
Self {
|
||||||
available_ports: Arc::new(Mutex::new(vec![])),
|
available_ports: Arc::new(Mutex::new(vec![])),
|
||||||
configured_port_channels: Arc::new(Mutex::new(vec![])),
|
configured_host_networks: Arc::new(Mutex::new(vec![])),
|
||||||
switch_setup: Arc::new(Mutex::new(false)),
|
switch_setup: Arc::new(Mutex::new(false)),
|
||||||
network_manager_setup: Arc::new(Mutex::new(false)),
|
|
||||||
configured_bonds: Arc::new(Mutex::new(vec![])),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn new_single_port_found() -> Self {
|
|
||||||
Self {
|
|
||||||
available_ports: Arc::new(Mutex::new(vec![PORT.clone()])),
|
|
||||||
configured_port_channels: Arc::new(Mutex::new(vec![])),
|
|
||||||
switch_setup: Arc::new(Mutex::new(false)),
|
|
||||||
network_manager_setup: Arc::new(Mutex::new(false)),
|
|
||||||
configured_bonds: Arc::new(Mutex::new(vec![])),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -648,22 +457,6 @@ mod tests {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait]
|
|
||||||
impl NetworkManager for TopologyWithSwitch {
|
|
||||||
async fn ensure_network_manager_installed(&self) -> Result<(), NetworkError> {
|
|
||||||
let mut network_manager_installed = self.network_manager_setup.lock().unwrap();
|
|
||||||
*network_manager_installed = true;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn configure_bond(&self, config: &HostNetworkConfig) -> Result<(), NetworkError> {
|
|
||||||
let mut configured_bonds = self.configured_bonds.lock().unwrap();
|
|
||||||
configured_bonds.push((config.host_id.clone(), config.clone()));
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl Switch for TopologyWithSwitch {
|
impl Switch for TopologyWithSwitch {
|
||||||
async fn setup_switch(&self) -> Result<(), SwitchError> {
|
async fn setup_switch(&self) -> Result<(), SwitchError> {
|
||||||
@@ -683,12 +476,12 @@ mod tests {
|
|||||||
Ok(Some(ports.remove(0)))
|
Ok(Some(ports.remove(0)))
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn configure_port_channel(
|
async fn configure_host_network(
|
||||||
&self,
|
&self,
|
||||||
config: &HostNetworkConfig,
|
config: &HostNetworkConfig,
|
||||||
) -> Result<(), SwitchError> {
|
) -> Result<(), SwitchError> {
|
||||||
let mut configured_port_channels = self.configured_port_channels.lock().unwrap();
|
let mut configured_host_networks = self.configured_host_networks.lock().unwrap();
|
||||||
configured_port_channels.push((config.host_id.clone(), config.clone()));
|
configured_host_networks.push((config.host_id.clone(), config.clone()));
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
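
The hunks above fold the old two-step flow (`configure_bond` on `NetworkManager`, then `configure_port_channel` on `Switch`) into a single `configure_host_network` call, and the `NetworkManager` bound disappears from the `Interpret` impl. A minimal sketch of the consolidated shape, with simplified stand-in types (the real `HostNetworkConfig`, `SwitchError`, and `SwitchPort` live in the harmony topology module and may differ):

```rust
use async_trait::async_trait;

// Simplified stand-ins for the real harmony types.
#[derive(Debug, Clone)]
pub struct HostNetworkConfig {
    pub host_id: String,
    pub switch_ports: Vec<String>, // interface/port pairs in the real code
}

#[derive(Debug)]
pub struct SwitchError(pub String);

// After the refactor, one capability trait owns the whole host-network step:
// bond creation on the host and port-channel setup on the switch happen
// behind a single entry point instead of two traits.
#[async_trait]
pub trait Switch {
    async fn setup_switch(&self) -> Result<(), SwitchError>;
    async fn configure_host_network(&self, config: &HostNetworkConfig) -> Result<(), SwitchError>;
}
```

One practical effect, visible in the tests: the fake topology now records a single `configured_host_networks` list instead of separate bond and port-channel logs.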
@@ -6,14 +6,12 @@ mod bootstrap_05_sanity_check;
 mod bootstrap_06_installation_report;
 pub mod bootstrap_dhcp;
 pub mod bootstrap_load_balancer;
-pub mod bootstrap_okd_node;
 mod bootstrap_persist_network_bond;
 pub mod dhcp;
 pub mod dns;
 pub mod installation;
 pub mod ipxe;
 pub mod load_balancer;
-pub mod okd_node;
 pub mod templates;
 pub mod upgrade;
 pub use bootstrap_01_prepare::*;
@@ -1,54 +0,0 @@
-use crate::topology::{HAClusterTopology, LogicalHost};
-
-pub trait OKDRoleProperties {
-    fn ignition_file(&self) -> &'static str;
-    fn required_hosts(&self) -> usize;
-    fn logical_hosts<'a>(&self, t: &'a HAClusterTopology) -> &'a Vec<LogicalHost>;
-}
-
-pub struct BootstrapRole;
-pub struct ControlPlaneRole;
-pub struct WorkerRole;
-pub struct StorageRole;
-
-impl OKDRoleProperties for BootstrapRole {
-    fn ignition_file(&self) -> &'static str {
-        "bootstrap.ign"
-    }
-
-    fn required_hosts(&self) -> usize {
-        1
-    }
-
-    fn logical_hosts<'a>(&self, t: &'a HAClusterTopology) -> &'a Vec<LogicalHost> {
-        todo!()
-    }
-}
-
-impl OKDRoleProperties for ControlPlaneRole {
-    fn ignition_file(&self) -> &'static str {
-        "master.ign"
-    }
-
-    fn required_hosts(&self) -> usize {
-        3
-    }
-
-    fn logical_hosts<'a>(&self, t: &'a HAClusterTopology) -> &'a Vec<LogicalHost> {
-        &t.control_plane
-    }
-}
-
-impl OKDRoleProperties for WorkerRole {
-    fn ignition_file(&self) -> &'static str {
-        "worker.ign"
-    }
-
-    fn required_hosts(&self) -> usize {
-        2
-    }
-
-    fn logical_hosts<'a>(&self, t: &'a HAClusterTopology) -> &'a Vec<LogicalHost> {
-        &t.workers
-    }
-}
@@ -1,3 +1,4 @@
+pub mod node_exporter;
 mod shell;
 mod upgrade;
 pub use shell::*;
harmony/src/modules/opnsense/node_exporter.rs (new file, 70 lines)
@@ -0,0 +1,70 @@
+use async_trait::async_trait;
+use harmony_types::id::Id;
+use log::info;
+use serde::Serialize;
+
+use crate::{
+    data::Version,
+    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
+    inventory::Inventory,
+    score::Score,
+    topology::{Topology, node_exporter::NodeExporter},
+};
+
+#[derive(Debug, Clone, Serialize)]
+pub struct NodeExporterScore {}
+
+impl<T: Topology + NodeExporter> Score<T> for NodeExporterScore {
+    fn name(&self) -> String {
+        "NodeExporterScore".to_string()
+    }
+
+    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
+        Box::new(NodeExporterInterpret {})
+    }
+}
+
+#[derive(Debug)]
+pub struct NodeExporterInterpret {}
+
+#[async_trait]
+impl<T: Topology + NodeExporter> Interpret<T> for NodeExporterInterpret {
+    async fn execute(
+        &self,
+        _inventory: &Inventory,
+        node_exporter: &T,
+    ) -> Result<Outcome, InterpretError> {
+        info!(
+            "Making sure node exporter is initiailized: {:?}",
+            node_exporter.ensure_initialized().await?
+        );
+
+        info!("Applying Node Exporter configuration");
+
+        node_exporter.commit_config().await?;
+
+        info!("Reloading and restarting Node Exporter");
+
+        node_exporter.reload_restart().await?;
+
+        Ok(Outcome::success(format!(
+            "NodeExporter successfully configured"
+        )))
+    }
+
+    fn get_name(&self) -> InterpretName {
+        InterpretName::Custom("NodeExporter")
+    }
+
+    fn get_version(&self) -> Version {
+        todo!()
+    }
+
+    fn get_status(&self) -> InterpretStatus {
+        todo!()
+    }
+
+    fn get_children(&self) -> Vec<Id> {
+        todo!()
+    }
+}
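
Because `NodeExporterScore` is generic over any `T: Topology + NodeExporter`, a topology picks up this score just by implementing the capability trait. A hedged sketch of that trait and a mock implementor, inferred purely from the three calls `execute` makes above (`ensure_initialized`, `commit_config`, `reload_restart`); the real trait in `topology::node_exporter` and its error types may differ:

```rust
use async_trait::async_trait;

// Assumed shape of the capability trait, reconstructed from the interpret's
// call sites; error type simplified to String for the sketch.
#[async_trait]
pub trait NodeExporter {
    async fn ensure_initialized(&self) -> Result<bool, String>;
    async fn commit_config(&self) -> Result<(), String>;
    async fn reload_restart(&self) -> Result<(), String>;
}

// Hypothetical topology that satisfies the capability with no-ops.
struct OpnSenseBox;

#[async_trait]
impl NodeExporter for OpnSenseBox {
    async fn ensure_initialized(&self) -> Result<bool, String> { Ok(true) }
    async fn commit_config(&self) -> Result<(), String> { Ok(()) }
    async fn reload_restart(&self) -> Result<(), String> { Ok(()) }
}
```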
@@ -1,6 +1,6 @@
 use serde::{Deserialize, Serialize};

-#[derive(Copy, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, PartialOrd, Ord)]
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize, PartialOrd, Ord)]
 pub struct MacAddress(pub [u8; 6]);

 impl MacAddress {
@@ -19,14 +19,6 @@ impl From<&MacAddress> for String {
     }
 }

-impl std::fmt::Debug for MacAddress {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        f.debug_tuple("MacAddress")
-            .field(&String::from(self))
-            .finish()
-    }
-}
-
 impl std::fmt::Display for MacAddress {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         f.write_str(&String::from(self))
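
Worth noting: swapping the hand-written `Debug` for the derive changes the output format. The removed impl rendered the colon-separated string; the derive prints the raw byte tuple. A self-contained illustration:

```rust
#[derive(Debug)]
pub struct MacAddress(pub [u8; 6]);

fn main() {
    let mac = MacAddress([0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xF1]);
    // Derived Debug prints the raw bytes in decimal, not "AA:BB:CC:DD:EE:F1":
    println!("{mac:?}"); // MacAddress([170, 187, 204, 221, 238, 241])
}
```

The `Display` impl, which formats via `String::from`, is unchanged, so user-facing output keeps the `AA:BB:CC:DD:EE:F1` form.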
@@ -9,7 +9,7 @@ pub struct Interface {
     pub physical_interface_name: String,
     pub descr: Option<MaybeString>,
     pub mtu: Option<MaybeString>,
-    pub enable: Option<MaybeString>,
+    pub enable: MaybeString,
     pub lock: Option<MaybeString>,
     #[yaserde(rename = "spoofmac")]
     pub spoof_mac: Option<MaybeString>,
@@ -134,15 +134,19 @@ mod test {
         <interfaces>
           <paul>
             <if></if>
+            <enable/>
           </paul>
           <anotherpaul>
             <if></if>
+            <enable/>
           </anotherpaul>
           <thirdone>
             <if></if>
+            <enable/>
           </thirdone>
           <andgofor4>
             <if></if>
+            <enable/>
           </andgofor4>
         </interfaces>
         <bar>foo</bar>
@@ -17,7 +17,7 @@ pub struct OPNsense {
     pub interfaces: NamedList<Interface>,
     pub dhcpd: NamedList<DhcpInterface>,
     pub snmpd: Snmpd,
-    pub syslog: Syslog,
+    pub syslog: Option<Syslog>,
     pub nat: Nat,
     pub filter: Filters,
     pub load_balancer: Option<LoadBalancer>,
@@ -190,7 +190,7 @@ pub struct System {
     pub webgui: WebGui,
     pub usevirtualterminal: u8,
     pub disablenatreflection: Option<String>,
-    pub disableconsolemenu: u8,
+    pub disableconsolemenu: Option<u8>,
     pub disablevlanhwfilter: u8,
     pub disablechecksumoffloading: u8,
     pub disablesegmentationoffloading: u8,
@@ -216,7 +216,7 @@ pub struct System {
     pub maximumfrags: Option<MaybeString>,
     pub aliasesresolveinterval: Option<MaybeString>,
     pub maximumtableentries: Option<MaybeString>,
-    pub language: String,
+    pub language: Option<String>,
     pub dnsserver: Option<MaybeString>,
     pub dns1gw: Option<String>,
     pub dns2gw: Option<String>,
@@ -233,16 +233,16 @@ pub struct System {
 #[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
 pub struct Ssh {
     pub group: String,
-    pub noauto: u8,
-    pub interfaces: MaybeString,
-    pub kex: MaybeString,
-    pub ciphers: MaybeString,
-    pub macs: MaybeString,
-    pub keys: MaybeString,
-    pub enabled: String,
-    pub passwordauth: u8,
-    pub keysig: MaybeString,
-    pub permitrootlogin: u8,
+    pub noauto: Option<u8>,
+    pub interfaces: Option<MaybeString>,
+    pub kex: Option<MaybeString>,
+    pub ciphers: Option<MaybeString>,
+    pub macs: Option<MaybeString>,
+    pub keys: Option<MaybeString>,
+    pub enabled: Option<String>,
+    pub passwordauth: Option<u8>,
+    pub keysig: Option<MaybeString>,
+    pub permitrootlogin: Option<u8>,
     pub rekeylimit: Option<MaybeString>,
 }

@@ -306,11 +306,11 @@ pub struct WebGui {
     pub protocol: String,
     #[yaserde(rename = "ssl-certref")]
     pub ssl_certref: String,
-    pub port: MaybeString,
+    pub port: Option<MaybeString>,
     #[yaserde(rename = "ssl-ciphers")]
-    pub ssl_ciphers: MaybeString,
-    pub interfaces: MaybeString,
-    pub compression: MaybeString,
+    pub ssl_ciphers: Option<MaybeString>,
+    pub interfaces: Option<MaybeString>,
+    pub compression: Option<MaybeString>,
     pub nohttpreferercheck: Option<u8>,
 }

@@ -433,7 +433,7 @@ pub struct OPNsenseXmlSection {
     #[yaserde(rename = "Interfaces")]
     pub interfaces: Option<ConfigInterfaces>,
     #[yaserde(rename = "NodeExporter")]
-    pub node_exporter: Option<RawXml>,
+    pub node_exporter: Option<NodeExporter>,
     #[yaserde(rename = "Kea")]
     pub kea: Option<RawXml>,
     pub monit: Option<Monit>,
@@ -1595,3 +1595,21 @@ pub struct Ifgroups {
     #[yaserde(attribute = true)]
     pub version: String,
 }
+
+#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
+pub struct NodeExporter {
+    pub enabled: u8,
+    pub listenaddress: Option<MaybeString>,
+    pub listenport: u16,
+    pub cpu: u8,
+    pub exec: u8,
+    pub filesystem: u8,
+    pub loadavg: u8,
+    pub meminfo: u8,
+    pub netdev: u8,
+    pub time: u8,
+    pub devstat: u8,
+    pub interrupts: u8,
+    pub ntp: u8,
+    pub zfs: u8,
+}
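
The new `NodeExporter` struct gives the previously opaque `RawXml` section a typed schema. A hedged round-trip sketch with a trimmed-down field set, assuming yaserde's defaults as the other structs in this file rely on them (root element named after the struct, fields matched by their lowercase identifiers):

```rust
use yaserde_derive::{YaDeserialize, YaSerialize};

// Trimmed-down version of the struct above, for illustration only.
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
pub struct NodeExporter {
    pub enabled: u8,
    pub listenport: u16,
    pub cpu: u8,
}

fn main() {
    // A minimal <NodeExporter> section, as it might appear in config.xml.
    let xml = "<NodeExporter><enabled>1</enabled><listenport>9100</listenport><cpu>1</cpu></NodeExporter>";
    let parsed: NodeExporter = yaserde::de::from_str(xml).unwrap();
    assert_eq!(parsed.enabled, 1);
    assert_eq!(parsed.listenport, 9100);
}
```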
@@ -5,7 +5,8 @@ use crate::{
     error::Error,
     modules::{
         caddy::CaddyConfig, dhcp_legacy::DhcpConfigLegacyISC, dns::UnboundDnsConfig,
-        dnsmasq::DhcpConfigDnsMasq, load_balancer::LoadBalancerConfig, tftp::TftpConfig,
+        dnsmasq::DhcpConfigDnsMasq, load_balancer::LoadBalancerConfig,
+        node_exporter::NodeExporterConfig, tftp::TftpConfig,
     },
 };
 use log::{debug, info, trace, warn};
@@ -13,6 +14,7 @@ use opnsense_config_xml::OPNsense;
 use russh::client;
 use serde::Serialize;
 use sha2::Digest;
+use tokio::time::{sleep, Duration};

 use super::{ConfigManager, OPNsenseShell};

@@ -71,6 +73,10 @@ impl Config {
         LoadBalancerConfig::new(&mut self.opnsense, self.shell.clone())
     }

+    pub fn node_exporter(&mut self) -> NodeExporterConfig<'_> {
+        NodeExporterConfig::new(&mut self.opnsense, self.shell.clone())
+    }
+
     pub async fn upload_files(&self, source: &str, destination: &str) -> Result<String, Error> {
         self.shell.upload_folder(source, destination).await
     }
@@ -150,7 +156,8 @@ impl Config {

     async fn reload_config(&mut self) -> Result<(), Error> {
         info!("Reloading opnsense live config");
-        let (opnsense, sha2) = Self::get_opnsense_instance(self.repository.clone()).await?;
+        let (opnsense, _sha2) = Self::get_opnsense_instance(self.repository.clone()).await?;
+        self.opnsense = opnsense;
         Ok(())
     }

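
The `reload_config` hunk fixes a real bug: the freshly parsed config was bound to a local and then dropped, so `self.opnsense` silently kept the stale state. A self-contained sketch of the before/after pattern, with names simplified:

```rust
// Sketch (simplified): the broken variant loads the new state but never
// stores it; the fixed variant assigns it back onto self.
struct Manager {
    state: String,
}

impl Manager {
    fn fetch() -> String {
        "fresh".to_string()
    }

    fn reload_broken(&mut self) {
        let _state = Self::fetch(); // loaded, then silently dropped
    }

    fn reload_fixed(&mut self) {
        self.state = Self::fetch(); // the fix: persist the reloaded state
    }
}

fn main() {
    let mut m = Manager { state: "stale".into() };
    m.reload_broken();
    assert_eq!(m.state, "stale"); // bug: still the old state
    m.reload_fixed();
    assert_eq!(m.state, "fresh");
}
```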
@@ -4,4 +4,5 @@ pub mod dhcp_legacy;
 pub mod dns;
 pub mod dnsmasq;
 pub mod load_balancer;
+pub mod node_exporter;
 pub mod tftp;
opnsense-config/src/modules/node_exporter.rs (new file, 54 lines)
@@ -0,0 +1,54 @@
+use std::sync::Arc;
+
+use opnsense_config_xml::{NodeExporter, OPNsense};
+
+use crate::{config::OPNsenseShell, Error};
+
+pub struct NodeExporterConfig<'a> {
+    opnsense: &'a mut OPNsense,
+    opnsense_shell: Arc<dyn OPNsenseShell>,
+}
+
+impl<'a> NodeExporterConfig<'a> {
+    pub fn new(opnsense: &'a mut OPNsense, opnsense_shell: Arc<dyn OPNsenseShell>) -> Self {
+        Self {
+            opnsense,
+            opnsense_shell,
+        }
+    }
+
+    pub fn get_full_config(&self) -> &Option<NodeExporter> {
+        &self.opnsense.opnsense.node_exporter
+    }
+
+    fn with_node_exporter<F, R>(&mut self, f: F) -> Result<R, &'static str>
+    where
+        F: FnOnce(&mut NodeExporter) -> R,
+    {
+        match &mut self.opnsense.opnsense.node_exporter.as_mut() {
+            Some(node_exporter) => Ok(f(node_exporter)),
+            None => Err("node exporter is not yet installed"),
+        }
+    }
+
+    pub fn enable(&mut self, enabled: bool) -> Result<(), &'static str> {
+        self.with_node_exporter(|node_exporter| node_exporter.enabled = enabled as u8)
+            .map(|_| ())
+    }
+
+    pub async fn reload_restart(&self) -> Result<(), Error> {
+        self.opnsense_shell
+            .exec("configctl node_exporter stop")
+            .await?;
+        self.opnsense_shell
+            .exec("configctl template reload OPNsense/NodeExporter")
+            .await?;
+        self.opnsense_shell
+            .exec("configctl node_exporter configtest")
+            .await?;
+        self.opnsense_shell
+            .exec("configctl node_exporter start")
+            .await?;
+        Ok(())
+    }
+}
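
The `with_node_exporter` helper is the piece worth imitating: it applies a closure to an optional config section and turns an absent section into an error instead of a panic. A self-contained sketch of the same pattern outside the crate:

```rust
// Minimal reproduction of the with_node_exporter pattern used above:
// mutate an optional section if present, or report that it is missing.
struct NodeExporter {
    enabled: u8,
}

struct Config {
    node_exporter: Option<NodeExporter>,
}

impl Config {
    fn with_node_exporter<F, R>(&mut self, f: F) -> Result<R, &'static str>
    where
        F: FnOnce(&mut NodeExporter) -> R,
    {
        match self.node_exporter.as_mut() {
            Some(ne) => Ok(f(ne)),
            None => Err("node exporter is not yet installed"),
        }
    }
}

fn main() {
    let mut cfg = Config {
        node_exporter: Some(NodeExporter { enabled: 0 }),
    };
    cfg.with_node_exporter(|ne| ne.enabled = true as u8).unwrap();
    assert_eq!(cfg.node_exporter.unwrap().enabled, 1);
}
```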