Merge remote-tracking branch 'origin/master' into feat/install_opnsense_node_exporter
All checks were successful
Run Check Script / check (pull_request) Successful in 55s
61 Cargo.lock generated
@@ -690,6 +690,23 @@ dependencies = [
 "tokio",
]

[[package]]
name = "brocade-switch"
version = "0.1.0"
dependencies = [
 "async-trait",
 "brocade",
 "env_logger",
 "harmony",
 "harmony_cli",
 "harmony_macros",
 "harmony_types",
 "log",
 "serde",
 "tokio",
 "url",
]

[[package]]
name = "brotli"
version = "8.0.2"
@@ -1835,6 +1852,21 @@ dependencies = [
 "url",
]

[[package]]
name = "example-operatorhub-catalogsource"
version = "0.1.0"
dependencies = [
 "cidr",
 "env_logger",
 "harmony",
 "harmony_cli",
 "harmony_macros",
 "harmony_types",
 "log",
 "tokio",
 "url",
]

[[package]]
name = "example-opnsense"
version = "0.1.0"
@@ -2479,6 +2511,19 @@ dependencies = [
 "tokio",
]

[[package]]
name = "harmony_inventory_builder"
version = "0.1.0"
dependencies = [
 "cidr",
 "harmony",
 "harmony_cli",
 "harmony_macros",
 "harmony_types",
 "tokio",
 "url",
]

[[package]]
name = "harmony_macros"
version = "0.1.0"
@@ -2544,6 +2589,7 @@ dependencies = [
name = "harmony_types"
version = "0.1.0"
dependencies = [
 "log",
 "rand 0.9.2",
 "serde",
 "url",
@@ -6049,6 +6095,21 @@ version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683"

[[package]]
name = "test-score"
version = "0.1.0"
dependencies = [
 "base64 0.22.1",
 "env_logger",
 "harmony",
 "harmony_cli",
 "harmony_macros",
 "harmony_types",
 "log",
 "tokio",
 "url",
]

[[package]]
name = "thiserror"
version = "1.0.69"
114 adr/015-higher-order-topologies.md Normal file
@@ -0,0 +1,114 @@
# Architecture Decision Record: Higher-Order Topologies

**Initial Author:** Jean-Gabriel Gill-Couture
**Initial Date:** 2025-12-08
**Last Updated Date:** 2025-12-08

## Status

Implemented

## Context

Harmony models infrastructure as **Topologies** (deployment targets like `K8sAnywhereTopology`, `LinuxHostTopology`) implementing **Capabilities** (tech traits like `PostgreSQL`, `Docker`).

**Higher-Order Topologies** (e.g., `FailoverTopology<T>`) compose/orchestrate capabilities *across* multiple underlying topologies (e.g., primary+replica `T`).

A naive design requires a manual `impl Capability for HigherOrderTopology<T>` *per T per capability*, causing:
- **Impl explosion**: N topologies × M capabilities = N×M boilerplate.
- **ISP violation**: Topologies forced to impl unrelated capabilities.
- **Maintenance hell**: A new topology needs impls for *all* orchestrated capabilities; a new capability needs impls for *all* topologies and higher-order types.
- **Barrier to extension**: Users can't easily add topologies without todos/panics.

This makes scaling Harmony impractical as the ecosystem grows.

## Decision

Use **blanket trait impls** on higher-order topologies to *automatically* derive orchestration:

````rust
/// Higher-Order Topology: Orchestrates capabilities across sub-topologies.
pub struct FailoverTopology<T> {
    /// Primary sub-topology.
    primary: T,
    /// Replica sub-topology.
    replica: T,
}

/// Automatically provides PostgreSQL failover for *any* `T: PostgreSQL`.
/// Delegates to primary for queries; orchestrates deploy across both.
#[async_trait]
impl<T: PostgreSQL> PostgreSQL for FailoverTopology<T> {
    async fn deploy(&self, config: &PostgreSQLConfig) -> Result<String, String> {
        // Deploy primary; extract certs/endpoint;
        // deploy replica with pg_basebackup + TLS passthrough.
        // (Full impl logged/elaborated.)
        todo!()
    }

    // Delegate queries to primary.
    async fn get_replication_certs(&self, cluster_name: &str) -> Result<ReplicationCerts, String> {
        self.primary.get_replication_certs(cluster_name).await
    }
    // ...
}

/// Similarly for other capabilities.
#[async_trait]
impl<T: Docker> Docker for FailoverTopology<T> {
    // Failover Docker orchestration.
}
````

**Key properties:**
- **Auto-derivation**: `Failover<K8sAnywhere>` gets `PostgreSQL` iff `K8sAnywhere: PostgreSQL`.
- **No boilerplate**: One blanket impl per capability *per higher-order type*.

## Rationale

- **Composition via generics**: The Rust trait solver auto-selects impls; zero runtime cost.
- **Compile-time safety**: Missing `T: Capability` → compile error (no panics).
- **Scalable**: O(capabilities) impls per higher-order type; a new `T` auto-works.
- **ISP-respecting**: Capabilities only surface if the sub-topology provides them.
- **Centralized logic**: Orchestration (e.g., cert propagation) lives in one place.

**Example usage:**
````rust
// ✅ Works: K8sAnywhere: PostgreSQL → Failover provides failover PG
let pg_failover: FailoverTopology<K8sAnywhereTopology> = ...;
pg_failover.deploy_pg(config).await;

// ✅ Works: LinuxHost: Docker → Failover provides failover Docker
let docker_failover: FailoverTopology<LinuxHostTopology> = ...;
docker_failover.deploy_docker(...).await;

// ❌ Compile fail: K8sAnywhere !: Docker
let invalid: FailoverTopology<K8sAnywhereTopology>;
invalid.deploy_docker(...); // `T: Docker` bound unsatisfied
````

## Consequences

**Pros:**
- **Extensible**: A new topology `AWSTopology: PostgreSQL` → instant `Failover<AWSTopology>: PostgreSQL`.
- **Lean**: No useless impls (e.g., no `K8sAnywhere: Docker`).
- **Observable**: Logs trace every step.

**Cons:**
- **Monomorphization**: Generics generate code per `T` (mitigated: few Ts).
- **Delegation opacity**: Relies on rustdoc/logs for internals.

## Alternatives considered

| Approach | Pros | Cons |
|----------|------|------|
| **Manual per-T impls**<br>`impl PG for Failover<K8s> {..}`<br>`impl PG for Failover<Linux> {..}` | Explicit control | N×M explosion; violates ISP; hard to extend. |
| **Dynamic trait objects**<br>`Box<dyn AnyCapability>` | Runtime flex | Perf hit; type erasure; error-prone dispatch. |
| **Mega-topology trait**<br>All-in-one `OrchestratedTopology` | Simple wiring | Monolithic; poor composition. |
| **Registry dispatch**<br>Runtime capability lookup | Decoupled | Complex; no compile safety; perf/debug overhead. |

**Selected**: Blanket impls leverage Rust generics for safe, zero-cost composition.

## Additional Notes

- Applies to `MultisiteTopology<T>`, `ShardedTopology<T>`, etc.
- `FailoverTopology` in `failover.rs` is the first implementation.
153 adr/015-higher-order-topologies/example.rs Normal file
@@ -0,0 +1,153 @@
//! Example of Higher-Order Topologies in Harmony.
//! Demonstrates how `FailoverTopology<T>` automatically provides failover for *any* capability
//! supported by a sub-topology `T` via blanket trait impls.
//!
//! Key insight: No manual impls per T or capability -- scales effortlessly.
//! Users can:
//! - Write a new `Topology` (impl capabilities on a struct).
//! - Compose with `FailoverTopology` (gets capabilities if T has them).
//! - Compilation fails if a capability is missing (safety).

use async_trait::async_trait;
use tokio;

/// Capability trait: Deploy and manage PostgreSQL.
#[async_trait]
pub trait PostgreSQL {
    async fn deploy(&self, config: &PostgreSQLConfig) -> Result<String, String>;
    async fn get_replication_certs(&self, cluster_name: &str) -> Result<ReplicationCerts, String>;
}

/// Capability trait: Deploy Docker.
#[async_trait]
pub trait Docker {
    async fn deploy_docker(&self) -> Result<String, String>;
}

/// Configuration for PostgreSQL deployments.
#[derive(Clone)]
pub struct PostgreSQLConfig;

/// Replication certificates.
#[derive(Clone)]
pub struct ReplicationCerts;

/// Concrete topology: Kubernetes Anywhere (supports PostgreSQL).
#[derive(Clone)]
pub struct K8sAnywhereTopology;

#[async_trait]
impl PostgreSQL for K8sAnywhereTopology {
    async fn deploy(&self, _config: &PostgreSQLConfig) -> Result<String, String> {
        // Real impl: Use k8s helm chart, operator, etc.
        Ok("K8sAnywhere PostgreSQL deployed".to_string())
    }

    async fn get_replication_certs(&self, _cluster_name: &str) -> Result<ReplicationCerts, String> {
        Ok(ReplicationCerts)
    }
}

/// Concrete topology: Linux Host (supports Docker).
#[derive(Clone)]
pub struct LinuxHostTopology;

#[async_trait]
impl Docker for LinuxHostTopology {
    async fn deploy_docker(&self) -> Result<String, String> {
        // Real impl: Install/configure Docker on host.
        Ok("LinuxHost Docker deployed".to_string())
    }
}

/// Higher-Order Topology: Composes multiple sub-topologies (primary + replica).
/// Automatically derives *all* capabilities of `T` with failover orchestration.
///
/// - If `T: PostgreSQL`, then `FailoverTopology<T>: PostgreSQL` (blanket impl).
/// - Same for `Docker`, etc. No boilerplate!
/// - Compile-time safe: Missing `T: Capability` → error.
#[derive(Clone)]
pub struct FailoverTopology<T> {
    /// Primary sub-topology.
    pub primary: T,
    /// Replica sub-topology.
    pub replica: T,
}

/// Blanket impl: Failover PostgreSQL if T provides PostgreSQL.
/// Delegates reads to primary; deploys to both.
#[async_trait]
impl<T: PostgreSQL + Send + Sync + Clone> PostgreSQL for FailoverTopology<T> {
    async fn deploy(&self, config: &PostgreSQLConfig) -> Result<String, String> {
        // Orchestrate: Deploy primary first, then replica (e.g., via pg_basebackup).
        let primary_result = self.primary.deploy(config).await?;
        let replica_result = self.replica.deploy(config).await?;
        Ok(format!("Failover PG deployed: {} | {}", primary_result, replica_result))
    }

    async fn get_replication_certs(&self, cluster_name: &str) -> Result<ReplicationCerts, String> {
        // Delegate to primary (replica follows).
        self.primary.get_replication_certs(cluster_name).await
    }
}

/// Blanket impl: Failover Docker if T provides Docker.
#[async_trait]
impl<T: Docker + Send + Sync + Clone> Docker for FailoverTopology<T> {
    async fn deploy_docker(&self) -> Result<String, String> {
        // Orchestrate across primary + replica.
        let primary_result = self.primary.deploy_docker().await?;
        let replica_result = self.replica.deploy_docker().await?;
        Ok(format!("Failover Docker deployed: {} | {}", primary_result, replica_result))
    }
}

#[tokio::main]
async fn main() {
    let config = PostgreSQLConfig;

    println!("=== ✅ PostgreSQL Failover (K8sAnywhere supports PG) ===");
    let pg_failover = FailoverTopology {
        primary: K8sAnywhereTopology,
        replica: K8sAnywhereTopology,
    };
    let result = pg_failover.deploy(&config).await.unwrap();
    println!("Result: {}", result);

    println!("\n=== ✅ Docker Failover (LinuxHost supports Docker) ===");
    let docker_failover = FailoverTopology {
        primary: LinuxHostTopology,
        replica: LinuxHostTopology,
    };
    let result = docker_failover.deploy_docker().await.unwrap();
    println!("Result: {}", result);

    println!("\n=== ❌ Would fail to compile (K8sAnywhere !: Docker) ===");
    // let invalid = FailoverTopology {
    //     primary: K8sAnywhereTopology,
    //     replica: K8sAnywhereTopology,
    // };
    // invalid.deploy_docker().await.unwrap(); // Error: `K8sAnywhereTopology: Docker` not satisfied!
    // Very clear error message:
    // error[E0599]: the method `deploy_docker` exists for struct `FailoverTopology<K8sAnywhereTopology>`, but its trait bounds were not satisfied
    //   --> src/main.rs:90:9
    //    |
    // 4  | pub struct FailoverTopology<T> {
    //    | ------------------------------ method `deploy_docker` not found for this struct because it doesn't satisfy `FailoverTopology<K8sAnywhereTopology>: Docker`
    // ...
    // 37 | struct K8sAnywhereTopology;
    //    | -------------------------- doesn't satisfy `K8sAnywhereTopology: Docker`
    // ...
    // 90 |     invalid.deploy_docker(); // `T: Docker` bound unsatisfied
    //    |             ^^^^^^^^^^^^^ method cannot be called on `FailoverTopology<K8sAnywhereTopology>` due to unsatisfied trait bounds
    //    |
    // note: trait bound `K8sAnywhereTopology: Docker` was not satisfied
    //   --> src/main.rs:61:9
    //    |
    // 61 | impl<T: Docker + Send + Sync> Docker for FailoverTopology<T> {
    //    |         ^^^^^^ ------ -------------------
    //    |         |
    //    |         unsatisfied trait bound introduced here
    // note: the trait `Docker` must be implemented
}
@@ -1,6 +1,6 @@
use std::net::{IpAddr, Ipv4Addr};

use brocade::BrocadeOptions;
use brocade::{BrocadeOptions, ssh};
use harmony_secret::{Secret, SecretManager};
use harmony_types::switch::PortLocation;
use serde::{Deserialize, Serialize};
@@ -16,23 +16,28 @@ async fn main() {
    env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init();

    // let ip = IpAddr::V4(Ipv4Addr::new(10, 0, 0, 250)); // old brocade @ ianlet
    let ip = IpAddr::V4(Ipv4Addr::new(192, 168, 55, 101)); // brocade @ sto1
    let ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); // brocade @ sto1
    // let ip = IpAddr::V4(Ipv4Addr::new(192, 168, 4, 11)); // brocade @ st
    let switch_addresses = vec![ip];

    let config = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
        .await
        .unwrap();
    // let config = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
    //     .await
    //     .unwrap();

    let brocade = brocade::init(
        &switch_addresses,
        22,
        &config.username,
        &config.password,
        Some(BrocadeOptions {
        // &config.username,
        // &config.password,
        "admin",
        "password",
        BrocadeOptions {
            dry_run: true,
            ssh: ssh::SshOptions {
                port: 2222,
                ..Default::default()
            },
            ..Default::default()
        }),
        },
    )
    .await
    .expect("Brocade client failed to connect");
@@ -54,6 +59,7 @@ async fn main() {
    }

    println!("--------------");
    todo!();
    let channel_name = "1";
    brocade.clear_port_channel(channel_name).await.unwrap();
@@ -1,7 +1,8 @@
use super::BrocadeClient;
use crate::{
    BrocadeInfo, Error, ExecutionMode, InterSwitchLink, InterfaceInfo, MacAddressEntry,
    PortChannelId, PortOperatingMode, parse_brocade_mac_address, shell::BrocadeShell,
    PortChannelId, PortOperatingMode, SecurityLevel, parse_brocade_mac_address,
    shell::BrocadeShell,
};

use async_trait::async_trait;
@@ -140,7 +141,7 @@ impl BrocadeClient for FastIronClient {

    async fn configure_interfaces(
        &self,
        _interfaces: Vec<(String, PortOperatingMode)>,
        _interfaces: &Vec<(String, PortOperatingMode)>,
    ) -> Result<(), Error> {
        todo!()
    }
@@ -209,4 +210,20 @@ impl BrocadeClient for FastIronClient {
        info!("[Brocade] Port-channel '{channel_name}' cleared.");
        Ok(())
    }

    async fn enable_snmp(&self, user_name: &str, auth: &str, des: &str) -> Result<(), Error> {
        let commands = vec![
            "configure terminal".into(),
            "snmp-server view ALL 1 included".into(),
            "snmp-server group public v3 priv read ALL".into(),
            format!(
                "snmp-server user {user_name} groupname public auth md5 auth-password {auth} priv des priv-password {des}"
            ),
            "exit".into(),
        ];
        self.shell
            .run_commands(commands, ExecutionMode::Regular)
            .await?;
        Ok(())
    }
}
@@ -14,11 +14,12 @@ use async_trait::async_trait;
use harmony_types::net::MacAddress;
use harmony_types::switch::{PortDeclaration, PortLocation};
use regex::Regex;
use serde::Serialize;

mod fast_iron;
mod network_operating_system;
mod shell;
mod ssh;
pub mod ssh;

#[derive(Default, Clone, Debug)]
pub struct BrocadeOptions {
@@ -118,7 +119,7 @@ impl fmt::Display for InterfaceType {
}

/// Defines the primary configuration mode of a switch interface, representing mutually exclusive roles.
#[derive(Debug, PartialEq, Eq, Clone)]
#[derive(Debug, PartialEq, Eq, Clone, Serialize)]
pub enum PortOperatingMode {
    /// The interface is explicitly configured for Brocade fabric roles (ISL or Trunk enabled).
    Fabric,
@@ -141,12 +142,11 @@ pub enum InterfaceStatus {

pub async fn init(
    ip_addresses: &[IpAddr],
    port: u16,
    username: &str,
    password: &str,
    options: Option<BrocadeOptions>,
    options: BrocadeOptions,
) -> Result<Box<dyn BrocadeClient + Send + Sync>, Error> {
    let shell = BrocadeShell::init(ip_addresses, port, username, password, options).await?;
    let shell = BrocadeShell::init(ip_addresses, username, password, options).await?;

    let version_info = shell
        .with_session(ExecutionMode::Regular, |session| {
@@ -208,7 +208,7 @@ pub trait BrocadeClient: std::fmt::Debug {
    /// Configures a set of interfaces to be operated with a specified mode (access ports, ISL, etc.).
    async fn configure_interfaces(
        &self,
        interfaces: Vec<(String, PortOperatingMode)>,
        interfaces: &Vec<(String, PortOperatingMode)>,
    ) -> Result<(), Error>;

    /// Scans the existing configuration to find the next available (unused)
@@ -237,6 +237,15 @@ pub trait BrocadeClient: std::fmt::Debug {
        ports: &[PortLocation],
    ) -> Result<(), Error>;

    /// Enables the Simple Network Management Protocol (SNMP) server on the switch.
    ///
    /// # Parameters
    ///
    /// * `user_name`: The user name for the SNMP server
    /// * `auth`: The password used by the authentication process to verify the identity of a device
    /// * `des`: The Data Encryption Standard algorithm key
    async fn enable_snmp(&self, user_name: &str, auth: &str, des: &str) -> Result<(), Error>;

    /// Removes all configuration associated with the specified Port-Channel name.
    ///
    /// This operation should be idempotent; attempting to clear a non-existent
@@ -300,6 +309,11 @@ fn parse_brocade_mac_address(value: &str) -> Result<MacAddress, String> {
    Ok(MacAddress(bytes))
}

#[derive(Debug)]
pub enum SecurityLevel {
    AuthPriv(String),
}

#[derive(Debug)]
pub enum Error {
    NetworkError(String),
@@ -8,7 +8,7 @@ use regex::Regex;
use crate::{
    BrocadeClient, BrocadeInfo, Error, ExecutionMode, InterSwitchLink, InterfaceInfo,
    InterfaceStatus, InterfaceType, MacAddressEntry, PortChannelId, PortOperatingMode,
    parse_brocade_mac_address, shell::BrocadeShell,
    SecurityLevel, parse_brocade_mac_address, shell::BrocadeShell,
};

#[derive(Debug)]
@@ -187,7 +187,7 @@ impl BrocadeClient for NetworkOperatingSystemClient {

    async fn configure_interfaces(
        &self,
        interfaces: Vec<(String, PortOperatingMode)>,
        interfaces: &Vec<(String, PortOperatingMode)>,
    ) -> Result<(), Error> {
        info!("[Brocade] Configuring {} interface(s)...", interfaces.len());

@@ -204,9 +204,12 @@ impl BrocadeClient for NetworkOperatingSystemClient {
                PortOperatingMode::Trunk => {
                    commands.push("switchport".into());
                    commands.push("switchport mode trunk".into());
                    commands.push("no spanning-tree shutdown".into());
                    commands.push("switchport trunk allowed vlan all".into());
                    commands.push("no switchport trunk tag native-vlan".into());
                    commands.push("spanning-tree shutdown".into());
                    commands.push("no fabric isl enable".into());
                    commands.push("no fabric trunk enable".into());
                    commands.push("no shutdown".into());
                }
                PortOperatingMode::Access => {
                    commands.push("switchport".into());
@@ -330,4 +333,20 @@ impl BrocadeClient for NetworkOperatingSystemClient {
        info!("[Brocade] Port-channel '{channel_name}' cleared.");
        Ok(())
    }

    async fn enable_snmp(&self, user_name: &str, auth: &str, des: &str) -> Result<(), Error> {
        let commands = vec![
            "configure terminal".into(),
            "snmp-server view ALL 1 included".into(),
            "snmp-server group public v3 priv read ALL".into(),
            format!(
                "snmp-server user {user_name} groupname public auth md5 auth-password {auth} priv des priv-password {des}"
            ),
            "exit".into(),
        ];
        self.shell
            .run_commands(commands, ExecutionMode::Regular)
            .await?;
        Ok(())
    }
}
@@ -16,7 +16,6 @@ use tokio::time::timeout;
#[derive(Debug)]
pub struct BrocadeShell {
    ip: IpAddr,
    port: u16,
    username: String,
    password: String,
    options: BrocadeOptions,
@@ -27,33 +26,31 @@ pub struct BrocadeShell {
impl BrocadeShell {
    pub async fn init(
        ip_addresses: &[IpAddr],
        port: u16,
        username: &str,
        password: &str,
        options: Option<BrocadeOptions>,
        options: BrocadeOptions,
    ) -> Result<Self, Error> {
        let ip = ip_addresses
            .first()
            .ok_or_else(|| Error::ConfigurationError("No IP addresses provided".to_string()))?;

        let base_options = options.unwrap_or_default();
        let options = ssh::try_init_client(username, password, ip, base_options).await?;
        let brocade_ssh_client_options =
            ssh::try_init_client(username, password, ip, options).await?;

        Ok(Self {
            ip: *ip,
            port,
            username: username.to_string(),
            password: password.to_string(),
            before_all_commands: vec![],
            after_all_commands: vec![],
            options,
            options: brocade_ssh_client_options,
        })
    }

    pub async fn open_session(&self, mode: ExecutionMode) -> Result<BrocadeSession, Error> {
        BrocadeSession::open(
            self.ip,
            self.port,
            self.options.ssh.port,
            &self.username,
            &self.password,
            self.options.clone(),
@@ -2,6 +2,7 @@ use std::borrow::Cow;
use std::sync::Arc;

use async_trait::async_trait;
use log::debug;
use russh::client::Handler;
use russh::kex::DH_G1_SHA1;
use russh::kex::ECDH_SHA2_NISTP256;
@@ -10,29 +11,43 @@ use russh_keys::key::SSH_RSA;
use super::BrocadeOptions;
use super::Error;

#[derive(Default, Clone, Debug)]
#[derive(Clone, Debug)]
pub struct SshOptions {
    pub preferred_algorithms: russh::Preferred,
    pub port: u16,
}

impl Default for SshOptions {
    fn default() -> Self {
        Self {
            preferred_algorithms: Default::default(),
            port: 22,
        }
    }
}

impl SshOptions {
    fn ecdhsa_sha2_nistp256() -> Self {
    fn ecdhsa_sha2_nistp256(port: u16) -> Self {
        Self {
            preferred_algorithms: russh::Preferred {
                kex: Cow::Borrowed(&[ECDH_SHA2_NISTP256]),
                key: Cow::Borrowed(&[SSH_RSA]),
                ..Default::default()
            },
            port,
            ..Default::default()
        }
    }

    fn legacy() -> Self {
    fn legacy(port: u16) -> Self {
        Self {
            preferred_algorithms: russh::Preferred {
                kex: Cow::Borrowed(&[DH_G1_SHA1]),
                key: Cow::Borrowed(&[SSH_RSA]),
                ..Default::default()
            },
            port,
            ..Default::default()
        }
    }
}
@@ -57,18 +72,21 @@ pub async fn try_init_client(
    ip: &std::net::IpAddr,
    base_options: BrocadeOptions,
) -> Result<BrocadeOptions, Error> {
    let mut default = SshOptions::default();
    default.port = base_options.ssh.port;
    let ssh_options = vec![
        SshOptions::default(),
        SshOptions::ecdhsa_sha2_nistp256(),
        SshOptions::legacy(),
        default,
        SshOptions::ecdhsa_sha2_nistp256(base_options.ssh.port),
        SshOptions::legacy(base_options.ssh.port),
    ];

    for ssh in ssh_options {
        let opts = BrocadeOptions {
            ssh,
            ssh: ssh.clone(),
            ..base_options.clone()
        };
        let client = create_client(*ip, 22, username, password, &opts).await;
        debug!("Creating client {ip}:{} {username}", ssh.port);
        let client = create_client(*ip, ssh.port, username, password, &opts).await;

        match client {
            Ok(_) => {
BIN data/pxe/okd/http_files/harmony_inventory_agent (Stored with Git LFS)
Binary file not shown.
133 docs/doc-clone-and-restore-coreos.md Normal file
@@ -0,0 +1,133 @@
## Working procedure to clone and restore a CoreOS disk from an OKD cluster

### **Step 1 - take a backup**
```
sudo dd if=/dev/old of=/dev/backup status=progress
```

### **Step 2 - clone beginning of old disk to new**
```
sudo dd if=/dev/old of=/dev/new status=progress count=1000 bs=1M
```

### **Step 3 - verify and modify disk partitions**
List disk partitions:
```
sgdisk -p /dev/new
```
If the new disk is smaller than the old disk and there is free space on the XFS partition of the old disk, modify the partitions of the new disk:
```
gdisk /dev/new
```
Inside gdisk, the relevant commands are:
```
v -> verify table
p -> print table
d -> select a partition to delete
n -> recreate the partition with the same partition number as the deleted one
```
For the end sector, either specify the new end or just press Enter for the maximum available.
When asked about the partition type, enter the same type code (it will show the old one).
```
p -> verify
w -> write
```
Make an XFS file system on the new partition (<new4>):
```
sudo mkfs.xfs -f /dev/new4
```

### **Step 4 - copy old PARTUUID**

**Careful here.**
Get the old PARTUUID:
```
sgdisk -i <partition_number> /dev/old_disk # Note the "Partition unique GUID"
```
Get the labels:
```
sgdisk -p /dev/old_disk # Shows partition names in the table

blkid /dev/old_disk* # Shows PARTUUIDs and labels for all partitions
```
Set it on the new disk:
```
sgdisk -u <partition_number>:<old_partuuid> /dev/sdc
```
Set the partition name:
```
sgdisk -c <partition_number>:"<old_name>" /dev/sdc
```
Verify all:
```
lsblk -o NAME,SIZE,PARTUUID,PARTLABEL /dev/old_disk
```

### **Step 5 - Mount disks and copy files from old to new disk**

Mount the file systems before copying:

```
mkdir -p /mnt/new
mkdir -p /mnt/old
mount /dev/old4 /mnt/old
mount /dev/new4 /mnt/new
```
Copy:

With the -n flag this runs as a dry run:
```
rsync -aAXHvn --numeric-ids /source/ /destination/
```

```
rsync -aAXHv --numeric-ids /source/ /destination/
```

### **Step 6 - Set correct UUID for new partition 4**
To set the UUID with xfs_admin you must unmount first.

Unmount the devices:
```
umount /mnt/new
umount /mnt/old
```

To set the correct UUID for partition 4:
```
blkid /dev/old4
```
```
xfs_admin -U <old_uuid> /dev/new_partition
```
To set labels,
get the existing one:
```
sgdisk -i 4 /dev/sda | grep "Partition name"
```
then set it:
```
sgdisk -c 4:"<label_name>" /dev/sdc

or

(check existing with xfs_admin -l /dev/old_partition)
Use xfs_admin -L <label> /dev/new_partition
```

### **Step 7 - Verify**

Verify everything:
```
sgdisk -p /dev/sda # Old disk
sgdisk -p /dev/sdc # New disk
```
```
lsblk -o NAME,SIZE,PARTUUID,PARTLABEL /dev/sda
lsblk -o NAME,SIZE,PARTUUID,PARTLABEL /dev/sdc
```
```
blkid /dev/sda* | grep UUID=
blkid /dev/sdc* | grep UUID=
```
56 docs/doc-remove-worker-flag.md Normal file
@@ -0,0 +1,56 @@
## **Remove Worker flag from OKD Control Planes**

### **Context**
On OKD user-provisioned infrastructure, the control plane nodes can carry the label node-role.kubernetes.io/worker, which allows non-critical workloads to be scheduled on the control planes.

### **Observed Symptoms**
- After adding HAProxy servers to the backend, each backend appears down
- Traffic is redirected to the control planes instead of the workers
- The router-default pods are incorrectly scheduled on the control planes rather than on the workers
- Pods are being scheduled on the control planes, causing cluster instability

```
ss -tlnp | grep 80
```
- shows the haproxy process listening at 0.0.0.0:80 on the control planes
- same problem for port 443
- In the rook-ceph namespace, certain pods are deployed on control planes rather than on worker nodes

### **Cause**
- When installing UPI, the roles (master, worker) are not managed by the Machine Config Operator, and the control planes are made schedulable by default.

### **Diagnostic**
Check node labels:
```
oc get nodes --show-labels | grep control-plane
```
Inspect the kubelet configuration:

```
cat /etc/systemd/system/kubelet.service
```

Find the line:
```
--node-labels=node-role.kubernetes.io/control-plane,node-role.kubernetes.io/master,node-role.kubernetes.io/worker
```
→ the presence of the worker label confirms the problem.

Verify the flag doesn't come from the MCO:
```
oc get machineconfig | grep rendered-master
```

**Solution:**
To make the control planes non-schedulable, patch the cluster Scheduler resource:

```
oc patch scheduler cluster --type merge -p '{"spec":{"mastersSchedulable":false}}'
```
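
For reference, the patch simply sets `mastersSchedulable: false` on the cluster-scoped `Scheduler` resource; after it is applied, the relevant fields look roughly like this sketch:

```yaml
apiVersion: config.openshift.io/v1
kind: Scheduler
metadata:
  name: cluster
spec:
  # When false, control plane nodes lose the worker role and
  # regular workloads are no longer scheduled on them.
  mastersSchedulable: false
```
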
After the patch is applied, the workloads can be moved by draining the nodes:

```
oc adm cordon <cp-node>
oc adm drain <cp-node> --ignore-daemonsets --delete-emptydir-data
```
105 docs/modules/Multisite_PostgreSQL.md Normal file
@@ -0,0 +1,105 @@
# Design Document: Harmony PostgreSQL Module

**Status:** Draft
**Last Updated:** 2025-12-01
**Context:** Multi-site Data Replication & Orchestration

## 1. Overview

The Harmony PostgreSQL Module provides a high-level abstraction for deploying and managing high-availability PostgreSQL clusters across geographically distributed Kubernetes/OKD sites.

Instead of manually configuring complex replication slots, firewalls, and operator settings on each cluster, users define a single intent (a **Score**), and Harmony orchestrates the underlying infrastructure (the **Arrangement**) to establish a Primary-Replica architecture.

Currently, the implementation relies on the **CloudNativePG (CNPG)** operator as the backing engine.

## 2. Architecture

### 2.1 The Abstraction Model
Following **ADR 003 (Infrastructure Abstraction)**, Harmony separates the *intent* from the *implementation*.

1. **The Score (Intent):** The user defines a `MultisitePostgreSQL` resource. This describes *what* is needed (e.g., "A Postgres 15 cluster with 10GB storage, Primary on Site A, Replica on Site B").
2. **The Interpret (Action):** Harmony's `MultisitePostgreSQLInterpret` processes this Score and orchestrates the deployment on both sites to reach the state defined in the Score.
3. **The Capability (Implementation):** The PostgreSQL Capability is implemented by the K8sTopology; the Interpret can deploy it, configure it, and fetch information about it. The concrete implementation relies on the mature CloudNativePG operator to manage all the Kubernetes resources required.

### 2.2 Network Connectivity (TLS Passthrough)

One of the critical challenges in multi-site orchestration is secure connectivity between clusters that may have dynamic IPs or strict firewalls.

To solve this, we utilize **OKD/OpenShift Routes with TLS Passthrough**.

* **Mechanism:** The Primary site exposes a `Route` configured for `termination: passthrough`.
* **Routing:** The OpenShift HAProxy router inspects the **SNI (Server Name Indication)** header of the incoming TCP connection to route traffic to the correct PostgreSQL Pod.
* **Security:** SSL is **not** terminated at the ingress router. The encrypted stream is passed directly to the PostgreSQL instance. Mutual TLS (mTLS) authentication is handled natively by CNPG between the Primary and Replica instances.
* **Dynamic IPs:** Because connections are established via DNS hostnames (the Route URL), this architecture is resilient to dynamic IP changes at the Primary site.

#### Traffic Flow Diagram

```text
[ Site B: Replica ]                  [ Site A: Primary ]
        |                                    |
 (CNPG Instance) --[Encrypted TCP]--> (OKD HAProxy Router)
        |              (Port 443)            |
        |                                    |
        |                            [SNI Inspection]
        |                                    |
        |                                    v
        |                        (PostgreSQL Primary Pod)
        |                             (Port 5432)
```
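
To make the mechanism concrete, here is a minimal sketch of the passthrough `Route` the Primary site would expose. The host and service names are illustrative assumptions, not the exact resources Harmony generates:

```yaml
apiVersion: route.openshift.io/v1
kind: Route
metadata:
  name: postgres-finance-db          # illustrative name
  namespace: tenant-a
spec:
  host: postgres-finance-db.apps.site-paris.example.com
  to:
    kind: Service
    name: finance-db-rw              # assumed CNPG read/write service name
  port:
    targetPort: 5432
  tls:
    termination: passthrough         # TLS is NOT terminated at the router
```
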

## 3. Design Decisions

### Why CloudNativePG?
We selected CloudNativePG because it relies exclusively on standard Kubernetes primitives and uses the native PostgreSQL replication protocol (WAL shipping/streaming). This aligns with Harmony's goal of being "K8s Native."

### Why TLS Passthrough instead of VPN/NodePort?
* **NodePort:** Requires static IPs and opening non-standard ports on the firewall, which violates our security constraints.
* **VPN (e.g., Wireguard/Tailscale):** While secure, it introduces significant complexity (sidecars, key management) and external dependencies.
* **TLS Passthrough:** Leverages the existing Ingress/Router infrastructure already present in OKD. It requires zero additional software and respects multi-tenancy (Routes are namespaced).

### Configuration Philosophy (YAGNI)
The current design exposes a **generic configuration surface**. Users can configure standard parameters (storage size, CPU/memory requests, Postgres version).

**We explicitly do not expose advanced CNPG or PostgreSQL configurations at this stage.**

* **Reasoning:** We aim to keep the API surface small and manageable.
* **Future Path:** We plan to implement a "pass-through" mechanism to allow sending raw config maps or custom parameters to the underlying engine (CNPG) *only when a concrete use case arises*. Until then, we adhere to the **YAGNI (You Ain't Gonna Need It)** principle to avoid premature optimization and API bloat.

## 4. Usage Guide

To deploy a multi-site cluster, apply the `MultisitePostgreSQL` resource to the Harmony Control Plane.

### Example Manifest

```yaml
apiVersion: harmony.io/v1alpha1
kind: MultisitePostgreSQL
metadata:
  name: finance-db
  namespace: tenant-a
spec:
  version: "15"
  storage: "10Gi"
  resources:
    requests:
      cpu: "500m"
      memory: "1Gi"

  # Topology Definition
  topology:
    primary:
      site: "site-paris" # The name of the cluster in Harmony
    replicas:
      - site: "site-newyork"
```

### What happens next?
1. Harmony detects the CR.
2. **On Site Paris:** It deploys a CNPG Cluster (Primary) and creates a Passthrough Route `postgres-finance-db.apps.site-paris.example.com`.
3. **On Site New York:** It deploys a CNPG Cluster (Replica) configured with `externalClusters` pointing to the Paris Route (see the sketch after this list).
4. Data begins replicating immediately over the encrypted channel.
|
||||
|
||||
* **Connection Refused:** Ensure the Primary site's Route is successfully admitted by the Ingress Controller.
|
||||
* **Certificate Errors:** CNPG manages mTLS automatically. If errors persist, ensure the CA secrets were correctly propagated by Harmony from Primary to Replica namespaces.
|
||||
BIN empty_database.sqlite Normal file
Binary file not shown.
20 examples/brocade_snmp_server/Cargo.toml Normal file
@@ -0,0 +1,20 @@
[package]
name = "brocade-snmp-server"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true

[dependencies]
harmony = { path = "../../harmony" }
brocade = { path = "../../brocade" }
harmony_secret = { path = "../../harmony_secret" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
harmony_macros = { path = "../../harmony_macros" }
tokio = { workspace = true }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
base64.workspace = true
serde.workspace = true
22 examples/brocade_snmp_server/src/main.rs Normal file
@@ -0,0 +1,22 @@
use std::net::{IpAddr, Ipv4Addr};

use harmony::{
    inventory::Inventory, modules::brocade::BrocadeEnableSnmpScore, topology::K8sAnywhereTopology,
};

#[tokio::main]
async fn main() {
    let brocade_snmp_server = BrocadeEnableSnmpScore {
        switch_ips: vec![IpAddr::V4(Ipv4Addr::new(192, 168, 1, 111))],
        dry_run: true,
    };

    harmony_cli::run(
        Inventory::autoload(),
        K8sAnywhereTopology::from_env(),
        vec![Box::new(brocade_snmp_server)],
        None,
    )
    .await
    .unwrap();
}
19 examples/brocade_switch/Cargo.toml Normal file
@@ -0,0 +1,19 @@
[package]
name = "brocade-switch"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true

[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_macros = { path = "../../harmony_macros" }
harmony_types = { path = "../../harmony_types" }
tokio.workspace = true
url.workspace = true
async-trait.workspace = true
serde.workspace = true
log.workspace = true
env_logger.workspace = true
brocade = { path = "../../brocade" }
157 examples/brocade_switch/src/main.rs Normal file
@@ -0,0 +1,157 @@
use std::str::FromStr;

use async_trait::async_trait;
use brocade::{BrocadeOptions, PortOperatingMode};
use harmony::{
    data::Version,
    infra::brocade::BrocadeSwitchClient,
    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
    inventory::Inventory,
    score::Score,
    topology::{
        HostNetworkConfig, PortConfig, PreparationError, PreparationOutcome, Switch, SwitchClient,
        SwitchError, Topology,
    },
};
use harmony_macros::ip;
use harmony_types::{id::Id, net::MacAddress, switch::PortLocation};
use log::{debug, info};
use serde::Serialize;

#[tokio::main]
async fn main() {
    let switch_score = BrocadeSwitchScore {
        port_channels_to_clear: vec![
            Id::from_str("17").unwrap(),
            Id::from_str("19").unwrap(),
            Id::from_str("18").unwrap(),
        ],
        ports_to_configure: vec![
            (PortLocation(2, 0, 17), PortOperatingMode::Trunk),
            (PortLocation(2, 0, 19), PortOperatingMode::Trunk),
            (PortLocation(1, 0, 18), PortOperatingMode::Trunk),
        ],
    };
    harmony_cli::run(
        Inventory::autoload(),
        SwitchTopology::new().await,
        vec![Box::new(switch_score)],
        None,
    )
    .await
    .unwrap();
}

#[derive(Clone, Debug, Serialize)]
struct BrocadeSwitchScore {
    port_channels_to_clear: Vec<Id>,
    ports_to_configure: Vec<PortConfig>,
}

impl<T: Topology + Switch> Score<T> for BrocadeSwitchScore {
    fn name(&self) -> String {
        "BrocadeSwitchScore".to_string()
    }

    #[doc(hidden)]
    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
        Box::new(BrocadeSwitchInterpret {
            score: self.clone(),
        })
    }
}

#[derive(Debug)]
struct BrocadeSwitchInterpret {
    score: BrocadeSwitchScore,
}

#[async_trait]
impl<T: Topology + Switch> Interpret<T> for BrocadeSwitchInterpret {
    async fn execute(
        &self,
        _inventory: &Inventory,
        topology: &T,
    ) -> Result<Outcome, InterpretError> {
        info!("Applying switch configuration {:?}", self.score);
        debug!(
            "Clearing port channel {:?}",
            self.score.port_channels_to_clear
        );
        topology
            .clear_port_channel(&self.score.port_channels_to_clear)
            .await
            .map_err(|e| InterpretError::new(e.to_string()))?;
        debug!("Configuring interfaces {:?}", self.score.ports_to_configure);
        topology
            .configure_interface(&self.score.ports_to_configure)
            .await
            .map_err(|e| InterpretError::new(e.to_string()))?;
        Ok(Outcome::success("switch configured".to_string()))
    }
    fn get_name(&self) -> InterpretName {
        InterpretName::Custom("BrocadeSwitchInterpret")
    }
    fn get_version(&self) -> Version {
        todo!()
    }
    fn get_status(&self) -> InterpretStatus {
        todo!()
    }
    fn get_children(&self) -> Vec<Id> {
        todo!()
    }
}

struct SwitchTopology {
    client: Box<dyn SwitchClient>,
}

#[async_trait]
impl Topology for SwitchTopology {
    fn name(&self) -> &str {
        "SwitchTopology"
    }

    async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
        Ok(PreparationOutcome::Noop)
    }
}

impl SwitchTopology {
    async fn new() -> Self {
        let mut options = BrocadeOptions::default();
        options.ssh.port = 2222;
        let client =
            BrocadeSwitchClient::init(&vec![ip!("127.0.0.1")], &"admin", &"password", options)
                .await
                .expect("Failed to connect to switch");

        let client = Box::new(client);
        Self { client }
    }
}

#[async_trait]
impl Switch for SwitchTopology {
    async fn setup_switch(&self) -> Result<(), SwitchError> {
        todo!()
    }

    async fn get_port_for_mac_address(
        &self,
        _mac_address: &MacAddress,
    ) -> Result<Option<PortLocation>, SwitchError> {
        todo!()
    }

    async fn configure_port_channel(&self, _config: &HostNetworkConfig) -> Result<(), SwitchError> {
        todo!()
    }
    async fn clear_port_channel(&self, ids: &Vec<Id>) -> Result<(), SwitchError> {
        self.client.clear_port_channel(ids).await
    }
    async fn configure_interface(&self, ports: &Vec<PortConfig>) -> Result<(), SwitchError> {
        self.client.configure_interface(ports).await
    }
}
@@ -2,7 +2,7 @@ use harmony::{
    inventory::Inventory,
    modules::{
        dummy::{ErrorScore, PanicScore, SuccessScore},
        inventory::LaunchDiscoverInventoryAgentScore,
        inventory::{HarmonyDiscoveryStrategy, LaunchDiscoverInventoryAgentScore},
    },
    topology::LocalhostTopology,
};
@@ -18,6 +18,7 @@ async fn main() {
            Box::new(PanicScore {}),
            Box::new(LaunchDiscoverInventoryAgentScore {
                discovery_timeout: Some(10),
                discovery_strategy: HarmonyDiscoveryStrategy::MDNS,
            }),
        ],
        None,
15 examples/harmony_inventory_builder/Cargo.toml Normal file
@@ -0,0 +1,15 @@
[package]
name = "harmony_inventory_builder"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true

[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_macros = { path = "../../harmony_macros" }
harmony_types = { path = "../../harmony_types" }
tokio.workspace = true
url.workspace = true
cidr.workspace = true
11 examples/harmony_inventory_builder/build_docker.sh Executable file
@@ -0,0 +1,11 @@
cargo build -p harmony_inventory_builder --release --target x86_64-unknown-linux-musl

SCRIPT_DIR="$(dirname ${0})"

cd "${SCRIPT_DIR}/docker/"

cp ../../../target/x86_64-unknown-linux-musl/release/harmony_inventory_builder .

docker build . -t hub.nationtech.io/harmony/harmony_inventory_builder

docker push hub.nationtech.io/harmony/harmony_inventory_builder
10 examples/harmony_inventory_builder/docker/Dockerfile Normal file
@@ -0,0 +1,10 @@
FROM debian:12-slim

RUN mkdir /app
WORKDIR /app/

COPY harmony_inventory_builder /app/

ENV RUST_LOG=info

CMD ["sleep", "infinity"]
36 examples/harmony_inventory_builder/src/main.rs Normal file
@@ -0,0 +1,36 @@
use harmony::{
    inventory::{HostRole, Inventory},
    modules::inventory::{DiscoverHostForRoleScore, HarmonyDiscoveryStrategy},
    topology::LocalhostTopology,
};
use harmony_macros::cidrv4;

#[tokio::main]
async fn main() {
    let discover_worker = DiscoverHostForRoleScore {
        role: HostRole::Worker,
        number_desired_hosts: 3,
        discovery_strategy: HarmonyDiscoveryStrategy::SUBNET {
            cidr: cidrv4!("192.168.0.1/25"),
            port: 25000,
        },
    };

    let discover_control_plane = DiscoverHostForRoleScore {
        role: HostRole::ControlPlane,
        number_desired_hosts: 3,
        discovery_strategy: HarmonyDiscoveryStrategy::SUBNET {
            cidr: cidrv4!("192.168.0.1/25"),
            port: 25000,
        },
    };

    harmony_cli::run(
        Inventory::autoload(),
        LocalhostTopology::new(),
        vec![Box::new(discover_worker), Box::new(discover_control_plane)],
        None,
    )
    .await
    .unwrap();
}
@@ -1,6 +1,6 @@
use std::{
    net::{IpAddr, Ipv4Addr},
    sync::Arc,
    sync::{Arc, OnceLock},
};

use brocade::BrocadeOptions;
@@ -39,10 +39,10 @@ async fn main() {
        .expect("Failed to get credentials");

    let switches: Vec<IpAddr> = vec![ip!("192.168.33.101")];
    let brocade_options = Some(BrocadeOptions {
    let brocade_options = BrocadeOptions {
        dry_run: *harmony::config::DRY_RUN,
        ..Default::default()
    });
    };
    let switch_client = BrocadeSwitchClient::init(
        &switches,
        &switch_auth.username,
@@ -108,6 +108,7 @@ async fn main() {
        ],
        node_exporter: opnsense.clone(),
        switch_client: switch_client.clone(),
        network_manager: OnceLock::new(),
    };

    let inventory = Inventory {
@@ -4,7 +4,10 @@ use crate::topology::{get_inventory, get_topology};
use harmony::{
    config::secret::SshKeyPair,
    data::{FileContent, FilePath},
    modules::okd::{installation::OKDInstallationPipeline, ipxe::OKDIpxeScore},
    modules::{
        inventory::HarmonyDiscoveryStrategy,
        okd::{installation::OKDInstallationPipeline, ipxe::OKDIpxeScore},
    },
    score::Score,
    topology::HAClusterTopology,
};
@@ -26,7 +29,8 @@ async fn main() {
        },
    })];

    scores.append(&mut OKDInstallationPipeline::get_all_scores().await);
    scores
        .append(&mut OKDInstallationPipeline::get_all_scores(HarmonyDiscoveryStrategy::MDNS).await);

    harmony_cli::run(inventory, topology, scores, None)
        .await
@@ -9,7 +9,10 @@ use harmony::{
use harmony_macros::{ip, ipv4};
use harmony_secret::{Secret, SecretManager};
use serde::{Deserialize, Serialize};
use std::{net::IpAddr, sync::Arc};
use std::{
    net::IpAddr,
    sync::{Arc, OnceLock},
};

#[derive(Secret, Serialize, Deserialize, Debug, PartialEq)]
struct OPNSenseFirewallConfig {
@@ -28,10 +31,10 @@ pub async fn get_topology() -> HAClusterTopology {
        .expect("Failed to get credentials");

    let switches: Vec<IpAddr> = vec![ip!("192.168.1.101")]; // TODO: Adjust me
    let brocade_options = Some(BrocadeOptions {
    let brocade_options = BrocadeOptions {
        dry_run: *harmony::config::DRY_RUN,
        ..Default::default()
    });
    };
    let switch_client = BrocadeSwitchClient::init(
        &switches,
        &switch_auth.username,
@@ -82,6 +85,7 @@ pub async fn get_topology() -> HAClusterTopology {
        workers: vec![],
        node_exporter: opnsense.clone(),
        switch_client: switch_client.clone(),
        network_manager: OnceLock::new(),
    }
}
@@ -10,7 +10,10 @@ use harmony::{
use harmony_macros::{ip, ipv4};
use harmony_secret::{Secret, SecretManager};
use serde::{Deserialize, Serialize};
use std::{net::IpAddr, sync::Arc};
use std::{
    net::IpAddr,
    sync::{Arc, OnceLock},
};

pub async fn get_topology() -> HAClusterTopology {
    let firewall = harmony::topology::LogicalHost {
@@ -23,10 +26,10 @@ pub async fn get_topology() -> HAClusterTopology {
        .expect("Failed to get credentials");

    let switches: Vec<IpAddr> = vec![ip!("192.168.1.101")]; // TODO: Adjust me
    let brocade_options = Some(BrocadeOptions {
    let brocade_options = BrocadeOptions {
        dry_run: *harmony::config::DRY_RUN,
        ..Default::default()
    });
    };
    let switch_client = BrocadeSwitchClient::init(
        &switches,
        &switch_auth.username,
@@ -77,6 +80,7 @@ pub async fn get_topology() -> HAClusterTopology {
        workers: vec![],
        node_exporter: opnsense.clone(),
        switch_client: switch_client.clone(),
        network_manager: OnceLock::new(),
    }
}
18 examples/operatorhub_catalog/Cargo.toml Normal file
@@ -0,0 +1,18 @@
[package]
name = "example-operatorhub-catalogsource"
edition = "2024"
version.workspace = true
readme.workspace = true
license.workspace = true
publish = false

[dependencies]
harmony = { path = "../../harmony" }
harmony_cli = { path = "../../harmony_cli" }
harmony_types = { path = "../../harmony_types" }
cidr = { workspace = true }
tokio = { workspace = true }
harmony_macros = { path = "../../harmony_macros" }
log = { workspace = true }
env_logger = { workspace = true }
url = { workspace = true }
22 examples/operatorhub_catalog/src/main.rs Normal file
@@ -0,0 +1,22 @@
use std::str::FromStr;

use harmony::{
    inventory::Inventory,
    modules::{k8s::apps::OperatorHubCatalogSourceScore, postgresql::CloudNativePgOperatorScore},
    topology::K8sAnywhereTopology,
};

#[tokio::main]
async fn main() {
    let operatorhub_catalog = OperatorHubCatalogSourceScore::default();
    let cnpg_operator = CloudNativePgOperatorScore::default();

    harmony_cli::run(
        Inventory::autoload(),
        K8sAnywhereTopology::from_env(),
        vec![Box::new(operatorhub_catalog), Box::new(cnpg_operator)],
        None,
    )
    .await
    .unwrap();
}
@@ -1,6 +1,6 @@
use std::{
    net::{IpAddr, Ipv4Addr},
    sync::Arc,
    sync::{Arc, OnceLock},
};

use brocade::BrocadeOptions;
@@ -35,10 +35,10 @@ async fn main() {
        .expect("Failed to get credentials");

    let switches: Vec<IpAddr> = vec![ip!("192.168.5.101")]; // TODO: Adjust me
    let brocade_options = Some(BrocadeOptions {
    let brocade_options = BrocadeOptions {
        dry_run: *harmony::config::DRY_RUN,
        ..Default::default()
    });
    };
    let switch_client = BrocadeSwitchClient::init(
        &switches,
        &switch_auth.username,
@@ -80,6 +80,7 @@ async fn main() {
        workers: vec![],
        node_exporter: opnsense.clone(),
        switch_client: switch_client.clone(),
        network_manager: OnceLock::new(),
    };

    let inventory = Inventory {
@@ -152,10 +152,10 @@ impl PhysicalHost {
    pub fn parts_list(&self) -> String {
        let PhysicalHost {
            id,
            category,
            category: _,
            network,
            storage,
            labels,
            labels: _,
            memory_modules,
            cpus,
        } = self;
@@ -226,8 +226,8 @@ impl PhysicalHost {
            speed_mhz,
            manufacturer,
            part_number,
            serial_number,
            rank,
            serial_number: _,
            rank: _,
        } = mem;
        parts_list.push_str(&format!(
            "\n{}Gb, {}Mhz, Manufacturer ({}), Part Number ({})",
@@ -4,6 +4,8 @@ use std::error::Error;
|
||||
use async_trait::async_trait;
|
||||
use derive_new::new;
|
||||
|
||||
use crate::inventory::HostRole;
|
||||
|
||||
use super::{
|
||||
data::Version, executors::ExecutorError, inventory::Inventory, topology::PreparationError,
|
||||
};
|
||||
|
||||
@@ -1,4 +1,6 @@
|
||||
mod repository;
|
||||
use std::fmt;
|
||||
|
||||
pub use repository::*;
|
||||
|
||||
#[derive(Debug, new, Clone)]
|
||||
@@ -69,5 +71,14 @@ pub enum HostRole {
|
||||
Bootstrap,
|
||||
ControlPlane,
|
||||
Worker,
|
||||
Storage,
|
||||
}
|
||||
|
||||
impl fmt::Display for HostRole {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            HostRole::Bootstrap => write!(f, "Bootstrap"),
            HostRole::ControlPlane => write!(f, "ControlPlane"),
            HostRole::Worker => write!(f, "Worker"),
            HostRole::Storage => write!(f, "Storage"),
        }
    }
}

19
harmony/src/domain/topology/failover.rs
Normal file
@@ -0,0 +1,19 @@
use async_trait::async_trait;

use crate::topology::{PreparationError, PreparationOutcome, Topology};

pub struct FailoverTopology<T> {
    pub primary: T,
    pub replica: T,
}
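
// A minimal composition sketch (hypothetical call site, assuming the wrapped
// type implements `Topology`): the same topology type is instantiated twice
// and wrapped as a primary/replica pair.
//
//     let failover = FailoverTopology {
//         primary: K8sAnywhereTopology::from_env(),
//         replica: K8sAnywhereTopology::from_env(),
//     };
//     assert_eq!(failover.name(), "FailoverTopology");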

#[async_trait]
impl<T: Send + Sync> Topology for FailoverTopology<T> {
    fn name(&self) -> &str {
        "FailoverTopology"
    }

    async fn ensure_ready(&self) -> Result<PreparationOutcome, PreparationError> {
        todo!()
    }
}

@@ -1,32 +1,25 @@
use async_trait::async_trait;
use brocade::PortOperatingMode;
use harmony_macros::ip;
use harmony_types::{
    id::Id,
    net::{MacAddress, Url},
    switch::PortLocation,
};
use kube::api::ObjectMeta;
use log::debug;
use log::info;

use crate::topology::PxeOptions;
use crate::{data::FileContent, modules::okd::crd::nmstate::NMState};
use crate::{
    executors::ExecutorError, modules::okd::crd::nmstate::NodeNetworkConfigurationPolicySpec,
};
use crate::{
    modules::okd::crd::nmstate::{self, NodeNetworkConfigurationPolicy},
    topology::node_exporter::NodeExporter,
};
use crate::{data::FileContent, executors::ExecutorError, topology::node_exporter::NodeExporter};
use crate::{infra::network_manager::OpenShiftNmStateNetworkManager, topology::PortConfig};
use crate::{modules::inventory::HarmonyDiscoveryStrategy, topology::PxeOptions};

use super::{
    DHCPStaticEntry, DhcpServer, DnsRecord, DnsRecordType, DnsServer, Firewall, HostNetworkConfig,
    HttpServer, IpAddress, K8sclient, LoadBalancer, LoadBalancerService, LogicalHost,
    PreparationError, PreparationOutcome, Router, Switch, SwitchClient, SwitchError, TftpServer,
    Topology, k8s::K8sClient,
    HttpServer, IpAddress, K8sclient, LoadBalancer, LoadBalancerService, LogicalHost, NetworkError,
    NetworkManager, PreparationError, PreparationOutcome, Router, Switch, SwitchClient,
    SwitchError, TftpServer, Topology, k8s::K8sClient,
};

use std::collections::BTreeMap;
use std::sync::Arc;
use std::sync::{Arc, OnceLock};

#[derive(Debug, Clone)]
pub struct HAClusterTopology {
@@ -44,6 +37,7 @@ pub struct HAClusterTopology {
    pub control_plane: Vec<LogicalHost>,
    pub workers: Vec<LogicalHost>,
    pub kubeconfig: Option<String>,
    pub network_manager: OnceLock<Arc<dyn NetworkManager>>,
}

#[async_trait]
@@ -67,7 +61,7 @@ impl K8sclient for HAClusterTopology {
                K8sClient::try_default().await.map_err(|e| e.to_string())?,
            )),
            Some(kubeconfig) => {
                let Some(client) = K8sClient::from_kubeconfig(&kubeconfig).await else {
                let Some(client) = K8sClient::from_kubeconfig(kubeconfig).await else {
                    return Err("Failed to create k8s client".to_string());
                };
                Ok(Arc::new(client))
@@ -97,191 +91,12 @@ impl HAClusterTopology {
            .to_string()
    }

    async fn ensure_nmstate_operator_installed(&self) -> Result<(), String> {
        let k8s_client = self.k8s_client().await?;
    pub async fn network_manager(&self) -> &dyn NetworkManager {
        let k8s_client = self.k8s_client().await.unwrap();

        debug!("Installing NMState controller...");
        k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/nmstate.io_nmstates.yaml").unwrap(), Some("nmstate"))
            .await
            .map_err(|e| e.to_string())?;

        debug!("Creating NMState namespace...");
        k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/namespace.yaml").unwrap(), Some("nmstate"))
            .await
            .map_err(|e| e.to_string())?;

        debug!("Creating NMState service account...");
        k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/service_account.yaml").unwrap(), Some("nmstate"))
            .await
            .map_err(|e| e.to_string())?;

        debug!("Creating NMState role...");
        k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/role.yaml").unwrap(), Some("nmstate"))
            .await
            .map_err(|e| e.to_string())?;

        debug!("Creating NMState role binding...");
        k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/role_binding.yaml").unwrap(), Some("nmstate"))
            .await
            .map_err(|e| e.to_string())?;

        debug!("Creating NMState operator...");
        k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/operator.yaml").unwrap(), Some("nmstate"))
            .await
            .map_err(|e| e.to_string())?;

        k8s_client
            .wait_until_deployment_ready("nmstate-operator", Some("nmstate"), None)
            .await?;

        let nmstate = NMState {
            metadata: ObjectMeta {
                name: Some("nmstate".to_string()),
                ..Default::default()
            },
            ..Default::default()
        };
        debug!("Creating NMState: {nmstate:#?}");
        k8s_client
            .apply(&nmstate, None)
            .await
            .map_err(|e| e.to_string())?;

        Ok(())
    }

    fn get_next_bond_id(&self) -> u8 {
        42 // FIXME: Find a better way to declare the bond id
    }

    async fn configure_bond(&self, config: &HostNetworkConfig) -> Result<(), SwitchError> {
        self.ensure_nmstate_operator_installed()
            .await
            .map_err(|e| {
                SwitchError::new(format!(
                    "Can't configure bond, NMState operator not available: {e}"
                ))
            })?;

        let bond_config = self.create_bond_configuration(config);
        debug!(
            "Applying NMState bond config for host {}: {bond_config:#?}",
            config.host_id
        );
        self.k8s_client()
            .await
            .unwrap()
            .apply(&bond_config, None)
            .await
            .map_err(|e| SwitchError::new(format!("Failed to configure bond: {e}")))?;

        Ok(())
    }

    fn create_bond_configuration(
        &self,
        config: &HostNetworkConfig,
    ) -> NodeNetworkConfigurationPolicy {
        let host_name = &config.host_id;
        let bond_id = self.get_next_bond_id();
        let bond_name = format!("bond{bond_id}");

        info!("Configuring bond '{bond_name}' for host '{host_name}'...");

        let mut bond_mtu: Option<u32> = None;
        let mut copy_mac_from: Option<String> = None;
        let mut bond_ports = Vec::new();
        let mut interfaces: Vec<nmstate::InterfaceSpec> = Vec::new();

        for switch_port in &config.switch_ports {
            let interface_name = switch_port.interface.name.clone();

            interfaces.push(nmstate::InterfaceSpec {
                name: interface_name.clone(),
                description: Some(format!("Member of bond {bond_name}")),
                r#type: "ethernet".to_string(),
                state: "up".to_string(),
                mtu: Some(switch_port.interface.mtu),
                mac_address: Some(switch_port.interface.mac_address.to_string()),
                ipv4: Some(nmstate::IpStackSpec {
                    enabled: Some(false),
                    ..Default::default()
                }),
                ipv6: Some(nmstate::IpStackSpec {
                    enabled: Some(false),
                    ..Default::default()
                }),
                link_aggregation: None,
                ..Default::default()
            });

            bond_ports.push(interface_name.clone());

            // Use the first port's details for the bond mtu and mac address
            if bond_mtu.is_none() {
                bond_mtu = Some(switch_port.interface.mtu);
            }
            if copy_mac_from.is_none() {
                copy_mac_from = Some(interface_name);
            }
        }

        interfaces.push(nmstate::InterfaceSpec {
            name: bond_name.clone(),
            description: Some(format!("Network bond for host {host_name}")),
            r#type: "bond".to_string(),
            state: "up".to_string(),
            copy_mac_from,
            ipv4: Some(nmstate::IpStackSpec {
                dhcp: Some(true),
                enabled: Some(true),
                ..Default::default()
            }),
            ipv6: Some(nmstate::IpStackSpec {
                dhcp: Some(true),
                autoconf: Some(true),
                enabled: Some(true),
                ..Default::default()
            }),
            link_aggregation: Some(nmstate::BondSpec {
                mode: "802.3ad".to_string(),
                ports: bond_ports,
                ..Default::default()
            }),
            ..Default::default()
        });

        NodeNetworkConfigurationPolicy {
            metadata: ObjectMeta {
                name: Some(format!("{host_name}-bond-config")),
                ..Default::default()
            },
            spec: NodeNetworkConfigurationPolicySpec {
                node_selector: Some(BTreeMap::from([(
                    "kubernetes.io/hostname".to_string(),
                    host_name.to_string(),
                )])),
                desired_state: nmstate::DesiredStateSpec { interfaces },
            },
        }
    }

    async fn configure_port_channel(&self, config: &HostNetworkConfig) -> Result<(), SwitchError> {
        debug!("Configuring port channel: {config:#?}");
        let switch_ports = config.switch_ports.iter().map(|s| s.port.clone()).collect();

        self.switch_client
            .configure_port_channel(&format!("Harmony_{}", config.host_id), switch_ports)
            .await
            .map_err(|e| SwitchError::new(format!("Failed to configure switch: {e}")))?;

        Ok(())
        self.network_manager
            .get_or_init(|| Arc::new(OpenShiftNmStateNetworkManager::new(k8s_client.clone())))
            .as_ref()
    }

    pub fn autoload() -> Self {
@@ -306,6 +121,7 @@ impl HAClusterTopology {
            bootstrap_host: dummy_host,
            control_plane: vec![],
            workers: vec![],
            network_manager: OnceLock::new(),
        }
    }
}
@@ -463,21 +279,47 @@ impl HttpServer for HAClusterTopology {
#[async_trait]
impl Switch for HAClusterTopology {
    async fn setup_switch(&self) -> Result<(), SwitchError> {
        self.switch_client.setup().await?;
        Ok(())
        self.switch_client.setup().await.map(|_| ())
    }

    async fn get_port_for_mac_address(
        &self,
        mac_address: &MacAddress,
    ) -> Result<Option<PortLocation>, SwitchError> {
        let port = self.switch_client.find_port(mac_address).await?;
        Ok(port)
        self.switch_client.find_port(mac_address).await
    }

    async fn configure_host_network(&self, config: &HostNetworkConfig) -> Result<(), SwitchError> {
        self.configure_bond(config).await?;
        self.configure_port_channel(config).await
    async fn configure_port_channel(&self, config: &HostNetworkConfig) -> Result<(), SwitchError> {
        debug!("Configuring port channel: {config:#?}");
        let switch_ports = config.switch_ports.iter().map(|s| s.port.clone()).collect();

        self.switch_client
            .configure_port_channel(&format!("Harmony_{}", config.host_id), switch_ports)
            .await
            .map_err(|e| SwitchError::new(format!("Failed to configure port-channel: {e}")))?;

        Ok(())
    }

    async fn clear_port_channel(&self, ids: &Vec<Id>) -> Result<(), SwitchError> {
        todo!()
    }

    async fn configure_interface(&self, ports: &Vec<PortConfig>) -> Result<(), SwitchError> {
        todo!()
    }
}

#[async_trait]
impl NetworkManager for HAClusterTopology {
    async fn ensure_network_manager_installed(&self) -> Result<(), NetworkError> {
        self.network_manager()
            .await
            .ensure_network_manager_installed()
            .await
    }

    async fn configure_bond(&self, config: &HostNetworkConfig) -> Result<(), NetworkError> {
        self.network_manager().await.configure_bond(config).await
    }

    //TODO add snmp here
@@ -720,4 +562,10 @@ impl SwitchClient for DummyInfra {
    ) -> Result<u8, SwitchError> {
        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
    }

    async fn clear_port_channel(&self, ids: &Vec<Id>) -> Result<(), SwitchError> {
        todo!()
    }

    async fn configure_interface(&self, ports: &Vec<PortConfig>) -> Result<(), SwitchError> {
        todo!()
    }
}

@@ -5,13 +5,15 @@ use k8s_openapi::{
    ClusterResourceScope, NamespaceResourceScope,
    api::{
        apps::v1::Deployment,
        core::v1::{Pod, ServiceAccount},
        core::v1::{Node, Pod, ServiceAccount},
    },
    apimachinery::pkg::version::Info,
};
use kube::{
    Client, Config, Discovery, Error, Resource,
    api::{Api, AttachParams, DeleteParams, ListParams, Patch, PatchParams, ResourceExt},
    api::{
        Api, AttachParams, DeleteParams, ListParams, ObjectList, Patch, PatchParams, ResourceExt,
    },
    config::{KubeConfigOptions, Kubeconfig},
    core::ErrorResponse,
    discovery::{ApiCapabilities, Scope},
@@ -23,7 +25,7 @@ use kube::{
    api::{ApiResource, GroupVersionKind},
    runtime::wait::await_condition,
};
use log::{debug, error, info, trace, warn};
use log::{debug, error, trace, warn};
use serde::{Serialize, de::DeserializeOwned};
use serde_json::json;
use similar::TextDiff;
@@ -564,7 +566,58 @@ impl K8sClient {
        Ok(())
    }

    pub(crate) async fn from_kubeconfig(path: &str) -> Option<K8sClient> {
    /// Gets a single named resource of a specific type `K`.
    ///
    /// This function uses the `ApplyStrategy` trait to correctly determine
    /// whether to look in a specific namespace or in the entire cluster.
    ///
    /// Returns `Ok(None)` if the resource is not found (404).
    pub async fn get_resource<K>(
        &self,
        name: &str,
        namespace: Option<&str>,
    ) -> Result<Option<K>, Error>
    where
        K: Resource + Clone + std::fmt::Debug + DeserializeOwned,
        <K as Resource>::Scope: ApplyStrategy<K>,
        <K as kube::Resource>::DynamicType: Default,
    {
        let api: Api<K> =
            <<K as Resource>::Scope as ApplyStrategy<K>>::get_api(&self.client, namespace);

        api.get_opt(name).await
    }

    /// Lists all resources of a specific type `K`.
    ///
    /// This function uses the `ApplyStrategy` trait to correctly determine
    /// whether to list from a specific namespace or from the entire cluster.
    pub async fn list_resources<K>(
        &self,
        namespace: Option<&str>,
        list_params: Option<ListParams>,
    ) -> Result<ObjectList<K>, Error>
    where
        K: Resource + Clone + std::fmt::Debug + DeserializeOwned,
        <K as Resource>::Scope: ApplyStrategy<K>,
        <K as kube::Resource>::DynamicType: Default,
    {
        let api: Api<K> =
            <<K as Resource>::Scope as ApplyStrategy<K>>::get_api(&self.client, namespace);

        let list_params = list_params.unwrap_or_default();
        api.list(&list_params).await
    }

    /// Fetches a list of all Nodes in the cluster.
    pub async fn get_nodes(
        &self,
        list_params: Option<ListParams>,
    ) -> Result<ObjectList<Node>, Error> {
        self.list_resources(None, list_params).await
    }
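
    // Sketch of a call site (hypothetical; `ResourceExt` provides `name_any`):
    //
    //     let nodes = client.get_nodes(None).await?;
    //     for node in nodes.iter() {
    //         debug!("node: {}", node.name_any());
    //     }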

    pub async fn from_kubeconfig(path: &str) -> Option<K8sClient> {
        let k = match Kubeconfig::read_from(path) {
            Ok(k) => k,
            Err(e) => {

@@ -1,6 +1,8 @@
mod failover;
mod ha_cluster;
pub mod ingress;
pub mod node_exporter;
pub use failover::*;
use harmony_types::net::IpAddress;
mod host_binding;
mod http;

@@ -7,6 +7,7 @@ use std::{
};

use async_trait::async_trait;
use brocade::PortOperatingMode;
use derive_new::new;
use harmony_types::{
    id::Id,
@@ -15,7 +16,7 @@ use harmony_types::{
};
use serde::Serialize;

use crate::{executors::ExecutorError, hardware::PhysicalHost};
use crate::executors::ExecutorError;

use super::{LogicalHost, k8s::K8sClient};

@@ -183,6 +184,39 @@ impl FromStr for DnsRecordType {
    }
}

#[async_trait]
pub trait NetworkManager: Debug + Send + Sync {
    async fn ensure_network_manager_installed(&self) -> Result<(), NetworkError>;
    async fn configure_bond(&self, config: &HostNetworkConfig) -> Result<(), NetworkError>;
}

#[derive(Debug, Clone, new)]
pub struct NetworkError {
    msg: String,
}

impl fmt::Display for NetworkError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(&self.msg)
    }
}

impl Error for NetworkError {}

impl From<kube::Error> for NetworkError {
    fn from(value: kube::Error) -> Self {
        NetworkError::new(value.to_string())
    }
}

impl From<String> for NetworkError {
    fn from(value: String) -> Self {
        NetworkError::new(value)
    }
}
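
// The `From` impls above let call sites bubble both kube and string errors
// with `?`, e.g. (illustrative only):
//
//     async fn prepare(nm: &dyn NetworkManager) -> Result<(), NetworkError> {
//         nm.ensure_network_manager_installed().await?;
//         Ok(())
//     }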

pub type PortConfig = (PortLocation, PortOperatingMode);

#[async_trait]
pub trait Switch: Send + Sync {
    async fn setup_switch(&self) -> Result<(), SwitchError>;
@@ -192,7 +226,9 @@ pub trait Switch: Send + Sync {
        mac_address: &MacAddress,
    ) -> Result<Option<PortLocation>, SwitchError>;

    async fn configure_host_network(&self, config: &HostNetworkConfig) -> Result<(), SwitchError>;
    async fn configure_port_channel(&self, config: &HostNetworkConfig) -> Result<(), SwitchError>;
    async fn clear_port_channel(&self, ids: &Vec<Id>) -> Result<(), SwitchError>;
    async fn configure_interface(&self, ports: &Vec<PortConfig>) -> Result<(), SwitchError>;
}

#[derive(Clone, Debug, PartialEq)]
@@ -252,6 +288,9 @@ pub trait SwitchClient: Debug + Send + Sync {
        channel_name: &str,
        switch_ports: Vec<PortLocation>,
    ) -> Result<u8, SwitchError>;

    async fn clear_port_channel(&self, ids: &Vec<Id>) -> Result<(), SwitchError>;
    async fn configure_interface(&self, ports: &Vec<PortConfig>) -> Result<(), SwitchError>;
}

#[cfg(test)]

@@ -14,7 +14,7 @@ use k8s_openapi::{
    },
    apimachinery::pkg::util::intstr::IntOrString,
};
use kube::Resource;
use kube::{Resource, api::DynamicObject};
use log::debug;
use serde::de::DeserializeOwned;
use serde_json::json;

@@ -1,12 +1,13 @@
use async_trait::async_trait;
use brocade::{BrocadeClient, BrocadeOptions, InterSwitchLink, InterfaceStatus, PortOperatingMode};
use harmony_types::{
    id::Id,
    net::{IpAddress, MacAddress},
    switch::{PortDeclaration, PortLocation},
};
use option_ext::OptionExt;

use crate::topology::{SwitchClient, SwitchError};
use crate::topology::{PortConfig, SwitchClient, SwitchError};

#[derive(Debug)]
pub struct BrocadeSwitchClient {
@@ -18,9 +19,9 @@ impl BrocadeSwitchClient {
        ip_addresses: &[IpAddress],
        username: &str,
        password: &str,
        options: Option<BrocadeOptions>,
        options: BrocadeOptions,
    ) -> Result<Self, brocade::Error> {
        let brocade = brocade::init(ip_addresses, 22, username, password, options).await?;
        let brocade = brocade::init(ip_addresses, username, password, options).await?;
        Ok(Self { brocade })
    }
}
@@ -59,7 +60,7 @@ impl SwitchClient for BrocadeSwitchClient {
        }

        self.brocade
            .configure_interfaces(interfaces)
            .configure_interfaces(&interfaces)
            .await
            .map_err(|e| SwitchError::new(e.to_string()))?;

@@ -111,6 +112,27 @@ impl SwitchClient for BrocadeSwitchClient {

        Ok(channel_id)
    }

    async fn clear_port_channel(&self, ids: &Vec<Id>) -> Result<(), SwitchError> {
        for i in ids {
            self.brocade
                .clear_port_channel(&i.to_string())
                .await
                .map_err(|e| SwitchError::new(e.to_string()))?;
        }
        Ok(())
    }

    async fn configure_interface(&self, ports: &Vec<PortConfig>) -> Result<(), SwitchError> {
        // FIXME hardcoded TenGigabitEthernet = bad
        let ports = ports
            .iter()
            .map(|p| (format!("TenGigabitEthernet {}", p.0), p.1.clone()))
            .collect();
        self.brocade
            .configure_interfaces(&ports)
            .await
            .map_err(|e| SwitchError::new(e.to_string()))?;
        Ok(())
    }
}

#[cfg(test)]
@@ -121,7 +143,7 @@ mod tests {
    use async_trait::async_trait;
    use brocade::{
        BrocadeClient, BrocadeInfo, Error, InterSwitchLink, InterfaceInfo, InterfaceStatus,
        InterfaceType, MacAddressEntry, PortChannelId, PortOperatingMode,
        InterfaceType, MacAddressEntry, PortChannelId, PortOperatingMode, SecurityLevel,
    };
    use harmony_types::switch::PortLocation;

@@ -145,6 +167,7 @@ mod tests {

        client.setup().await.unwrap();

        //TODO not sure about this
        let configured_interfaces = brocade.configured_interfaces.lock().unwrap();
        assert_that!(*configured_interfaces).contains_exactly(vec![
            (first_interface.name.clone(), PortOperatingMode::Access),
@@ -255,10 +278,10 @@ mod tests {

    async fn configure_interfaces(
        &self,
        interfaces: Vec<(String, PortOperatingMode)>,
        interfaces: &Vec<(String, PortOperatingMode)>,
    ) -> Result<(), Error> {
        let mut configured_interfaces = self.configured_interfaces.lock().unwrap();
        *configured_interfaces = interfaces;
        *configured_interfaces = interfaces.clone();

        Ok(())
    }
@@ -279,6 +302,10 @@ mod tests {
    async fn clear_port_channel(&self, _channel_name: &str) -> Result<(), Error> {
        todo!()
    }

    async fn enable_snmp(&self, user_name: &str, auth: &str, des: &str) -> Result<(), Error> {
        todo!()
    }
}

impl FakeBrocadeClient {

182
harmony/src/infra/kube.rs
Normal file
@@ -0,0 +1,182 @@
use k8s_openapi::Resource as K8sResource;
use kube::api::{ApiResource, DynamicObject, GroupVersionKind};
use kube::core::TypeMeta;
use serde::Serialize;
use serde::de::DeserializeOwned;
use serde_json::Value;

/// Convert a typed Kubernetes resource `K` into a `DynamicObject`.
///
/// Requirements:
/// - `K` must be a k8s_openapi resource (provides static GVK via `Resource`).
/// - `K` must have standard Kubernetes shape (metadata + payload fields).
///
/// Notes:
/// - We set `types` (apiVersion/kind) and copy `metadata`.
/// - We place the remaining top-level fields into `obj.data` as JSON.
/// - Scope is not encoded on the object itself; you still need the corresponding
///   `DynamicResource` (derived from K::group/version/kind) when constructing an Api.
///
/// Example usage:
///     let dyn_obj = kube_resource_to_dynamic(secret)?;
///     let api: Api<DynamicObject> = Api::namespaced_with(client, "ns", &dr);
///     api.patch(&dyn_obj.name_any(), &PatchParams::apply("mgr"), &Patch::Apply(dyn_obj)).await?;
pub fn kube_resource_to_dynamic<K>(res: &K) -> Result<DynamicObject, String>
where
    K: K8sResource + Serialize + DeserializeOwned,
{
    // Serialize the typed resource to JSON so we can split metadata and payload
    let mut v = serde_json::to_value(res).map_err(|e| format!("Failed to serialize : {e}"))?;
    let obj = v
        .as_object_mut()
        .ok_or_else(|| "expected object JSON".to_string())?;

    // Extract and parse metadata into kube::core::ObjectMeta
    let metadata_value = obj
        .remove("metadata")
        .ok_or_else(|| "missing metadata".to_string())?;
    let metadata: kube::core::ObjectMeta = serde_json::from_value(metadata_value)
        .map_err(|e| format!("Failed to deserialize : {e}"))?;

    // Name is required for DynamicObject::new; prefer metadata.name
    let name = metadata
        .name
        .clone()
        .ok_or_else(|| "metadata.name is required".to_string())?;

    // Remaining fields (spec/status/data/etc.) become the dynamic payload
    let payload = Value::Object(obj.clone());

    // Construct the DynamicObject
    let mut dyn_obj = DynamicObject::new(
        &name,
        &ApiResource::from_gvk(&GroupVersionKind::gvk(K::GROUP, K::VERSION, K::KIND)),
    );
    dyn_obj.types = Some(TypeMeta {
        api_version: api_version_for::<K>(),
        kind: K::KIND.into(),
    });

    // Preserve namespace/labels/annotations/etc.
    dyn_obj.metadata = metadata;

    // Attach payload
    dyn_obj.data = payload;

    Ok(dyn_obj)
}

/// Helper: compute apiVersion string ("group/version" or "v1" for core).
fn api_version_for<K>() -> String
where
    K: K8sResource,
{
    let group = K::GROUP;
    let version = K::VERSION;
    if group.is_empty() {
        version.to_string() // core/v1 => "v1"
    } else {
        format!("{}/{}", group, version)
    }
}
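
// For example (standard k8s_openapi constants):
//     api_version_for::<k8s_openapi::api::core::v1::Pod>()        == "v1"
//     api_version_for::<k8s_openapi::api::apps::v1::Deployment>() == "apps/v1"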

#[cfg(test)]
mod test {
    use super::*;
    use k8s_openapi::api::{
        apps::v1::{Deployment, DeploymentSpec},
        core::v1::{PodTemplateSpec, Secret},
    };
    use kube::api::ObjectMeta;
    use pretty_assertions::assert_eq;

    #[test]
    fn secret_to_dynamic_roundtrip() {
        // Create a sample Secret resource
        let mut secret = Secret {
            metadata: ObjectMeta {
                name: Some("my-secret".to_string()),
                ..Default::default()
            },
            type_: Some("kubernetes.io/service-account-token".to_string()),
            ..Default::default()
        };

        // Convert to DynamicResource
        let dynamic: DynamicObject =
            kube_resource_to_dynamic(&secret).expect("Failed to convert Secret to DynamicResource");

        // Serialize both the original and dynamic resources to Value
        let original_value = serde_json::to_value(&secret).expect("Failed to serialize Secret");
        let dynamic_value =
            serde_json::to_value(&dynamic).expect("Failed to serialize DynamicResource");

        // Assert that they are identical
        assert_eq!(original_value, dynamic_value);

        secret.metadata.namespace = Some("false".to_string());
        let modified_value = serde_json::to_value(&secret).expect("Failed to serialize Secret");
        assert_ne!(modified_value, dynamic_value);
    }

    #[test]
    fn deployment_to_dynamic_roundtrip() {
        // Create a sample Deployment with nested structures
        let deployment = Deployment {
            metadata: ObjectMeta {
                name: Some("my-deployment".to_string()),
                labels: Some({
                    let mut map = std::collections::BTreeMap::new();
                    map.insert("app".to_string(), "nginx".to_string());
                    map
                }),
                ..Default::default()
            },
            spec: Some(DeploymentSpec {
                replicas: Some(3),
                selector: Default::default(),
                template: PodTemplateSpec {
                    metadata: Some(ObjectMeta {
                        labels: Some({
                            let mut map = std::collections::BTreeMap::new();
                            map.insert("app".to_string(), "nginx".to_string());
                            map
                        }),
                        ..Default::default()
                    }),
                    spec: Some(Default::default()), // PodSpec with empty containers for simplicity
                },
                ..Default::default()
            }),
            ..Default::default()
        };

        let dynamic = kube_resource_to_dynamic(&deployment).expect("Failed to convert Deployment");

        let original_value = serde_json::to_value(&deployment).unwrap();
        let dynamic_value = serde_json::to_value(&dynamic).unwrap();

        assert_eq!(original_value, dynamic_value);

        assert_eq!(
            dynamic.data.get("spec").unwrap().get("replicas").unwrap(),
            3
        );
        assert_eq!(
            dynamic
                .data
                .get("spec")
                .unwrap()
                .get("template")
                .unwrap()
                .get("metadata")
                .unwrap()
                .get("labels")
                .unwrap()
                .get("app")
                .unwrap()
                .as_str()
                .unwrap(),
            "nginx".to_string()
        );
    }
}

@@ -3,5 +3,7 @@ pub mod executors;
pub mod hp_ilo;
pub mod intel_amt;
pub mod inventory;
pub mod kube;
pub mod network_manager;
pub mod opnsense;
mod sqlx;

264
harmony/src/infra/network_manager.rs
Normal file
@@ -0,0 +1,264 @@
use std::{
    collections::{BTreeMap, HashSet},
    sync::Arc,
};

use async_trait::async_trait;
use harmony_types::id::Id;
use k8s_openapi::api::core::v1::Node;
use kube::{
    ResourceExt,
    api::{ObjectList, ObjectMeta},
};
use log::{debug, info};

use crate::{
    modules::okd::crd::nmstate,
    topology::{HostNetworkConfig, NetworkError, NetworkManager, k8s::K8sClient},
};

/// TODO document properly the non-intuitive "roll forward only" behavior of nmstate in general.
/// It is documented in the official nmstate docs, but worth mentioning here:
///
/// - You create a bond, nmstate will apply it
/// - You delete the bond from nmstate, it will NOT delete it
/// - To delete it you have to update it with the configuration set to null
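///
/// A removal therefore looks like applying a policy whose interface is marked
/// absent (sketch; field names per the upstream nmstate docs, bond name hypothetical):
///
/// ```yaml
/// interfaces:
///   - name: bond0
///     type: bond
///     state: absent
/// ```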
pub struct OpenShiftNmStateNetworkManager {
    k8s_client: Arc<K8sClient>,
}

impl std::fmt::Debug for OpenShiftNmStateNetworkManager {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("OpenShiftNmStateNetworkManager").finish()
    }
}

#[async_trait]
impl NetworkManager for OpenShiftNmStateNetworkManager {
    async fn ensure_network_manager_installed(&self) -> Result<(), NetworkError> {
        debug!("Installing NMState controller...");
        // TODO use operatorhub maybe?
        self.k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/nmstate.io_nmstates.yaml").unwrap(), Some("nmstate"))
            .await?;

        debug!("Creating NMState namespace...");
        self.k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/namespace.yaml").unwrap(), Some("nmstate"))
            .await?;

        debug!("Creating NMState service account...");
        self.k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/service_account.yaml").unwrap(), Some("nmstate"))
            .await?;

        debug!("Creating NMState role...");
        self.k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/role.yaml").unwrap(), Some("nmstate"))
            .await?;

        debug!("Creating NMState role binding...");
        self.k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/role_binding.yaml").unwrap(), Some("nmstate"))
            .await?;

        debug!("Creating NMState operator...");
        self.k8s_client.apply_url(url::Url::parse("https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.84.0/operator.yaml").unwrap(), Some("nmstate"))
            .await?;

        self.k8s_client
            .wait_until_deployment_ready("nmstate-operator", Some("nmstate"), None)
            .await?;

        let nmstate = nmstate::NMState {
            metadata: ObjectMeta {
                name: Some("nmstate".to_string()),
                ..Default::default()
            },
            ..Default::default()
        };
        debug!(
            "Creating NMState:\n{}",
            serde_yaml::to_string(&nmstate).unwrap()
        );
        self.k8s_client.apply(&nmstate, None).await?;

        Ok(())
    }

    async fn configure_bond(&self, config: &HostNetworkConfig) -> Result<(), NetworkError> {
        let hostname = self.get_hostname(&config.host_id).await.map_err(|e| {
            NetworkError::new(format!(
                "Can't configure bond, can't get hostname for host '{}': {e}",
                config.host_id
            ))
        })?;
        let bond_id = self.get_next_bond_id(&hostname).await.map_err(|e| {
            NetworkError::new(format!(
                "Can't configure bond, can't get an available bond id for host '{}': {e}",
                config.host_id
            ))
        })?;
        let bond_config = self.create_bond_configuration(&hostname, &bond_id, config);

        debug!(
            "Applying NMState bond config for host {}:\n{}",
            config.host_id,
            serde_yaml::to_string(&bond_config).unwrap(),
        );
        self.k8s_client
            .apply(&bond_config, None)
            .await
            .map_err(|e| NetworkError::new(format!("Failed to configure bond: {e}")))?;

        Ok(())
    }
}

impl OpenShiftNmStateNetworkManager {
    pub fn new(k8s_client: Arc<K8sClient>) -> Self {
        Self { k8s_client }
    }

    fn create_bond_configuration(
        &self,
        host: &str,
        bond_name: &str,
        config: &HostNetworkConfig,
    ) -> nmstate::NodeNetworkConfigurationPolicy {
        info!("Configuring bond '{bond_name}' for host '{host}'...");

        let mut bond_mtu: Option<u32> = None;
        let mut copy_mac_from: Option<String> = None;
        let mut bond_ports = Vec::new();
        let mut interfaces: Vec<nmstate::Interface> = Vec::new();

        for switch_port in &config.switch_ports {
            let interface_name = switch_port.interface.name.clone();

            interfaces.push(nmstate::Interface {
                name: interface_name.clone(),
                description: Some(format!("Member of bond {bond_name}")),
                r#type: nmstate::InterfaceType::Ethernet,
                state: "up".to_string(),
                ipv4: Some(nmstate::IpStackSpec {
                    enabled: Some(false),
                    ..Default::default()
                }),
                ipv6: Some(nmstate::IpStackSpec {
                    enabled: Some(false),
                    ..Default::default()
                }),
                link_aggregation: None,
                ..Default::default()
            });

            bond_ports.push(interface_name.clone());

            // Use the first port's details for the bond mtu and mac address
            if bond_mtu.is_none() {
                bond_mtu = Some(switch_port.interface.mtu);
            }
            if copy_mac_from.is_none() {
                copy_mac_from = Some(interface_name);
            }
        }

        interfaces.push(nmstate::Interface {
            name: bond_name.to_string(),
            description: Some(format!("HARMONY - Network bond for host {host}")),
            r#type: nmstate::InterfaceType::Bond,
            state: "up".to_string(),
            copy_mac_from,
            ipv4: Some(nmstate::IpStackSpec {
                dhcp: Some(true),
                enabled: Some(true),
                ..Default::default()
            }),
            ipv6: Some(nmstate::IpStackSpec {
                dhcp: Some(true),
                autoconf: Some(true),
                enabled: Some(true),
                ..Default::default()
            }),
            link_aggregation: Some(nmstate::BondSpec {
                mode: "802.3ad".to_string(),
                ports: bond_ports,
                ..Default::default()
            }),
            ..Default::default()
        });

        nmstate::NodeNetworkConfigurationPolicy {
            metadata: ObjectMeta {
                name: Some(format!("{host}-bond-config")),
                ..Default::default()
            },
            spec: nmstate::NodeNetworkConfigurationPolicySpec {
                node_selector: Some(BTreeMap::from([(
                    "kubernetes.io/hostname".to_string(),
                    host.to_string(),
                )])),
                desired_state: nmstate::NetworkState {
                    interfaces,
                    ..Default::default()
                },
            },
        }
    }

    async fn get_hostname(&self, host_id: &Id) -> Result<String, String> {
        let nodes: ObjectList<Node> = self
            .k8s_client
            .list_resources(None, None)
            .await
            .map_err(|e| format!("Failed to list nodes: {e}"))?;

        let Some(node) = nodes.iter().find(|n| {
            n.status
                .as_ref()
                .and_then(|s| s.node_info.as_ref())
                .map(|i| i.system_uuid == host_id.to_string())
                .unwrap_or(false)
        }) else {
            return Err(format!("No node found for host '{host_id}'"));
        };

        node.labels()
            .get("kubernetes.io/hostname")
            .ok_or(format!(
                "Node '{host_id}' has no kubernetes.io/hostname label"
            ))
            .cloned()
    }

    async fn get_next_bond_id(&self, hostname: &str) -> Result<String, String> {
        let network_state: Option<nmstate::NodeNetworkState> = self
            .k8s_client
            .get_resource(hostname, None)
            .await
            .map_err(|e| format!("Failed to list nodes: {e}"))?;

        let interfaces = vec![];
        let existing_bonds: Vec<&nmstate::Interface> = network_state
            .as_ref()
            .and_then(|network_state| network_state.status.current_state.as_ref())
            .map_or(&interfaces, |current_state| &current_state.interfaces)
            .iter()
            .filter(|i| i.r#type == nmstate::InterfaceType::Bond)
            .collect();

        let used_ids: HashSet<u32> = existing_bonds
            .iter()
            .filter_map(|i| {
                i.name
                    .strip_prefix("bond")
                    .and_then(|id| id.parse::<u32>().ok())
            })
            .collect();

        let next_id = (0..).find(|id| !used_ids.contains(id)).unwrap();
        Ok(format!("bond{next_id}"))
    }
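
    // e.g. if the node currently reports bond0, bond1 and bond3, the first
    // free id is picked and this returns "bond2".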
}

@@ -9,7 +9,6 @@ mod tftp;
use std::sync::Arc;

pub use management::*;
use opnsense_config_xml::Host;
use tokio::sync::RwLock;

use crate::{executors::ExecutorError, topology::LogicalHost};

116
harmony/src/modules/brocade.rs
Normal file
@@ -0,0 +1,116 @@
use std::net::{IpAddr, Ipv4Addr};

use async_trait::async_trait;
use brocade::BrocadeOptions;
use harmony_secret::{Secret, SecretManager};
use harmony_types::id::Id;
use serde::{Deserialize, Serialize};

use crate::{
    data::Version,
    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
    inventory::Inventory,
    score::Score,
    topology::Topology,
};

#[derive(Debug, Clone, Serialize)]
pub struct BrocadeEnableSnmpScore {
    pub switch_ips: Vec<IpAddr>,
    pub dry_run: bool,
}

impl<T: Topology> Score<T> for BrocadeEnableSnmpScore {
    fn name(&self) -> String {
        "BrocadeEnableSnmpScore".to_string()
    }

    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
        Box::new(BrocadeEnableSnmpInterpret {
            score: self.clone(),
        })
    }
}

#[derive(Debug, Clone, Serialize)]
pub struct BrocadeEnableSnmpInterpret {
    score: BrocadeEnableSnmpScore,
}

#[derive(Secret, Clone, Debug, Serialize, Deserialize)]
struct BrocadeSwitchAuth {
    username: String,
    password: String,
}

#[derive(Secret, Clone, Debug, Serialize, Deserialize)]
struct BrocadeSnmpAuth {
    username: String,
    auth_password: String,
    des_password: String,
}

#[async_trait]
impl<T: Topology> Interpret<T> for BrocadeEnableSnmpInterpret {
    async fn execute(
        &self,
        _inventory: &Inventory,
        _topology: &T,
    ) -> Result<Outcome, InterpretError> {
        let switch_addresses = &self.score.switch_ips;

        let snmp_auth = SecretManager::get_or_prompt::<BrocadeSnmpAuth>()
            .await
            .unwrap();

        let config = SecretManager::get_or_prompt::<BrocadeSwitchAuth>()
            .await
            .unwrap();

        let brocade = brocade::init(
            &switch_addresses,
            &config.username,
            &config.password,
            BrocadeOptions {
                dry_run: self.score.dry_run,
                ..Default::default()
            },
        )
        .await
        .expect("Brocade client failed to connect");

        brocade
            .enable_snmp(
                &snmp_auth.username,
                &snmp_auth.auth_password,
                &snmp_auth.des_password,
            )
            .await
            .map_err(|e| InterpretError::new(e.to_string()))?;

        Ok(Outcome::success(format!(
            "Activated snmp server for Brocade at {}",
            switch_addresses
                .iter()
                .map(|s| s.to_string())
                .collect::<Vec<_>>()
                .join(", ")
        )))
    }

    fn get_name(&self) -> InterpretName {
        InterpretName::Custom("BrocadeEnableSnmpInterpret")
    }

    fn get_version(&self) -> Version {
        todo!()
    }

    fn get_status(&self) -> InterpretStatus {
        todo!()
    }

    fn get_children(&self) -> Vec<Id> {
        todo!()
    }
}

@@ -19,8 +19,11 @@ pub struct DhcpScore {
    pub host_binding: Vec<HostBinding>,
    pub next_server: Option<IpAddress>,
    pub boot_filename: Option<String>,
    /// Boot filename to be provided to PXE clients identifying as BIOS
    pub filename: Option<String>,
    /// Boot filename to be provided to PXE clients identifying as uefi but NOT iPXE
    pub filename64: Option<String>,
    /// Boot filename to be provided to PXE clients identifying as iPXE
    pub filenameipxe: Option<String>,
    pub dhcp_range: (IpAddress, IpAddress),
    pub domain: Option<String>,

@@ -5,11 +5,10 @@ use serde::{Deserialize, Serialize};

use crate::{
    data::Version,
    hardware::PhysicalHost,
    infra::inventory::InventoryRepositoryFactory,
    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
    inventory::{HostRole, Inventory},
    modules::inventory::LaunchDiscoverInventoryAgentScore,
    modules::inventory::{HarmonyDiscoveryStrategy, LaunchDiscoverInventoryAgentScore},
    score::Score,
    topology::Topology,
};
@@ -17,11 +16,13 @@ use crate::{
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DiscoverHostForRoleScore {
    pub role: HostRole,
    pub number_desired_hosts: i16,
    pub discovery_strategy: HarmonyDiscoveryStrategy,
}

impl<T: Topology> Score<T> for DiscoverHostForRoleScore {
    fn name(&self) -> String {
        "DiscoverInventoryAgentScore".to_string()
        format!("DiscoverHostForRoleScore({:?})", self.role)
    }

    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
@@ -48,13 +49,15 @@ impl<T: Topology> Interpret<T> for DiscoverHostForRoleInterpret {
        );
        LaunchDiscoverInventoryAgentScore {
            discovery_timeout: None,
            discovery_strategy: self.score.discovery_strategy.clone(),
        }
        .interpret(inventory, topology)
        .await?;

        let host: PhysicalHost;
        let mut chosen_hosts = vec![];
        let host_repo = InventoryRepositoryFactory::build().await?;

        let mut assigned_hosts = 0;
        loop {
            let all_hosts = host_repo.get_all_hosts().await?;

@@ -74,12 +77,25 @@ impl<T: Topology> Interpret<T> for DiscoverHostForRoleInterpret {

            match ans {
                Ok(choice) => {
                    info!("Selected {} as the bootstrap node.", choice.summary());
                    info!(
                        "Assigned role {:?} for node {}",
                        self.score.role,
                        choice.summary()
                    );
                    host_repo
                        .save_role_mapping(&self.score.role, &choice)
                        .await?;
                    host = choice;
                    break;
                    chosen_hosts.push(choice);
                    assigned_hosts += 1;

                    info!(
                        "Found {assigned_hosts} hosts for role {:?}",
                        self.score.role
                    );

                    if assigned_hosts == self.score.number_desired_hosts {
                        break;
                    }
                }
                Err(inquire::InquireError::OperationCanceled) => {
                    info!("Refresh requested. Fetching list of discovered hosts again...");
@@ -90,17 +106,19 @@ impl<T: Topology> Interpret<T> for DiscoverHostForRoleInterpret {
                        "Failed to select node for role {:?} : {}",
                        self.score.role, e
                    );
                    return Err(InterpretError::new(format!(
                        "Could not select host : {}",
                        e.to_string()
                    )));
                    return Err(InterpretError::new(format!("Could not select host : {e}")));
                }
            }
        }

        Ok(Outcome::success(format!(
            "Successfully discovered host {} for role {:?}",
            host.summary(),
            "Successfully discovered {} hosts {} for role {:?}",
            self.score.number_desired_hosts,
            chosen_hosts
                .iter()
                .map(|h| h.summary())
                .collect::<Vec<String>>()
                .join(", "),
            self.score.role
        )))
    }

@@ -1,6 +1,10 @@
mod discovery;
pub mod inspect;
use std::net::Ipv4Addr;

use cidr::{Ipv4Cidr, Ipv4Inet};
pub use discovery::*;
use tokio::time::{Duration, timeout};

use async_trait::async_trait;
use harmony_inventory_agent::local_presence::DiscoveryEvent;
@@ -24,6 +28,7 @@ use harmony_types::id::Id;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LaunchDiscoverInventoryAgentScore {
    pub discovery_timeout: Option<u64>,
    pub discovery_strategy: HarmonyDiscoveryStrategy,
}

impl<T: Topology> Score<T> for LaunchDiscoverInventoryAgentScore {
@@ -43,6 +48,12 @@ struct DiscoverInventoryAgentInterpret {
    score: LaunchDiscoverInventoryAgentScore,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum HarmonyDiscoveryStrategy {
    MDNS,
    SUBNET { cidr: cidr::Ipv4Cidr, port: u16 },
}
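
// Illustrative construction (values hypothetical; `Ipv4Cidr` implements `FromStr`):
//
//     let strategy = HarmonyDiscoveryStrategy::SUBNET {
//         cidr: "192.168.1.0/24".parse().unwrap(),
//         port: 8080,
//     };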

#[async_trait]
impl<T: Topology> Interpret<T> for DiscoverInventoryAgentInterpret {
    async fn execute(
@@ -57,6 +68,37 @@ impl<T: Topology> Interpret<T> for DiscoverInventoryAgentInterpret {
            ),
        };

        match self.score.discovery_strategy {
            HarmonyDiscoveryStrategy::MDNS => self.launch_mdns_discovery().await,
            HarmonyDiscoveryStrategy::SUBNET { cidr, port } => {
                self.launch_cidr_discovery(&cidr, port).await
            }
        };

        Ok(Outcome::success(
            "Discovery process completed successfully".to_string(),
        ))
    }

    fn get_name(&self) -> InterpretName {
        InterpretName::DiscoverInventoryAgent
    }

    fn get_version(&self) -> Version {
        todo!()
    }

    fn get_status(&self) -> InterpretStatus {
        todo!()
    }

    fn get_children(&self) -> Vec<Id> {
        todo!()
    }
}

impl DiscoverInventoryAgentInterpret {
    async fn launch_mdns_discovery(&self) {
        harmony_inventory_agent::local_presence::discover_agents(
            self.score.discovery_timeout,
            |event: DiscoveryEvent| -> Result<(), String> {
@@ -112,6 +154,8 @@ impl<T: Topology> Interpret<T> for DiscoverInventoryAgentInterpret {
                    cpus,
                };

                // FIXME only save the host when it is new or something changed in it.
                // we currently are saving the host every time it is discovered.
                let repo = InventoryRepositoryFactory::build()
                    .await
                    .map_err(|e| format!("Could not build repository : {e}"))
@@ -132,25 +176,111 @@ impl<T: Topology> Interpret<T> for DiscoverInventoryAgentInterpret {
                Ok(())
            },
        )
        .await;
        Ok(Outcome::success(
            "Discovery process completed successfully".to_string(),
        ))
        .await
    }

    fn get_name(&self) -> InterpretName {
        InterpretName::DiscoverInventoryAgent
    }
    // async fn launch_cidr_discovery(&self, cidr : &Ipv4Cidr, port: u16) {
    //     todo!("launch cidr discovery for {cidr} : {port}
    //     - Iterate over all possible addresses in cidr
    //     - make calls in batches of 20 attempting to reach harmony inventory agent on <addr, port> using same as above harmony_inventory_agent::client::get_host_inventory(&address, port)
    //     - Log warn when response is 404, it means the port was used by something else unexpected
    //     - Log error when response is 5xx
    //     - Log debug when no response (timeout 15 seconds)
    //     - Log info when found and response is 2xx
    //     ");
    // }
    async fn launch_cidr_discovery(&self, cidr: &Ipv4Cidr, port: u16) {
        let addrs: Vec<Ipv4Inet> = cidr.iter().collect();
        let total = addrs.len();
        info!(
            "Starting CIDR discovery for {} hosts on {}/{} (port {})",
            total,
            cidr.network_length(),
            cidr,
            port
        );

    fn get_version(&self) -> Version {
        todo!()
    }
        let batch_size: usize = 20;
        let timeout_secs = 5;
        let request_timeout = Duration::from_secs(timeout_secs);

    fn get_status(&self) -> InterpretStatus {
        todo!()
    }
        let mut current_batch = 0;
        let num_batches = addrs.len() / batch_size;

    fn get_children(&self) -> Vec<Id> {
        todo!()
        for batch in addrs.chunks(batch_size) {
            current_batch += 1;
            info!("Starting query batch {current_batch} of {num_batches}, timeout {timeout_secs}");
            let mut tasks = Vec::with_capacity(batch.len());

            for addr in batch {
                let addr = addr.address().to_string();
                let port = port;

                let task = tokio::spawn(async move {
                    match timeout(
                        request_timeout,
                        harmony_inventory_agent::client::get_host_inventory(&addr, port),
                    )
                    .await
                    {
                        Ok(Ok(host)) => {
                            info!("Found and response is 2xx for {addr}:{port}");

                            // Reuse the same conversion to PhysicalHost as MDNS flow
                            let harmony_inventory_agent::hwinfo::PhysicalHost {
                                storage_drives,
                                storage_controller,
                                memory_modules,
                                cpus,
                                chipset,
                                network_interfaces,
                                management_interface,
                                host_uuid,
                            } = host;

                            let host = PhysicalHost {
                                id: Id::from(host_uuid),
                                category: HostCategory::Server,
                                network: network_interfaces,
                                storage: storage_drives,
                                labels: vec![Label {
                                    name: "discovered-by".to_string(),
                                    value: "harmony-inventory-agent".to_string(),
                                }],
                                memory_modules,
                                cpus,
                            };

                            // Save host to inventory
                            let repo = InventoryRepositoryFactory::build()
                                .await
                                .map_err(|e| format!("Could not build repository : {e}"))
                                .unwrap();
                            if let Err(e) = repo.save(&host).await {
                                log::debug!("Failed to save host {}: {e}", host.id);
                            } else {
                                info!("Saved host id {}, summary : {}", host.id, host.summary());
                            }
                        }
                        Ok(Err(e)) => {
                            log::info!("Error querying inventory agent on {addr}:{port} : {e}");
                        }
                        Err(_) => {
                            // Timeout for this host
                            log::debug!("No response (timeout) for {addr}:{port}");
                        }
                    }
                });

                tasks.push(task);
            }

            // Wait for this batch to complete
            for t in tasks {
                let _ = t.await;
            }
        }

        info!("CIDR discovery completed");
    }
}

@@ -0,0 +1,157 @@
use std::collections::BTreeMap;

use k8s_openapi::{
    api::core::v1::{Affinity, Toleration},
    apimachinery::pkg::apis::meta::v1::ObjectMeta,
};
use kube::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use serde_json::Value;

#[derive(CustomResource, Deserialize, Serialize, Clone, Debug)]
#[kube(
    group = "operators.coreos.com",
    version = "v1alpha1",
    kind = "CatalogSource",
    plural = "catalogsources",
    namespaced = true,
    schema = "disabled"
)]
#[serde(rename_all = "camelCase")]
pub struct CatalogSourceSpec {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub address: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub config_map: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub display_name: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub grpc_pod_config: Option<GrpcPodConfig>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub icon: Option<Icon>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub image: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub priority: Option<i64>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub publisher: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub run_as_root: Option<bool>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub secrets: Option<Vec<String>>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub source_type: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub update_strategy: Option<UpdateStrategy>,
}

#[derive(Deserialize, Serialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct GrpcPodConfig {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub affinity: Option<Affinity>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub extract_content: Option<ExtractContent>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub memory_target: Option<Value>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub node_selector: Option<BTreeMap<String, String>>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub priority_class_name: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub security_context_config: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub tolerations: Option<Vec<Toleration>>,
}

#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct ExtractContent {
    pub cache_dir: String,
    pub catalog_dir: String,
}

#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct Icon {
    pub base64data: String,
    pub mediatype: String,
}

#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct UpdateStrategy {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub registry_poll: Option<RegistryPoll>,
}

#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct RegistryPoll {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub interval: Option<String>,
}
|
||||
|
||||
impl Default for CatalogSource {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
metadata: ObjectMeta::default(),
|
||||
spec: CatalogSourceSpec {
|
||||
address: None,
|
||||
config_map: None,
|
||||
description: None,
|
||||
display_name: None,
|
||||
grpc_pod_config: None,
|
||||
icon: None,
|
||||
image: None,
|
||||
priority: None,
|
||||
publisher: None,
|
||||
run_as_root: None,
|
||||
secrets: None,
|
||||
source_type: None,
|
||||
update_strategy: None,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for CatalogSourceSpec {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
address: None,
|
||||
config_map: None,
|
||||
description: None,
|
||||
display_name: None,
|
||||
grpc_pod_config: None,
|
||||
icon: None,
|
||||
image: None,
|
||||
priority: None,
|
||||
publisher: None,
|
||||
run_as_root: None,
|
||||
secrets: None,
|
||||
source_type: None,
|
||||
update_strategy: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
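A minimal sketch of how these types can be used with a `kube` client; the name, namespace, and image are illustrative and the client wiring is assumed, not part of this file:

```rust
use kube::{Api, Client, api::PostParams};

async fn create_catalog_source(client: Client) -> kube::Result<()> {
    let mut cs = CatalogSource::default();
    cs.metadata.name = Some("operatorhubio-catalog".into());
    cs.metadata.namespace = Some("openshift-marketplace".into());
    cs.spec.source_type = Some("grpc".into());
    cs.spec.image = Some("quay.io/operatorhubio/catalog:latest".into());

    // Post the CatalogSource into the marketplace namespace.
    let api: Api<CatalogSource> = Api::namespaced(client, "openshift-marketplace");
    api.create(&PostParams::default(), &cs).await?;
    Ok(())
}
```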
4
harmony/src/modules/k8s/apps/crd/mod.rs
Normal file
@@ -0,0 +1,4 @@
mod catalogsources_operators_coreos_com;
pub use catalogsources_operators_coreos_com::*;
mod subscriptions_operators_coreos_com;
pub use subscriptions_operators_coreos_com::*;
@@ -0,0 +1,68 @@
use k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta;
use kube::CustomResource;
use serde::{Deserialize, Serialize};

#[derive(CustomResource, Deserialize, Serialize, Clone, Debug)]
#[kube(
    group = "operators.coreos.com",
    version = "v1alpha1",
    kind = "Subscription",
    plural = "subscriptions",
    namespaced = true,
    schema = "disabled"
)]
#[serde(rename_all = "camelCase")]
pub struct SubscriptionSpec {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub channel: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub config: Option<SubscriptionConfig>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub install_plan_approval: Option<String>,

    pub name: String,

    pub source: String,

    pub source_namespace: String,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub starting_csv: Option<String>,
}

#[derive(Deserialize, Serialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct SubscriptionConfig {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub env: Option<Vec<k8s_openapi::api::core::v1::EnvVar>>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub node_selector: Option<std::collections::BTreeMap<String, String>>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub tolerations: Option<Vec<k8s_openapi::api::core::v1::Toleration>>,
}

impl Default for Subscription {
    fn default() -> Self {
        Subscription {
            metadata: ObjectMeta::default(),
            spec: SubscriptionSpec::default(),
        }
    }
}

impl Default for SubscriptionSpec {
    fn default() -> SubscriptionSpec {
        SubscriptionSpec {
            name: String::new(),
            source: String::new(),
            source_namespace: String::new(),
            channel: None,
            config: None,
            install_plan_approval: None,
            starting_csv: None,
        }
    }
}
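A sketch of subscribing to an operator with these types; the operator, channel, and catalog names here are illustrative, not prescribed by this commit:

```rust
use kube::{Api, Client, api::PostParams};

async fn subscribe(client: Client) -> kube::Result<()> {
    let mut sub = Subscription::default();
    sub.metadata.name = Some("cert-manager".into());
    sub.metadata.namespace = Some("operators".into());
    sub.spec = SubscriptionSpec {
        name: "cert-manager".into(),
        source: "operatorhubio-catalog".into(),
        source_namespace: "openshift-marketplace".into(),
        channel: Some("stable".into()),
        ..SubscriptionSpec::default()
    };

    // Create the Subscription; OLM resolves and installs the operator from the catalog.
    let api: Api<Subscription> = Api::namespaced(client, "operators");
    api.create(&PostParams::default(), &sub).await?;
    Ok(())
}
```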
3
harmony/src/modules/k8s/apps/mod.rs
Normal file
@@ -0,0 +1,3 @@
mod operatorhub;
pub use operatorhub::*;
pub mod crd;
107
harmony/src/modules/k8s/apps/operatorhub.rs
Normal file
@@ -0,0 +1,107 @@
// Write operatorhub catalog score
// For now this is only supported on OKD with the default catalog and OperatorHub setup; it does
// not verify OLM state or anything else. Very opinionated and bare-bones to start.

use k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta;
use serde::Serialize;

use crate::interpret::Interpret;
use crate::modules::k8s::apps::crd::{
    CatalogSource, CatalogSourceSpec, RegistryPoll, UpdateStrategy,
};
use crate::modules::k8s::resource::K8sResourceScore;
use crate::score::Score;
use crate::topology::{K8sclient, Topology};

/// Installs the CatalogSource in a cluster which already has the required services and CRDs installed.
///
/// ```rust
/// use harmony::modules::k8s::apps::OperatorHubCatalogSourceScore;
///
/// let score = OperatorHubCatalogSourceScore::default();
/// ```
///
/// Required services:
/// - catalog-operator
/// - olm-operator
///
/// They are installed by default with OKD/OpenShift.
///
/// **Warning**: this initial implementation does not manage the dependencies. They must already
/// exist in the cluster.
#[derive(Debug, Clone, Serialize)]
pub struct OperatorHubCatalogSourceScore {
    pub name: String,
    pub namespace: String,
    pub image: String,
}

impl OperatorHubCatalogSourceScore {
    pub fn new(name: &str, namespace: &str, image: &str) -> Self {
        Self {
            name: name.to_string(),
            namespace: namespace.to_string(),
            image: image.to_string(),
        }
    }
}

impl Default for OperatorHubCatalogSourceScore {
    /// This default implementation will create this k8s resource:
    ///
    /// ```yaml
    /// apiVersion: operators.coreos.com/v1alpha1
    /// kind: CatalogSource
    /// metadata:
    ///   name: operatorhubio-catalog
    ///   namespace: openshift-marketplace
    /// spec:
    ///   sourceType: grpc
    ///   image: quay.io/operatorhubio/catalog:latest
    ///   displayName: Operatorhub Operators
    ///   publisher: OperatorHub.io
    ///   updateStrategy:
    ///     registryPoll:
    ///       interval: 60m
    /// ```
    fn default() -> Self {
        OperatorHubCatalogSourceScore {
            name: "operatorhubio-catalog".to_string(),
            namespace: "openshift-marketplace".to_string(),
            image: "quay.io/operatorhubio/catalog:latest".to_string(),
        }
    }
}

impl<T: Topology + K8sclient> Score<T> for OperatorHubCatalogSourceScore {
    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
        let metadata = ObjectMeta {
            name: Some(self.name.clone()),
            namespace: Some(self.namespace.clone()),
            ..ObjectMeta::default()
        };

        let spec = CatalogSourceSpec {
            source_type: Some("grpc".to_string()),
            image: Some(self.image.clone()),
            display_name: Some("Operatorhub Operators".to_string()),
            publisher: Some("OperatorHub.io".to_string()),
            update_strategy: Some(UpdateStrategy {
                registry_poll: Some(RegistryPoll {
                    interval: Some("60m".to_string()),
                }),
            }),
            ..CatalogSourceSpec::default()
        };

        let catalog_source = CatalogSource { metadata, spec };

        K8sResourceScore::single(catalog_source, Some(self.namespace.clone())).create_interpret()
    }

    fn name(&self) -> String {
        format!("OperatorHubCatalogSourceScore({})", self.name)
    }
}
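As a sanity check, the resource built by `create_interpret` can be serialized and compared against the YAML in the doc comment. A sketch assuming a `serde_yaml` dev-dependency, which this commit does not declare:

```rust
#[cfg(test)]
mod yaml_check {
    use super::*;

    #[test]
    fn default_catalog_source_matches_documented_yaml() {
        let mut cs = CatalogSource::default();
        cs.metadata.name = Some("operatorhubio-catalog".into());
        cs.metadata.namespace = Some("openshift-marketplace".into());
        cs.spec.source_type = Some("grpc".into());
        cs.spec.image = Some("quay.io/operatorhubio/catalog:latest".into());

        let yaml = serde_yaml::to_string(&cs).expect("CatalogSource serializes");
        assert!(yaml.contains("sourceType: grpc"));
        assert!(yaml.contains("image: quay.io/operatorhubio/catalog:latest"));
    }
}
```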
@@ -1,3 +1,4 @@
pub mod apps;
pub mod deployment;
pub mod ingress;
pub mod namespace;

@@ -1,4 +1,5 @@
pub mod application;
pub mod brocade;
pub mod cert_manager;
pub mod dhcp;
pub mod dns;
@@ -13,6 +14,7 @@ pub mod load_balancer;
pub mod monitoring;
pub mod okd;
pub mod opnsense;
pub mod postgresql;
pub mod prometheus;
pub mod storage;
pub mod tenant;

@@ -4,7 +4,7 @@ use crate::{
    infra::inventory::InventoryRepositoryFactory,
    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
    inventory::{HostRole, Inventory},
    modules::inventory::DiscoverHostForRoleScore,
    modules::inventory::{DiscoverHostForRoleScore, HarmonyDiscoveryStrategy},
    score::Score,
    topology::HAClusterTopology,
};
@@ -104,6 +104,8 @@ When you can dig them, confirm to continue.
    bootstrap_host = hosts.into_iter().next().to_owned();
    DiscoverHostForRoleScore {
        role: HostRole::Bootstrap,
        number_desired_hosts: 1,
        discovery_strategy: HarmonyDiscoveryStrategy::MDNS,
    }
    .interpret(inventory, topology)
    .await?;

@@ -1,20 +1,10 @@
use crate::{
    data::Version,
    hardware::PhysicalHost,
    infra::inventory::InventoryRepositoryFactory,
    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
    inventory::{HostRole, Inventory},
    modules::{
        dhcp::DhcpHostBindingScore, http::IPxeMacBootFileScore,
        inventory::DiscoverHostForRoleScore, okd::templates::BootstrapIpxeTpl,
    },
    interpret::Interpret,
    inventory::HostRole,
    modules::{inventory::HarmonyDiscoveryStrategy, okd::bootstrap_okd_node::OKDNodeInterpret},
    score::Score,
    topology::{HAClusterTopology, HostBinding},
    topology::HAClusterTopology,
};
use async_trait::async_trait;
use derive_new::new;
use harmony_types::id::Id;
use log::{debug, info};
use serde::Serialize;

// -------------------------------------------------------------------------------------------------
@@ -23,231 +13,23 @@ use serde::Serialize;
// - Persist bonding via MachineConfigs (or NNCP) once SCOS is active.
// -------------------------------------------------------------------------------------------------

#[derive(Debug, Clone, Serialize, new)]
pub struct OKDSetup03ControlPlaneScore {}
#[derive(Debug, Clone, Serialize)]
pub struct OKDSetup03ControlPlaneScore {
    pub discovery_strategy: HarmonyDiscoveryStrategy,
}

impl Score<HAClusterTopology> for OKDSetup03ControlPlaneScore {
    fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
        Box::new(OKDSetup03ControlPlaneInterpret::new())
        // TODO: Implement a step to wait for the control plane nodes to join the cluster
        // and for the cluster operators to become available. This would be similar to
        // the `wait-for bootstrap-complete` command.
        Box::new(OKDNodeInterpret::new(
            HostRole::ControlPlane,
            self.discovery_strategy.clone(),
        ))
    }

    fn name(&self) -> String {
        "OKDSetup03ControlPlaneScore".to_string()
    }
}

#[derive(Debug, Clone)]
pub struct OKDSetup03ControlPlaneInterpret {
    version: Version,
    status: InterpretStatus,
}

impl OKDSetup03ControlPlaneInterpret {
    pub fn new() -> Self {
        let version = Version::from("1.0.0").unwrap();
        Self {
            version,
            status: InterpretStatus::QUEUED,
        }
    }

    /// Ensures that three physical hosts are discovered and available for the ControlPlane role.
    /// It will trigger discovery if not enough hosts are found.
    async fn get_nodes(
        &self,
        inventory: &Inventory,
        topology: &HAClusterTopology,
    ) -> Result<Vec<PhysicalHost>, InterpretError> {
        const REQUIRED_HOSTS: usize = 3;
        let repo = InventoryRepositoryFactory::build().await?;
        let mut control_plane_hosts = repo.get_host_for_role(&HostRole::ControlPlane).await?;

        while control_plane_hosts.len() < REQUIRED_HOSTS {
            info!(
                "Discovery of {} control plane hosts in progress, current number {}",
                REQUIRED_HOSTS,
                control_plane_hosts.len()
            );
            // This score triggers the discovery agent for a specific role.
            DiscoverHostForRoleScore {
                role: HostRole::ControlPlane,
            }
            .interpret(inventory, topology)
            .await?;
            control_plane_hosts = repo.get_host_for_role(&HostRole::ControlPlane).await?;
        }

        if control_plane_hosts.len() < REQUIRED_HOSTS {
            Err(InterpretError::new(format!(
                "OKD requires at least {} control plane hosts, but only found {}. Cannot proceed.",
                REQUIRED_HOSTS,
                control_plane_hosts.len()
            )))
        } else {
            // Take exactly the number of required hosts to ensure consistency.
            Ok(control_plane_hosts
                .into_iter()
                .take(REQUIRED_HOSTS)
                .collect())
        }
    }

    /// Configures DHCP host bindings for all control plane nodes.
    async fn configure_host_binding(
        &self,
        inventory: &Inventory,
        topology: &HAClusterTopology,
        nodes: &Vec<PhysicalHost>,
    ) -> Result<(), InterpretError> {
        info!("[ControlPlane] Configuring host bindings for control plane nodes.");

        // Ensure the topology definition matches the number of physical nodes found.
        if topology.control_plane.len() != nodes.len() {
            return Err(InterpretError::new(format!(
                "Mismatch between logical control plane hosts defined in topology ({}) and physical nodes found ({}).",
                topology.control_plane.len(),
                nodes.len()
            )));
        }

        // Create a binding for each physical host to its corresponding logical host.
        let bindings: Vec<HostBinding> = topology
            .control_plane
            .iter()
            .zip(nodes.iter())
            .map(|(logical_host, physical_host)| {
                info!(
                    "Creating binding: Logical Host '{}' -> Physical Host ID '{}'",
                    logical_host.name, physical_host.id
                );
                HostBinding {
                    logical_host: logical_host.clone(),
                    physical_host: physical_host.clone(),
                }
            })
            .collect();

        DhcpHostBindingScore {
            host_binding: bindings,
            domain: Some(topology.domain_name.clone()),
        }
        .interpret(inventory, topology)
        .await?;

        Ok(())
    }

    /// Renders and deploys a per-MAC iPXE boot file for each control plane node.
    async fn configure_ipxe(
        &self,
        inventory: &Inventory,
        topology: &HAClusterTopology,
        nodes: &Vec<PhysicalHost>,
    ) -> Result<(), InterpretError> {
        info!("[ControlPlane] Rendering per-MAC iPXE configurations.");

        // The iPXE script content is the same for all control plane nodes,
        // pointing to the 'master.ign' ignition file.
        let content = BootstrapIpxeTpl {
            http_ip: &topology.http_server.get_ip().to_string(),
            scos_path: "scos",
            ignition_http_path: "okd_ignition_files",
            installation_device: "/dev/sda", // This might need to be configurable per-host in the future
            ignition_file_name: "master.ign", // Control plane nodes use the master ignition file
        }
        .to_string();

        debug!("[ControlPlane] iPXE content template:\n{content}");

        // Create and apply an iPXE boot file for each node.
        for node in nodes {
            let mac_address = node.get_mac_address();
            if mac_address.is_empty() {
                return Err(InterpretError::new(format!(
                    "Physical host with ID '{}' has no MAC addresses defined.",
                    node.id
                )));
            }
            info!(
                "[ControlPlane] Applying iPXE config for node ID '{}' with MACs: {:?}",
                node.id, mac_address
            );

            IPxeMacBootFileScore {
                mac_address,
                content: content.clone(),
            }
            .interpret(inventory, topology)
            .await?;
        }

        Ok(())
    }

    /// Prompts the user to reboot the target control plane nodes.
    async fn reboot_targets(&self, nodes: &Vec<PhysicalHost>) -> Result<(), InterpretError> {
        let node_ids: Vec<String> = nodes.iter().map(|n| n.id.to_string()).collect();
        info!("[ControlPlane] Requesting reboot for control plane nodes: {node_ids:?}");

        let confirmation = inquire::Confirm::new(
            &format!("Please reboot the {} control plane nodes ({}) to apply their PXE configuration. Press enter when ready.", nodes.len(), node_ids.join(", ")),
        )
        .prompt()
        .map_err(|e| InterpretError::new(format!("User prompt failed: {e}")))?;

        if !confirmation {
            return Err(InterpretError::new(
                "User aborted the operation.".to_string(),
            ));
        }

        Ok(())
    }
}

#[async_trait]
impl Interpret<HAClusterTopology> for OKDSetup03ControlPlaneInterpret {
    fn get_name(&self) -> InterpretName {
        InterpretName::Custom("OKDSetup03ControlPlane")
    }

    fn get_version(&self) -> Version {
        self.version.clone()
    }

    fn get_status(&self) -> InterpretStatus {
        self.status.clone()
    }

    fn get_children(&self) -> Vec<Id> {
        vec![]
    }

    async fn execute(
        &self,
        inventory: &Inventory,
        topology: &HAClusterTopology,
    ) -> Result<Outcome, InterpretError> {
        // 1. Ensure we have 3 physical hosts for the control plane.
        let nodes = self.get_nodes(inventory, topology).await?;

        // 2. Create DHCP reservations for the control plane nodes.
        self.configure_host_binding(inventory, topology, &nodes)
            .await?;

        // 3. Create iPXE files for each control plane node to boot from the master ignition.
        self.configure_ipxe(inventory, topology, &nodes).await?;

        // 4. Reboot the nodes to start the OS installation.
        self.reboot_targets(&nodes).await?;

        // TODO: Implement a step to wait for the control plane nodes to join the cluster
        // and for the cluster operators to become available. This would be similar to
        // the `wait-for bootstrap-complete` command.
        info!("[ControlPlane] Provisioning initiated. Monitor the cluster convergence manually.");

        Ok(Outcome::success(
            "Control plane provisioning has been successfully initiated.".into(),
        ))
    }
}

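With this refactor the per-role scores reduce to thin wrappers around `OKDNodeInterpret`. A sketch of invoking one, with `inventory` and `topology` assumed to be in scope:

```rust
let score = OKDSetup03ControlPlaneScore {
    discovery_strategy: HarmonyDiscoveryStrategy::MDNS,
};
score.interpret(&inventory, &topology).await?;
```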
@@ -1,13 +1,9 @@
use async_trait::async_trait;
use derive_new::new;
use harmony_types::id::Id;
use log::info;
use serde::Serialize;

use crate::{
    data::Version,
    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
    inventory::Inventory,
    interpret::Interpret,
    inventory::HostRole,
    modules::{inventory::HarmonyDiscoveryStrategy, okd::bootstrap_okd_node::OKDNodeInterpret},
    score::Score,
    topology::HAClusterTopology,
};
@@ -18,66 +14,20 @@ use crate::{
// - Persist bonding via MC/NNCP as required (same approach as masters).
// -------------------------------------------------------------------------------------------------

#[derive(Debug, Clone, Serialize, new)]
pub struct OKDSetup04WorkersScore {}
#[derive(Debug, Clone, Serialize)]
pub struct OKDSetup04WorkersScore {
    pub discovery_strategy: HarmonyDiscoveryStrategy,
}

impl Score<HAClusterTopology> for OKDSetup04WorkersScore {
    fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
        Box::new(OKDSetup04WorkersInterpret::new(self.clone()))
        Box::new(OKDNodeInterpret::new(
            HostRole::Worker,
            self.discovery_strategy.clone(),
        ))
    }

    fn name(&self) -> String {
        "OKDSetup04WorkersScore".to_string()
    }
}

#[derive(Debug, Clone)]
pub struct OKDSetup04WorkersInterpret {
    score: OKDSetup04WorkersScore,
    version: Version,
    status: InterpretStatus,
}

impl OKDSetup04WorkersInterpret {
    pub fn new(score: OKDSetup04WorkersScore) -> Self {
        let version = Version::from("1.0.0").unwrap();
        Self {
            version,
            score,
            status: InterpretStatus::QUEUED,
        }
    }

    async fn render_and_reboot(&self) -> Result<(), InterpretError> {
        info!("[Workers] Rendering per-MAC PXE for workers and rebooting");
        Ok(())
    }
}

#[async_trait]
impl Interpret<HAClusterTopology> for OKDSetup04WorkersInterpret {
    fn get_name(&self) -> InterpretName {
        InterpretName::Custom("OKDSetup04Workers")
    }

    fn get_version(&self) -> Version {
        self.version.clone()
    }

    fn get_status(&self) -> InterpretStatus {
        self.status.clone()
    }

    fn get_children(&self) -> Vec<Id> {
        vec![]
    }

    async fn execute(
        &self,
        _inventory: &Inventory,
        _topology: &HAClusterTopology,
    ) -> Result<Outcome, InterpretError> {
        self.render_and_reboot().await?;
        Ok(Outcome::success("Workers provisioned".into()))
    }
}

313
harmony/src/modules/okd/bootstrap_okd_node.rs
Normal file
@@ -0,0 +1,313 @@
use async_trait::async_trait;
use derive_new::new;
use harmony_types::id::Id;
use log::{debug, info};
use serde::Serialize;

use crate::{
    data::Version,
    hardware::PhysicalHost,
    infra::inventory::InventoryRepositoryFactory,
    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
    inventory::{HostRole, Inventory},
    modules::{
        dhcp::DhcpHostBindingScore,
        http::IPxeMacBootFileScore,
        inventory::{DiscoverHostForRoleScore, HarmonyDiscoveryStrategy},
        okd::{
            okd_node::{BootstrapRole, ControlPlaneRole, OKDRoleProperties, WorkerRole},
            templates::BootstrapIpxeTpl,
        },
    },
    score::Score,
    topology::{HAClusterTopology, HostBinding, LogicalHost},
};

#[derive(Debug, Clone, Serialize, new)]
pub struct OKDNodeInstallationScore {
    host_role: HostRole,
    discovery_strategy: HarmonyDiscoveryStrategy,
}

impl Score<HAClusterTopology> for OKDNodeInstallationScore {
    fn name(&self) -> String {
        "OKDNodeScore".to_string()
    }

    fn create_interpret(&self) -> Box<dyn Interpret<HAClusterTopology>> {
        Box::new(OKDNodeInterpret::new(
            self.host_role.clone(),
            self.discovery_strategy.clone(),
        ))
    }
}

#[derive(Debug, Clone)]
pub struct OKDNodeInterpret {
    host_role: HostRole,
    discovery_strategy: HarmonyDiscoveryStrategy,
}

impl OKDNodeInterpret {
    pub fn new(host_role: HostRole, discovery_strategy: HarmonyDiscoveryStrategy) -> Self {
        Self {
            host_role,
            discovery_strategy,
        }
    }

    fn okd_role_properties(&self, role: &HostRole) -> &'static dyn OKDRoleProperties {
        match role {
            HostRole::Bootstrap => &BootstrapRole,
            HostRole::ControlPlane => &ControlPlaneRole,
            HostRole::Worker => &WorkerRole,
        }
    }

    async fn get_nodes(
        &self,
        inventory: &Inventory,
        topology: &HAClusterTopology,
    ) -> Result<Vec<PhysicalHost>, InterpretError> {
        let repo = InventoryRepositoryFactory::build().await?;

        let mut hosts = repo.get_host_for_role(&self.host_role).await?;

        let okd_host_properties = self.okd_role_properties(&self.host_role);

        let required_hosts: i16 = okd_host_properties.required_hosts();

        info!(
            "Discovery of {} {} hosts in progress, current number {}",
            required_hosts,
            self.host_role,
            hosts.len()
        );
        // This score triggers the discovery agent for a specific role.
        DiscoverHostForRoleScore {
            role: self.host_role.clone(),
            number_desired_hosts: required_hosts,
            discovery_strategy: self.discovery_strategy.clone(),
        }
        .interpret(inventory, topology)
        .await?;

        hosts = repo.get_host_for_role(&self.host_role).await?;

        if hosts.len() < required_hosts.try_into().unwrap_or(0) {
            Err(InterpretError::new(format!(
                "OKD requires at least {} {} hosts, but only found {}. Cannot proceed.",
                required_hosts,
                self.host_role,
                hosts.len()
            )))
        } else {
            // Take exactly the number of required hosts to ensure consistency.
            Ok(hosts
                .into_iter()
                .take(required_hosts.try_into().unwrap())
                .collect())
        }
    }

    /// Configures DHCP host bindings for all nodes.
    async fn configure_host_binding(
        &self,
        inventory: &Inventory,
        topology: &HAClusterTopology,
        nodes: &Vec<PhysicalHost>,
    ) -> Result<(), InterpretError> {
        info!(
            "[{}] Configuring host bindings for {} nodes.",
            self.host_role, self.host_role,
        );

        let host_properties = self.okd_role_properties(&self.host_role);

        self.validate_host_node_match(nodes, host_properties.logical_hosts(topology))?;

        let bindings: Vec<HostBinding> =
            self.host_bindings(nodes, host_properties.logical_hosts(topology));

        DhcpHostBindingScore {
            host_binding: bindings,
            domain: Some(topology.domain_name.clone()),
        }
        .interpret(inventory, topology)
        .await?;

        Ok(())
    }

    // Ensure the topology definition matches the number of physical nodes found.
    fn validate_host_node_match(
        &self,
        nodes: &Vec<PhysicalHost>,
        hosts: &Vec<LogicalHost>,
    ) -> Result<(), InterpretError> {
        if hosts.len() != nodes.len() {
            return Err(InterpretError::new(format!(
                "Mismatch between logical hosts defined in topology ({}) and physical nodes found ({}).",
                hosts.len(),
                nodes.len()
            )));
        }
        Ok(())
    }

    // Create a binding for each physical host to its corresponding logical host.
    fn host_bindings(
        &self,
        nodes: &Vec<PhysicalHost>,
        hosts: &Vec<LogicalHost>,
    ) -> Vec<HostBinding> {
        hosts
            .iter()
            .zip(nodes.iter())
            .map(|(logical_host, physical_host)| {
                info!(
                    "Creating binding: Logical Host '{}' -> Physical Host ID '{}'",
                    logical_host.name, physical_host.id
                );
                HostBinding {
                    logical_host: logical_host.clone(),
                    physical_host: physical_host.clone(),
                }
            })
            .collect()
    }

    /// Renders and deploys a per-MAC iPXE boot file for each node.
    async fn configure_ipxe(
        &self,
        inventory: &Inventory,
        topology: &HAClusterTopology,
        nodes: &Vec<PhysicalHost>,
    ) -> Result<(), InterpretError> {
        info!(
            "[{}] Rendering per-MAC iPXE configurations.",
            self.host_role
        );

        let okd_role_properties = self.okd_role_properties(&self.host_role);
        // The iPXE script content is the same for all nodes of a given role,
        // pointing to that role's ignition file.
        let content = BootstrapIpxeTpl {
            http_ip: &topology.http_server.get_ip().to_string(),
            scos_path: "scos",
            ignition_http_path: "okd_ignition_files",
            // TODO: must be refactored to not only use /dev/sda
            installation_device: "/dev/sda", // This might need to be configurable per-host in the future
            ignition_file_name: okd_role_properties.ignition_file(),
        }
        .to_string();

        debug!("[{}] iPXE content template:\n{content}", self.host_role);

        // Create and apply an iPXE boot file for each node.
        for node in nodes {
            let mac_address = node.get_mac_address();
            if mac_address.is_empty() {
                return Err(InterpretError::new(format!(
                    "Physical host with ID '{}' has no MAC addresses defined.",
                    node.id
                )));
            }
            info!(
                "[{}] Applying iPXE config for node ID '{}' with MACs: {:?}",
                self.host_role, node.id, mac_address
            );

            IPxeMacBootFileScore {
                mac_address,
                content: content.clone(),
            }
            .interpret(inventory, topology)
            .await?;
        }

        Ok(())
    }

    /// Prompts the user to reboot the target nodes.
    async fn reboot_targets(&self, nodes: &Vec<PhysicalHost>) -> Result<(), InterpretError> {
        let node_ids: Vec<String> = nodes.iter().map(|n| n.id.to_string()).collect();
        info!(
            "[{}] Requesting reboot for nodes: {node_ids:?}",
            self.host_role
        );

        let confirmation = inquire::Confirm::new(
            &format!("Please reboot the {} {} nodes ({}) to apply their PXE configuration. Press enter when ready.", nodes.len(), self.host_role, node_ids.join(", ")),
        )
        .prompt()
        .map_err(|e| InterpretError::new(format!("User prompt failed: {e}")))?;

        if !confirmation {
            return Err(InterpretError::new(
                "User aborted the operation.".to_string(),
            ));
        }

        Ok(())
    }
}

#[async_trait]
impl Interpret<HAClusterTopology> for OKDNodeInterpret {
    async fn execute(
        &self,
        inventory: &Inventory,
        topology: &HAClusterTopology,
    ) -> Result<Outcome, InterpretError> {
        // 1. Ensure we have the specified number of physical hosts.
        let nodes = self.get_nodes(inventory, topology).await?;

        // 2. Create DHCP reservations for the nodes.
        self.configure_host_binding(inventory, topology, &nodes)
            .await?;

        // 3. Create iPXE files for each node to boot from the ignition.
        self.configure_ipxe(inventory, topology, &nodes).await?;

        // 4. Reboot the nodes to start the OS installation.
        self.reboot_targets(&nodes).await?;
        // TODO: Implement a step to validate that the installation of the nodes is
        // complete and for the cluster operators to become available.
        //
        // The OpenShift installer only provides two wait commands which currently need to be
        // run manually:
        // - `openshift-install wait-for bootstrap-complete`
        // - `openshift-install wait-for install-complete`
        //
        // There is no installer command that waits specifically for worker node
        // provisioning. Worker nodes join asynchronously (via ignition + CSR approval),
        // and the cluster becomes fully functional only once all nodes are Ready and the
        // cluster operators report Available=True.
        info!(
            "[{}] Provisioning initiated. Monitor the cluster convergence manually.",
            self.host_role
        );

        Ok(Outcome::success(format!(
            "{} provisioning has been successfully initiated.",
            self.host_role
        )))
    }

    fn get_name(&self) -> InterpretName {
        InterpretName::Custom("OKDNodeSetup")
    }

    fn get_version(&self) -> Version {
        todo!()
    }

    fn get_status(&self) -> InterpretStatus {
        todo!()
    }

    fn get_children(&self) -> Vec<Id> {
        todo!()
    }
}
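The `OKDRoleProperties` trait itself is not shown in this diff. A plausible shape, inferred from the calls above (`required_hosts`, `ignition_file`, `logical_hosts`); this is an assumption, not the committed definition:

```rust
// Sketch only: inferred from usage in OKDNodeInterpret, not the actual okd_node module.
pub trait OKDRoleProperties: Sync {
    /// Minimum number of physical hosts this role needs (e.g. 3 for control plane).
    fn required_hosts(&self) -> i16;

    /// Name of the ignition file served over HTTP for this role (e.g. "master.ign").
    fn ignition_file(&self) -> &'static str;

    /// Logical hosts declared for this role in the topology.
    fn logical_hosts<'a>(&self, topology: &'a HAClusterTopology) -> &'a Vec<LogicalHost>;
}
```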
@@ -1,6 +1,7 @@
use std::collections::BTreeMap;

use kube::CustomResource;
use k8s_openapi::{ClusterResourceScope, Resource};
use kube::{CustomResource, api::ObjectMeta};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use serde_json::Value;
@@ -47,28 +48,223 @@ pub struct ProbeDns {
    group = "nmstate.io",
    version = "v1",
    kind = "NodeNetworkConfigurationPolicy",
    namespaced
    namespaced = false
)]
#[serde(rename_all = "camelCase")]
pub struct NodeNetworkConfigurationPolicySpec {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub node_selector: Option<BTreeMap<String, String>>,
    pub desired_state: DesiredStateSpec,
    pub desired_state: NetworkState,
}

// Currently, kube-rs derive doesn't support resources without a `spec` field, so we have
// to implement it ourselves.
//
// Ref:
// - https://github.com/kube-rs/kube/issues/1763
// - https://github.com/kube-rs/kube/discussions/1762
#[derive(Deserialize, Serialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct NodeNetworkState {
    metadata: ObjectMeta,
    pub status: NodeNetworkStateStatus,
}

impl Resource for NodeNetworkState {
    const API_VERSION: &'static str = "nmstate.io/v1beta1";
    const GROUP: &'static str = "nmstate.io";
    const VERSION: &'static str = "v1beta1";
    const KIND: &'static str = "NodeNetworkState";
    const URL_PATH_SEGMENT: &'static str = "nodenetworkstates";
    type Scope = ClusterResourceScope;
}

impl k8s_openapi::Metadata for NodeNetworkState {
    type Ty = ObjectMeta;

    fn metadata(&self) -> &Self::Ty {
        &self.metadata
    }

    fn metadata_mut(&mut self) -> &mut Self::Ty {
        &mut self.metadata
    }
}

#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct NodeNetworkStateStatus {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub current_state: Option<NetworkState>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub handler_nmstate_version: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub host_network_manager_version: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_successful_update_time: Option<String>,
}

/// The NetworkState is the top-level struct, representing the entire
/// desired or current network state.
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct DesiredStateSpec {
    pub interfaces: Vec<InterfaceSpec>,
#[serde(deny_unknown_fields)]
pub struct NetworkState {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub hostname: Option<HostNameState>,
    #[serde(rename = "dns-resolver", skip_serializing_if = "Option::is_none")]
    pub dns: Option<DnsState>,
    #[serde(rename = "route-rules", skip_serializing_if = "Option::is_none")]
    pub rules: Option<RouteRuleState>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub routes: Option<RouteState>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub interfaces: Vec<Interface>,
    #[serde(rename = "ovs-db", skip_serializing_if = "Option::is_none")]
    pub ovsdb: Option<OvsDbGlobalConfig>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ovn: Option<OvnConfiguration>,
}

#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct InterfaceSpec {
pub struct HostNameState {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub running: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub config: Option<String>,
}

#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct DnsState {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub running: Option<DnsResolverConfig>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub config: Option<DnsResolverConfig>,
}

#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct DnsResolverConfig {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub search: Option<Vec<String>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub server: Option<Vec<String>>,
}

#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct RouteRuleState {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub config: Option<Vec<RouteRule>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub running: Option<Vec<RouteRule>>,
}

#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct RouteState {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub config: Option<Vec<Route>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub running: Option<Vec<Route>>,
}

#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct RouteRule {
    #[serde(rename = "ip-from", skip_serializing_if = "Option::is_none")]
    pub ip_from: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub priority: Option<u32>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub route_table: Option<u32>,
}

#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct Route {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub destination: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metric: Option<u32>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub next_hop_address: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub next_hop_interface: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub table_id: Option<u32>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub mtu: Option<u32>,
}

#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct OvsDbGlobalConfig {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub external_ids: Option<BTreeMap<String, String>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub other_config: Option<BTreeMap<String, String>>,
}

#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct OvnConfiguration {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub bridge_mappings: Option<Vec<OvnBridgeMapping>>,
}

#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct OvnBridgeMapping {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub localnet: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub bridge: Option<String>,
}

#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[serde(untagged)]
#[serde(rename_all = "kebab-case")]
pub enum StpSpec {
    Bool(bool),
    Options(StpOptions),
}

#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct LldpState {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub enabled: Option<bool>,
}

#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct OvsDb {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub external_ids: Option<BTreeMap<String, String>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub other_config: Option<BTreeMap<String, String>>,
}

#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct PatchState {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub peer: Option<String>,
}

#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct Interface {
    pub name: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    pub r#type: String,
    pub r#type: InterfaceType,
    pub state: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub mac_address: Option<String>,
@@ -99,9 +295,81 @@ pub struct InterfaceSpec {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub linux_bridge: Option<LinuxBridgeSpec>,
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(alias = "bridge")]
    pub ovs_bridge: Option<OvsBridgeSpec>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ethtool: Option<EthtoolSpec>,
    pub ethtool: Option<Value>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub accept_all_mac_addresses: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub identifier: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub lldp: Option<LldpState>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub permanent_mac_address: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_mtu: Option<u32>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub min_mtu: Option<u32>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub mptcp: Option<Value>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub profile_name: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub wait_ip: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ovs_db: Option<OvsDb>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub driver: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub patch: Option<PatchState>,
}

#[derive(Deserialize, Serialize, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub enum InterfaceType {
    #[serde(rename = "unknown")]
    Unknown,
    #[serde(rename = "dummy")]
    Dummy,
    #[serde(rename = "loopback")]
    Loopback,
    #[serde(rename = "linux-bridge")]
    LinuxBridge,
    #[serde(rename = "ovs-bridge")]
    OvsBridge,
    #[serde(rename = "ovs-interface")]
    OvsInterface,
    #[serde(rename = "bond")]
    Bond,
    #[serde(rename = "ipvlan")]
    IpVlan,
    #[serde(rename = "vlan")]
    Vlan,
    #[serde(rename = "vxlan")]
    Vxlan,
    #[serde(rename = "mac-vlan")]
    Macvlan,
    #[serde(rename = "mac-vtap")]
    Macvtap,
    #[serde(rename = "ethernet")]
    Ethernet,
    #[serde(rename = "infiniband")]
    Infiniband,
    #[serde(rename = "vrf")]
    Vrf,
    #[serde(rename = "veth")]
    Veth,
    #[serde(rename = "ipsec")]
    Ipsec,
    #[serde(rename = "hsr")]
    Hsr,
}

impl Default for InterfaceType {
    fn default() -> Self {
        Self::Loopback
    }
}

#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
@@ -149,6 +417,7 @@ pub struct EthernetSpec {
#[serde(rename_all = "kebab-case")]
pub struct BondSpec {
    pub mode: String,
    #[serde(alias = "port")]
    pub ports: Vec<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub options: Option<BTreeMap<String, Value>>,
@@ -287,11 +556,15 @@ pub struct OvsBridgeSpec {
#[serde(rename_all = "kebab-case")]
pub struct OvsBridgeOptions {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stp: Option<bool>,
    pub stp: Option<StpSpec>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub rstp: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub mcast_snooping_enable: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub datapath: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub fail_mode: Option<String>,
}

#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
@@ -305,18 +578,3 @@ pub struct OvsPortSpec {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub r#type: Option<String>,
}

#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct EthtoolSpec {
    // TODO: Properly describe this spec (https://nmstate.io/devel/yaml_api.html#ethtool)
}

#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct EthtoolFecSpec {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub auto: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub mode: Option<String>,
}

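To make the new `NetworkState` schema concrete, a sketch of a desired state describing a two-port bond, mirroring the nmstate YAML API. Values are illustrative, and it assumes `Interface` carries a `bond: Option<BondSpec>` field in the part of the struct elided by this hunk:

```rust
let desired = NetworkState {
    interfaces: vec![Interface {
        name: "bond0".into(),
        r#type: InterfaceType::Bond,
        state: "up".into(),
        // Assumed field: a bond section matching nmstate's link-aggregation config.
        bond: Some(BondSpec {
            mode: "802.3ad".into(),
            ports: vec!["eno1".into(), "eno2".into()],
            options: None,
        }),
        ..Interface::default()
    }],
    ..NetworkState::default()
};
```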
@@ -1,6 +1,6 @@
use async_trait::async_trait;
use harmony_types::id::Id;
use log::{debug, info};
use log::{info, warn};
use serde::Serialize;

use crate::{
@@ -9,7 +9,7 @@ use crate::{
    interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
    inventory::Inventory,
    score::Score,
    topology::{HostNetworkConfig, NetworkInterface, Switch, SwitchPort, Topology},
    topology::{HostNetworkConfig, NetworkInterface, NetworkManager, Switch, SwitchPort, Topology},
};

#[derive(Debug, Clone, Serialize)]
@@ -17,7 +17,7 @@ pub struct HostNetworkConfigurationScore {
    pub hosts: Vec<PhysicalHost>,
}

impl<T: Topology + Switch> Score<T> for HostNetworkConfigurationScore {
impl<T: Topology + NetworkManager + Switch> Score<T> for HostNetworkConfigurationScore {
    fn name(&self) -> String {
        "HostNetworkConfigurationScore".into()
    }
@@ -35,7 +35,7 @@ pub struct HostNetworkConfigurationInterpret {
}

impl HostNetworkConfigurationInterpret {
    async fn configure_network_for_host<T: Topology + Switch>(
    async fn configure_network_for_host<T: Topology + NetworkManager + Switch>(
        &self,
        topology: &T,
        host: &PhysicalHost,
@@ -49,6 +49,13 @@ impl HostNetworkConfigurationInterpret {
                switch_ports: vec![],
            });
        }
        if host.network.len() == 1 {
            info!("[Host {current_host}/{total_hosts}] Only one interface to configure, skipping");
            return Ok(HostNetworkConfig {
                host_id: host.id.clone(),
                switch_ports: vec![],
            });
        }

        let switch_ports = self
            .collect_switch_ports_for_host(topology, host, current_host, total_hosts)
@@ -59,7 +66,7 @@ impl HostNetworkConfigurationInterpret {
            switch_ports,
        };

        if !config.switch_ports.is_empty() {
        if config.switch_ports.len() > 1 {
            info!(
                "[Host {current_host}/{total_hosts}] Found {} ports for {} interfaces",
                config.switch_ports.len(),
@@ -67,15 +74,25 @@ impl HostNetworkConfigurationInterpret {
            );

            info!("[Host {current_host}/{total_hosts}] Configuring host network...");
            topology.configure_bond(&config).await.map_err(|e| {
                InterpretError::new(format!("Failed to configure host network: {e}"))
            })?;
            topology
                .configure_host_network(&config)
                .configure_port_channel(&config)
                .await
                .map_err(|e| InterpretError::new(format!("Failed to configure host: {e}")))?;
        } else {
                .map_err(|e| {
                    InterpretError::new(format!("Failed to configure host network: {e}"))
                })?;
        } else if config.switch_ports.is_empty() {
            info!(
                "[Host {current_host}/{total_hosts}] No ports found for {} interfaces, skipping",
                host.network.len()
            );
        } else {
            warn!(
                "[Host {current_host}/{total_hosts}] Found a single port for {} interfaces, skipping",
                host.network.len()
            );
        }

        Ok(config)
@@ -113,7 +130,7 @@ impl HostNetworkConfigurationInterpret {
                    port,
                });
            }
            Ok(None) => debug!("No port found for '{mac_address}', skipping"),
            Ok(None) => {}
            Err(e) => {
                return Err(InterpretError::new(format!(
                    "Failed to get port for host '{}': {}",
@@ -133,15 +150,6 @@ impl HostNetworkConfigurationInterpret {
        ];

        for config in configs {
            let host = self
                .score
                .hosts
                .iter()
                .find(|h| h.id == config.host_id)
                .unwrap();

            println!("[Host] {host}");

            if config.switch_ports.is_empty() {
                report.push(format!(
                    "⏭️ Host {}: SKIPPED (No matching switch ports found)",
@@ -169,7 +177,7 @@ impl HostNetworkConfigurationInterpret {
}

#[async_trait]
impl<T: Topology + Switch> Interpret<T> for HostNetworkConfigurationInterpret {
impl<T: Topology + NetworkManager + Switch> Interpret<T> for HostNetworkConfigurationInterpret {
    fn get_name(&self) -> InterpretName {
        InterpretName::Custom("HostNetworkConfigurationInterpret")
    }
@@ -198,6 +206,12 @@ impl<T: Topology + Switch> Interpret<T> for HostNetworkConfigurationInterpret {
        let host_count = self.score.hosts.len();
        info!("Started network configuration for {host_count} host(s)...");

        info!("Setting up NetworkManager...");
        topology
            .ensure_network_manager_installed()
            .await
            .map_err(|e| InterpretError::new(format!("NetworkManager setup failed: {e}")))?;

        info!("Setting up switch with sane defaults...");
        topology
            .setup_switch()
@@ -216,6 +230,7 @@ impl<T: Topology + Switch> Interpret<T> for HostNetworkConfigurationInterpret {
            host_configurations.push(host_configuration);
            current_host += 1;
        }

        if current_host > 1 {
            let details = self.format_host_configuration(host_configurations);

@@ -236,13 +251,15 @@ impl<T: Topology + Switch> Interpret<T> for HostNetworkConfigurationInterpret {
#[cfg(test)]
mod tests {
    use assertor::*;
    use brocade::PortOperatingMode;
    use harmony_types::{net::MacAddress, switch::PortLocation};
    use lazy_static::lazy_static;

    use crate::{
        hardware::HostCategory,
        topology::{
            HostNetworkConfig, PreparationError, PreparationOutcome, SwitchError, SwitchPort,
            HostNetworkConfig, NetworkError, PortConfig, PreparationError, PreparationOutcome,
            SwitchError, SwitchPort,
        },
    };
    use std::{
@@ -267,6 +284,18 @@ mod tests {
            speed_mbps: None,
            mtu: 1,
        };
        pub static ref YET_ANOTHER_EXISTING_INTERFACE: NetworkInterface = NetworkInterface {
            mac_address: MacAddress::try_from("AA:BB:CC:DD:EE:F3".to_string()).unwrap(),
            name: "interface-3".into(),
            speed_mbps: None,
            mtu: 1,
        };
        pub static ref LAST_EXISTING_INTERFACE: NetworkInterface = NetworkInterface {
            mac_address: MacAddress::try_from("AA:BB:CC:DD:EE:F4".to_string()).unwrap(),
            name: "interface-4".into(),
            speed_mbps: None,
            mtu: 1,
        };
        pub static ref UNKNOWN_INTERFACE: NetworkInterface = NetworkInterface {
            mac_address: MacAddress::try_from("11:22:33:44:55:61".to_string()).unwrap(),
            name: "unknown-interface".into(),
@@ -275,6 +304,8 @@ mod tests {
        };
        pub static ref PORT: PortLocation = PortLocation(1, 0, 42);
        pub static ref ANOTHER_PORT: PortLocation = PortLocation(2, 0, 42);
        pub static ref YET_ANOTHER_PORT: PortLocation = PortLocation(1, 0, 45);
        pub static ref LAST_PORT: PortLocation = PortLocation(2, 0, 45);
    }

    #[tokio::test]
@@ -290,28 +321,33 @@ mod tests {
    }

    #[tokio::test]
    async fn host_with_one_mac_address_should_create_bond_with_one_interface() {
    async fn should_setup_network_manager() {
        let host = given_host(&HOST_ID, vec![EXISTING_INTERFACE.clone()]);
        let score = given_score(vec![host]);
        let topology = TopologyWithSwitch::new();

        let _ = score.interpret(&Inventory::empty(), &topology).await;

        let configured_host_networks = topology.configured_host_networks.lock().unwrap();
        assert_that!(*configured_host_networks).contains_exactly(vec![(
            HOST_ID.clone(),
            HostNetworkConfig {
                host_id: HOST_ID.clone(),
                switch_ports: vec![SwitchPort {
                    interface: EXISTING_INTERFACE.clone(),
                    port: PORT.clone(),
                }],
            },
        )]);
        let network_manager_setup = topology.network_manager_setup.lock().unwrap();
        assert_that!(*network_manager_setup).is_true();
    }

    #[tokio::test]
    async fn host_with_multiple_mac_addresses_should_create_one_bond_with_all_interfaces() {
    async fn host_with_one_mac_address_should_skip_host_configuration() {
        let host = given_host(&HOST_ID, vec![EXISTING_INTERFACE.clone()]);
        let score = given_score(vec![host]);
        let topology = TopologyWithSwitch::new();

        let _ = score.interpret(&Inventory::empty(), &topology).await;

        let config = topology.configured_bonds.lock().unwrap();
        assert_that!(*config).is_empty();
        let config = topology.configured_port_channels.lock().unwrap();
        assert_that!(*config).is_empty();
    }

    #[tokio::test]
    async fn host_with_multiple_mac_addresses_should_configure_one_bond_with_all_interfaces() {
        let score = given_score(vec![given_host(
            &HOST_ID,
            vec![
@@ -323,8 +359,8 @@ mod tests {

        let _ = score.interpret(&Inventory::empty(), &topology).await;

        let configured_host_networks = topology.configured_host_networks.lock().unwrap();
        assert_that!(*configured_host_networks).contains_exactly(vec![(
        let config = topology.configured_bonds.lock().unwrap();
        assert_that!(*config).contains_exactly(vec![(
            HOST_ID.clone(),
            HostNetworkConfig {
                host_id: HOST_ID.clone(),
@@ -343,49 +379,183 @@ mod tests {
    }

    #[tokio::test]
    async fn multiple_hosts_should_create_one_bond_per_host() {
    async fn host_with_multiple_mac_addresses_should_configure_one_port_channel_with_all_interfaces()
    {
        let score = given_score(vec![given_host(
            &HOST_ID,
            vec![
                EXISTING_INTERFACE.clone(),
                ANOTHER_EXISTING_INTERFACE.clone(),
            ],
        )]);
        let topology = TopologyWithSwitch::new();

        let _ = score.interpret(&Inventory::empty(), &topology).await;

        let config = topology.configured_port_channels.lock().unwrap();
        assert_that!(*config).contains_exactly(vec![(
            HOST_ID.clone(),
            HostNetworkConfig {
                host_id: HOST_ID.clone(),
                switch_ports: vec![
                    SwitchPort {
                        interface: EXISTING_INTERFACE.clone(),
                        port: PORT.clone(),
                    },
                    SwitchPort {
                        interface: ANOTHER_EXISTING_INTERFACE.clone(),
                        port: ANOTHER_PORT.clone(),
                    },
                ],
            },
        )]);
    }

    #[tokio::test]
    async fn multiple_hosts_should_configure_one_bond_per_host() {
        let score = given_score(vec![
            given_host(&HOST_ID, vec![EXISTING_INTERFACE.clone()]),
            given_host(&ANOTHER_HOST_ID, vec![ANOTHER_EXISTING_INTERFACE.clone()]),
            given_host(
                &HOST_ID,
                vec![
                    EXISTING_INTERFACE.clone(),
                    ANOTHER_EXISTING_INTERFACE.clone(),
                ],
            ),
            given_host(
                &ANOTHER_HOST_ID,
                vec![
                    YET_ANOTHER_EXISTING_INTERFACE.clone(),
                    LAST_EXISTING_INTERFACE.clone(),
                ],
            ),
        ]);
        let topology = TopologyWithSwitch::new();

        let _ = score.interpret(&Inventory::empty(), &topology).await;

        let configured_host_networks = topology.configured_host_networks.lock().unwrap();
        assert_that!(*configured_host_networks).contains_exactly(vec![
        let config = topology.configured_bonds.lock().unwrap();
        assert_that!(*config).contains_exactly(vec![
            (
                HOST_ID.clone(),
                HostNetworkConfig {
                    host_id: HOST_ID.clone(),
                    switch_ports: vec![SwitchPort {
                        interface: EXISTING_INTERFACE.clone(),
                        port: PORT.clone(),
                    }],
                    switch_ports: vec![
                        SwitchPort {
                            interface: EXISTING_INTERFACE.clone(),
                            port: PORT.clone(),
                        },
                        SwitchPort {
                            interface: ANOTHER_EXISTING_INTERFACE.clone(),
                            port: ANOTHER_PORT.clone(),
                        },
                    ],
                },
            ),
            (
                ANOTHER_HOST_ID.clone(),
                HostNetworkConfig {
                    host_id: ANOTHER_HOST_ID.clone(),
                    switch_ports: vec![SwitchPort {
                        interface: ANOTHER_EXISTING_INTERFACE.clone(),
                        port: ANOTHER_PORT.clone(),
                    }],
                    switch_ports: vec![
                        SwitchPort {
                            interface: YET_ANOTHER_EXISTING_INTERFACE.clone(),
                            port: YET_ANOTHER_PORT.clone(),
                        },
                        SwitchPort {
                            interface: LAST_EXISTING_INTERFACE.clone(),
                            port: LAST_PORT.clone(),
                        },
                    ],
                },
            ),
        ]);
    }

    #[tokio::test]
    async fn port_not_found_for_mac_address_should_not_configure_interface() {
    async fn multiple_hosts_should_configure_one_port_channel_per_host() {
        let score = given_score(vec![
            given_host(
                &HOST_ID,
                vec![
                    EXISTING_INTERFACE.clone(),
                    ANOTHER_EXISTING_INTERFACE.clone(),
                ],
            ),
            given_host(
                &ANOTHER_HOST_ID,
                vec![
                    YET_ANOTHER_EXISTING_INTERFACE.clone(),
                    LAST_EXISTING_INTERFACE.clone(),
                ],
            ),
        ]);
        let topology = TopologyWithSwitch::new();

        let _ = score.interpret(&Inventory::empty(), &topology).await;

        let config = topology.configured_port_channels.lock().unwrap();
        assert_that!(*config).contains_exactly(vec![
            (
                HOST_ID.clone(),
                HostNetworkConfig {
                    host_id: HOST_ID.clone(),
                    switch_ports: vec![
                        SwitchPort {
                            interface: EXISTING_INTERFACE.clone(),
                            port: PORT.clone(),
                        },
                        SwitchPort {
                            interface: ANOTHER_EXISTING_INTERFACE.clone(),
                            port: ANOTHER_PORT.clone(),
                        },
                    ],
                },
            ),
            (
                ANOTHER_HOST_ID.clone(),
                HostNetworkConfig {
                    host_id: ANOTHER_HOST_ID.clone(),
                    switch_ports: vec![
                        SwitchPort {
                            interface: YET_ANOTHER_EXISTING_INTERFACE.clone(),
                            port: YET_ANOTHER_PORT.clone(),
|
||||
},
|
||||
SwitchPort {
|
||||
interface: LAST_EXISTING_INTERFACE.clone(),
|
||||
port: LAST_PORT.clone(),
|
||||
},
|
||||
],
|
||||
},
|
||||
),
|
||||
]);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn port_not_found_for_mac_address_should_not_configure_host() {
|
||||
let score = given_score(vec![given_host(&HOST_ID, vec![UNKNOWN_INTERFACE.clone()])]);
|
||||
let topology = TopologyWithSwitch::new_port_not_found();
|
||||
|
||||
let _ = score.interpret(&Inventory::empty(), &topology).await;
|
||||
|
||||
let configured_host_networks = topology.configured_host_networks.lock().unwrap();
|
||||
assert_that!(*configured_host_networks).is_empty();
|
||||
let config = topology.configured_port_channels.lock().unwrap();
|
||||
assert_that!(*config).is_empty();
|
||||
let config = topology.configured_bonds.lock().unwrap();
|
||||
assert_that!(*config).is_empty();
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn only_one_port_found_for_multiple_mac_addresses_should_not_configure_host() {
|
||||
let score = given_score(vec![given_host(
|
||||
&HOST_ID,
|
||||
vec![EXISTING_INTERFACE.clone(), UNKNOWN_INTERFACE.clone()],
|
||||
)]);
|
||||
let topology = TopologyWithSwitch::new_single_port_found();
|
||||
|
||||
let _ = score.interpret(&Inventory::empty(), &topology).await;
|
||||
|
||||
let config = topology.configured_port_channels.lock().unwrap();
|
||||
assert_that!(*config).is_empty();
|
||||
let config = topology.configured_bonds.lock().unwrap();
|
||||
assert_that!(*config).is_empty();
|
||||
}
|
||||
|
||||
fn given_score(hosts: Vec<PhysicalHost>) -> HostNetworkConfigurationScore {
|
||||
@@ -422,26 +592,48 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct TopologyWithSwitch {
|
||||
available_ports: Arc<Mutex<Vec<PortLocation>>>,
|
||||
configured_host_networks: Arc<Mutex<Vec<(Id, HostNetworkConfig)>>>,
|
||||
configured_port_channels: Arc<Mutex<Vec<(Id, HostNetworkConfig)>>>,
|
||||
switch_setup: Arc<Mutex<bool>>,
|
||||
network_manager_setup: Arc<Mutex<bool>>,
|
||||
configured_bonds: Arc<Mutex<Vec<(Id, HostNetworkConfig)>>>,
|
||||
}
|
||||
|
||||
impl TopologyWithSwitch {
|
||||
fn new() -> Self {
|
||||
Self {
|
||||
available_ports: Arc::new(Mutex::new(vec![PORT.clone(), ANOTHER_PORT.clone()])),
|
||||
configured_host_networks: Arc::new(Mutex::new(vec![])),
|
||||
available_ports: Arc::new(Mutex::new(vec![
|
||||
PORT.clone(),
|
||||
ANOTHER_PORT.clone(),
|
||||
YET_ANOTHER_PORT.clone(),
|
||||
LAST_PORT.clone(),
|
||||
])),
|
||||
configured_port_channels: Arc::new(Mutex::new(vec![])),
|
||||
switch_setup: Arc::new(Mutex::new(false)),
|
||||
network_manager_setup: Arc::new(Mutex::new(false)),
|
||||
configured_bonds: Arc::new(Mutex::new(vec![])),
|
||||
}
|
||||
}
|
||||
|
||||
fn new_port_not_found() -> Self {
|
||||
Self {
|
||||
available_ports: Arc::new(Mutex::new(vec![])),
|
||||
configured_host_networks: Arc::new(Mutex::new(vec![])),
|
||||
configured_port_channels: Arc::new(Mutex::new(vec![])),
|
||||
switch_setup: Arc::new(Mutex::new(false)),
|
||||
network_manager_setup: Arc::new(Mutex::new(false)),
|
||||
configured_bonds: Arc::new(Mutex::new(vec![])),
|
||||
}
|
||||
}
|
||||
|
||||
fn new_single_port_found() -> Self {
|
||||
Self {
|
||||
available_ports: Arc::new(Mutex::new(vec![PORT.clone()])),
|
||||
configured_port_channels: Arc::new(Mutex::new(vec![])),
|
||||
switch_setup: Arc::new(Mutex::new(false)),
|
||||
network_manager_setup: Arc::new(Mutex::new(false)),
|
||||
configured_bonds: Arc::new(Mutex::new(vec![])),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -457,6 +649,22 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl NetworkManager for TopologyWithSwitch {
|
||||
async fn ensure_network_manager_installed(&self) -> Result<(), NetworkError> {
|
||||
let mut network_manager_installed = self.network_manager_setup.lock().unwrap();
|
||||
*network_manager_installed = true;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn configure_bond(&self, config: &HostNetworkConfig) -> Result<(), NetworkError> {
|
||||
let mut configured_bonds = self.configured_bonds.lock().unwrap();
|
||||
configured_bonds.push((config.host_id.clone(), config.clone()));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Switch for TopologyWithSwitch {
|
||||
async fn setup_switch(&self) -> Result<(), SwitchError> {
|
||||
@@ -476,14 +684,23 @@ mod tests {
|
||||
Ok(Some(ports.remove(0)))
|
||||
}
|
||||
|
||||
async fn configure_host_network(
|
||||
async fn configure_port_channel(
|
||||
&self,
|
||||
config: &HostNetworkConfig,
|
||||
) -> Result<(), SwitchError> {
|
||||
let mut configured_host_networks = self.configured_host_networks.lock().unwrap();
|
||||
configured_host_networks.push((config.host_id.clone(), config.clone()));
|
||||
let mut configured_port_channels = self.configured_port_channels.lock().unwrap();
|
||||
configured_port_channels.push((config.host_id.clone(), config.clone()));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
async fn clear_port_channel(&self, ids: &Vec<Id>) -> Result<(), SwitchError> {
|
||||
todo!()
|
||||
}
|
||||
async fn configure_interface(
|
||||
&self,
|
||||
port_config: &Vec<PortConfig>,
|
||||
) -> Result<(), SwitchError> {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -48,10 +48,13 @@
//! - internal_domain: Internal cluster domain (e.g., cluster.local or harmony.mcd).

use crate::{
    modules::okd::{
        OKDSetup01InventoryScore, OKDSetup02BootstrapScore, OKDSetup03ControlPlaneScore,
        OKDSetup04WorkersScore, OKDSetup05SanityCheckScore, OKDSetupPersistNetworkBondScore,
        bootstrap_06_installation_report::OKDSetup06InstallationReportScore,
    modules::{
        inventory::HarmonyDiscoveryStrategy,
        okd::{
            OKDSetup01InventoryScore, OKDSetup02BootstrapScore, OKDSetup03ControlPlaneScore,
            OKDSetup04WorkersScore, OKDSetup05SanityCheckScore, OKDSetupPersistNetworkBondScore,
            bootstrap_06_installation_report::OKDSetup06InstallationReportScore,
        },
    },
    score::Score,
    topology::HAClusterTopology,
@@ -60,13 +63,19 @@ use crate::{
pub struct OKDInstallationPipeline;

impl OKDInstallationPipeline {
    pub async fn get_all_scores() -> Vec<Box<dyn Score<HAClusterTopology>>> {
    pub async fn get_all_scores(
        discovery_strategy: HarmonyDiscoveryStrategy,
    ) -> Vec<Box<dyn Score<HAClusterTopology>>> {
        vec![
            Box::new(OKDSetup01InventoryScore::new()),
            Box::new(OKDSetup02BootstrapScore::new()),
            Box::new(OKDSetup03ControlPlaneScore::new()),
            Box::new(OKDSetup03ControlPlaneScore {
                discovery_strategy: discovery_strategy.clone(),
            }),
            Box::new(OKDSetupPersistNetworkBondScore::new()),
            Box::new(OKDSetup04WorkersScore::new()),
            Box::new(OKDSetup04WorkersScore {
                discovery_strategy: discovery_strategy.clone(),
            }),
            Box::new(OKDSetup05SanityCheckScore::new()),
            Box::new(OKDSetup06InstallationReportScore::new()),
        ]
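
> Editor's note: a minimal calling sketch for the new signature above (illustration only, not part of this commit; the concrete `HarmonyDiscoveryStrategy` value is assumed to come from the caller's configuration):

```rust
// Each pipeline step that performs discovery now receives the strategy explicitly.
let scores = OKDInstallationPipeline::get_all_scores(discovery_strategy.clone()).await;
for score in &scores {
    println!("queued step: {}", score.name());
}
```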

@@ -6,12 +6,14 @@ mod bootstrap_05_sanity_check;
mod bootstrap_06_installation_report;
pub mod bootstrap_dhcp;
pub mod bootstrap_load_balancer;
pub mod bootstrap_okd_node;
mod bootstrap_persist_network_bond;
pub mod dhcp;
pub mod dns;
pub mod installation;
pub mod ipxe;
pub mod load_balancer;
pub mod okd_node;
pub mod templates;
pub mod upgrade;
pub use bootstrap_01_prepare::*;

54 harmony/src/modules/okd/okd_node.rs Normal file
@@ -0,0 +1,54 @@
use crate::topology::{HAClusterTopology, LogicalHost};

pub trait OKDRoleProperties {
    fn ignition_file(&self) -> &'static str;
    fn required_hosts(&self) -> i16;
    fn logical_hosts<'a>(&self, t: &'a HAClusterTopology) -> &'a Vec<LogicalHost>;
}

pub struct BootstrapRole;
pub struct ControlPlaneRole;
pub struct WorkerRole;
pub struct StorageRole;

impl OKDRoleProperties for BootstrapRole {
    fn ignition_file(&self) -> &'static str {
        "bootstrap.ign"
    }

    fn required_hosts(&self) -> i16 {
        1
    }

    fn logical_hosts<'a>(&self, t: &'a HAClusterTopology) -> &'a Vec<LogicalHost> {
        todo!()
    }
}

impl OKDRoleProperties for ControlPlaneRole {
    fn ignition_file(&self) -> &'static str {
        "master.ign"
    }

    fn required_hosts(&self) -> i16 {
        3
    }

    fn logical_hosts<'a>(&self, t: &'a HAClusterTopology) -> &'a Vec<LogicalHost> {
        &t.control_plane
    }
}

impl OKDRoleProperties for WorkerRole {
    fn ignition_file(&self) -> &'static str {
        "worker.ign"
    }

    fn required_hosts(&self) -> i16 {
        2
    }

    fn logical_hosts<'a>(&self, t: &'a HAClusterTopology) -> &'a Vec<LogicalHost> {
        &t.workers
    }
}
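
> Editor's note: the trait above turns per-role constants into data that generic provisioning code can consume. A hedged sketch (illustration only, not part of this commit):

```rust
// Generic over any OKD role; uses only the three trait methods defined above.
fn describe_role<R: OKDRoleProperties>(role: &R, t: &HAClusterTopology) {
    println!(
        "role needs {} host(s), ignition file '{}', {} logical host(s) known",
        role.required_hosts(),
        role.ignition_file(),
        role.logical_hosts(t).len(),
    );
}
```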

85 harmony/src/modules/postgresql/capability.rs Normal file
@@ -0,0 +1,85 @@
use async_trait::async_trait;
use harmony_types::storage::StorageSize;
use serde::Serialize;
use std::collections::HashMap;

#[async_trait]
pub trait PostgreSQL: Send + Sync {
    async fn deploy(&self, config: &PostgreSQLConfig) -> Result<String, String>;

    /// Extracts PostgreSQL-specific replication certs (PEM format) from a deployed primary cluster.
    /// Abstracts away storage/retrieval details (e.g., secrets, files).
    async fn get_replication_certs(&self, cluster_name: &str) -> Result<ReplicationCerts, String>;

    /// Gets the internal/private endpoint (e.g., k8s service FQDN:5432) for the cluster.
    async fn get_endpoint(&self, cluster_name: &str) -> Result<PostgreSQLEndpoint, String>;

    /// Gets the public/externally routable endpoint if configured (e.g., OKD Route:443 for TLS passthrough).
    /// Returns None if no public endpoint (internal-only cluster).
    /// UNSTABLE: This is opinionated for initial multisite use cases. Networking abstraction is complex
    /// (cf. k8s Ingress -> Gateway API evolution); may move to higher-order Networking/PostgreSQLNetworking trait.
    async fn get_public_endpoint(
        &self,
        cluster_name: &str,
    ) -> Result<Option<PostgreSQLEndpoint>, String>;
}

#[derive(Clone, Debug, Serialize)]
pub struct PostgreSQLConfig {
    pub cluster_name: String,
    pub instances: u32,
    pub storage_size: StorageSize,
    pub role: PostgreSQLClusterRole,
}

#[derive(Clone, Debug, Serialize)]
pub enum PostgreSQLClusterRole {
    Primary,
    Replica(ReplicaConfig),
}

#[derive(Clone, Debug, Serialize)]
pub struct ReplicaConfig {
    /// Name of the primary cluster this replica will sync from
    pub primary_cluster_name: String,
    /// Certs extracted from primary via Topology::get_replication_certs()
    pub replication_certs: ReplicationCerts,
    /// Bootstrap method (e.g., pg_basebackup from primary)
    pub bootstrap: BootstrapConfig,
    /// External cluster connection details for CNPG spec.externalClusters
    pub external_cluster: ExternalClusterConfig,
}

#[derive(Clone, Debug, Serialize)]
pub struct BootstrapConfig {
    pub strategy: BootstrapStrategy,
}

#[derive(Clone, Debug, Serialize)]
pub enum BootstrapStrategy {
    PgBasebackup,
}

#[derive(Clone, Debug, Serialize)]
pub struct ExternalClusterConfig {
    /// Name used in CNPG externalClusters list
    pub name: String,
    /// Connection params (host/port set by multisite logic, sslmode='verify-ca', etc.)
    pub connection_parameters: HashMap<String, String>,
}

#[derive(Clone, Debug, Serialize)]
pub struct ReplicationCerts {
    /// PEM-encoded CA cert from primary
    pub ca_cert_pem: String,
    /// PEM-encoded streaming_replica client cert (tls.crt)
    pub streaming_replica_cert_pem: String,
    /// PEM-encoded streaming_replica client key (tls.key)
    pub streaming_replica_key_pem: String,
}

#[derive(Clone, Debug)]
pub struct PostgreSQLEndpoint {
    pub host: String,
    pub port: u16,
}
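
> Editor's note: taken together, these types describe a replica bootstrap end to end. A minimal assembly sketch (illustration only; `storage_size` and `certs` are assumed values — in practice the certs come from `PostgreSQL::get_replication_certs()` on the primary):

```rust
use std::collections::HashMap;

let replica = PostgreSQLConfig {
    cluster_name: "db-replica".to_string(),
    instances: 2,
    storage_size, // assumed: built via whatever constructor StorageSize exposes
    role: PostgreSQLClusterRole::Replica(ReplicaConfig {
        primary_cluster_name: "db".to_string(),
        replication_certs: certs, // assumed: fetched from the primary cluster
        bootstrap: BootstrapConfig {
            strategy: BootstrapStrategy::PgBasebackup,
        },
        external_cluster: ExternalClusterConfig {
            name: "db".to_string(),
            connection_parameters: HashMap::from([
                ("host".to_string(), "db.example.com".to_string()),
                ("sslmode".to_string(), "verify-ca".to_string()),
            ]),
        },
    }),
};
```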

125 harmony/src/modules/postgresql/failover.rs Normal file
@@ -0,0 +1,125 @@
use async_trait::async_trait;
use log::debug;
use log::info;
use std::collections::HashMap;

use crate::{
    modules::postgresql::capability::{
        BootstrapConfig, BootstrapStrategy, ExternalClusterConfig, PostgreSQL,
        PostgreSQLClusterRole, PostgreSQLConfig, PostgreSQLEndpoint, ReplicaConfig,
        ReplicationCerts,
    },
    topology::FailoverTopology,
};

#[async_trait]
impl<T: PostgreSQL> PostgreSQL for FailoverTopology<T> {
    async fn deploy(&self, config: &PostgreSQLConfig) -> Result<String, String> {
        info!(
            "Starting deployment of failover topology '{}'",
            config.cluster_name
        );

        let primary_config = PostgreSQLConfig {
            cluster_name: config.cluster_name.clone(),
            instances: config.instances,
            storage_size: config.storage_size.clone(),
            role: PostgreSQLClusterRole::Primary,
        };

        info!(
            "Deploying primary cluster '{}' ({} instances, {:?} storage)",
            primary_config.cluster_name, primary_config.instances, primary_config.storage_size
        );

        let primary_cluster_name = self.primary.deploy(&primary_config).await?;

        info!("Primary cluster '{primary_cluster_name}' deployed successfully");

        info!("Retrieving replication certificates for primary '{primary_cluster_name}'");

        let certs = self
            .primary
            .get_replication_certs(&primary_cluster_name)
            .await?;

        info!("Replication certificates retrieved successfully");

        info!("Retrieving public endpoint for primary '{primary_cluster_name}'");

        let endpoint = self
            .primary
            .get_public_endpoint(&primary_cluster_name)
            .await?
            .ok_or_else(|| "No public endpoint configured on primary cluster".to_string())?;

        info!(
            "Public endpoint '{}:{}' retrieved for primary",
            endpoint.host, endpoint.port
        );

        info!("Configuring replica connection parameters and bootstrap");

        let mut connection_parameters = HashMap::new();
        connection_parameters.insert("host".to_string(), endpoint.host);
        connection_parameters.insert("port".to_string(), endpoint.port.to_string());
        connection_parameters.insert("dbname".to_string(), "postgres".to_string());
        connection_parameters.insert("user".to_string(), "streaming_replica".to_string());
        connection_parameters.insert("sslmode".to_string(), "verify-ca".to_string());
        connection_parameters.insert("sslnegotiation".to_string(), "direct".to_string());

        debug!("Replica connection parameters: {:?}", connection_parameters);

        let external_cluster = ExternalClusterConfig {
            name: primary_cluster_name.clone(),
            connection_parameters,
        };

        let bootstrap_config = BootstrapConfig {
            strategy: BootstrapStrategy::PgBasebackup,
        };

        let replica_cluster_config = ReplicaConfig {
            primary_cluster_name: primary_cluster_name.clone(),
            replication_certs: certs,
            bootstrap: bootstrap_config,
            external_cluster,
        };

        let replica_config = PostgreSQLConfig {
            cluster_name: format!("{}-replica", primary_cluster_name),
            instances: config.instances,
            storage_size: config.storage_size.clone(),
            role: PostgreSQLClusterRole::Replica(replica_cluster_config),
        };

        info!(
            "Deploying replica cluster '{}' ({} instances, {:?} storage) on replica topology",
            replica_config.cluster_name, replica_config.instances, replica_config.storage_size
        );

        self.replica.deploy(&replica_config).await?;

        info!(
            "Replica cluster '{}' deployed successfully; failover topology '{}' ready",
            replica_config.cluster_name, config.cluster_name
        );

        Ok(primary_cluster_name)
    }

    async fn get_replication_certs(&self, cluster_name: &str) -> Result<ReplicationCerts, String> {
        self.primary.get_replication_certs(cluster_name).await
    }

    async fn get_endpoint(&self, cluster_name: &str) -> Result<PostgreSQLEndpoint, String> {
        self.primary.get_endpoint(cluster_name).await
    }

    async fn get_public_endpoint(
        &self,
        cluster_name: &str,
    ) -> Result<Option<PostgreSQLEndpoint>, String> {
        self.primary.get_public_endpoint(cluster_name).await
    }
}
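
> Editor's note: because this is a blanket impl over any `T: PostgreSQL`, a failover pair needs no per-topology glue. A usage sketch (illustration only; field-literal construction and the two site topologies are assumptions):

```rust
// Any two topologies implementing PostgreSQL compose into a failover pair;
// deploy() then drives the whole primary -> certs -> endpoint -> replica flow.
let failover = FailoverTopology {
    primary: site_a, // assumed: e.g. a K8sAnywhereTopology value
    replica: site_b,
};
let primary_name = failover.deploy(&config).await?;
```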

6 harmony/src/modules/postgresql/mod.rs Normal file
@@ -0,0 +1,6 @@
pub mod capability;
mod score;

pub mod failover;
mod operator;
pub use operator::*;

102 harmony/src/modules/postgresql/operator.rs Normal file
@@ -0,0 +1,102 @@
use k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta;
use serde::Serialize;

use crate::interpret::Interpret;
use crate::modules::k8s::apps::crd::{Subscription, SubscriptionSpec};
use crate::modules::k8s::resource::K8sResourceScore;
use crate::score::Score;
use crate::topology::{K8sclient, Topology};

/// Install the CloudNativePg (CNPG) Operator via an OperatorHub `Subscription`.
///
/// This Score creates a `Subscription` Custom Resource in the specified namespace.
///
/// The default implementation pulls the `cloudnative-pg` operator from the
/// `operatorhubio-catalog` source.
///
/// # Goals
/// - Deploy the CNPG Operator to manage PostgreSQL clusters in OpenShift/OKD environments.
///
/// # Usage
/// ```
/// use harmony::modules::postgresql::CloudNativePgOperatorScore;
/// let score = CloudNativePgOperatorScore::default();
/// ```
///
/// Or, you can take control of the most relevant fields this way:
///
/// ```
/// use harmony::modules::postgresql::CloudNativePgOperatorScore;
///
/// let score = CloudNativePgOperatorScore {
///     namespace: "custom-cnpg-namespace".to_string(),
///     channel: "unstable-i-want-bleedingedge-v498437".to_string(),
///     install_plan_approval: "Manual".to_string(),
///     source: "operatorhubio-catalog-but-different".to_string(),
///     source_namespace: "i-customize-everything-marketplace".to_string(),
/// };
/// ```
///
/// # Limitations
/// - **OperatorHub dependency**: Requires OperatorHub catalog sources (e.g., `operatorhubio-catalog` in `openshift-marketplace`).
/// - **OKD/OpenShift assumption**: Catalog/source names and namespaces are hardcoded for OKD-like setups; adjust for upstream OpenShift.
/// - **Hardcoded values in Default implementation**: Operator name (`cloudnative-pg`), channel (`stable-v1`), automatic install plan approval.
/// - **No config options**: Does not support custom `SubscriptionConfig` (env vars, node selectors, tolerations).
/// - **Single namespace**: Targets one namespace per score instance.
#[derive(Debug, Clone, Serialize)]
pub struct CloudNativePgOperatorScore {
    pub namespace: String,
    pub channel: String,
    pub install_plan_approval: String,
    pub source: String,
    pub source_namespace: String,
}

impl Default for CloudNativePgOperatorScore {
    fn default() -> Self {
        Self {
            namespace: "openshift-operators".to_string(),
            channel: "stable-v1".to_string(),
            install_plan_approval: "Automatic".to_string(),
            source: "operatorhubio-catalog".to_string(),
            source_namespace: "openshift-marketplace".to_string(),
        }
    }
}

impl CloudNativePgOperatorScore {
    pub fn new(namespace: &str) -> Self {
        Self {
            namespace: namespace.to_string(),
            ..Default::default()
        }
    }
}

impl<T: Topology + K8sclient> Score<T> for CloudNativePgOperatorScore {
    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
        let metadata = ObjectMeta {
            name: Some("cloudnative-pg".to_string()),
            namespace: Some(self.namespace.clone()),
            ..ObjectMeta::default()
        };

        let spec = SubscriptionSpec {
            channel: Some(self.channel.clone()),
            config: None,
            install_plan_approval: Some(self.install_plan_approval.clone()),
            name: "cloudnative-pg".to_string(),
            source: self.source.clone(),
            source_namespace: self.source_namespace.clone(),
            starting_csv: None,
        };

        let subscription = Subscription { metadata, spec };

        K8sResourceScore::single(subscription, Some(self.namespace.clone())).create_interpret()
    }

    fn name(&self) -> String {
        format!("CloudNativePgOperatorScore({})", self.namespace)
    }
}
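
> Editor's note: with `Default`, the rendered OLM `Subscription` is named `cloudnative-pg` in `openshift-operators`, on channel `stable-v1` from `operatorhubio-catalog`. Overriding only the namespace stays terse (illustration only):

```rust
// Install CNPG into a dedicated namespace, keeping every other Default value.
let score = CloudNativePgOperatorScore::new("cnpg-system");
// score.namespace == "cnpg-system"; channel, source and approval keep the defaults.
```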

88 harmony/src/modules/postgresql/score.rs Normal file
@@ -0,0 +1,88 @@
use crate::{
    domain::{data::Version, interpret::InterpretStatus},
    interpret::{Interpret, InterpretError, InterpretName, Outcome},
    inventory::Inventory,
    modules::postgresql::capability::PostgreSQL,
    score::Score,
    topology::Topology,
};

use super::capability::*;

use harmony_types::id::Id;

use async_trait::async_trait;
use log::info;
use serde::Serialize;

#[derive(Clone, Debug, Serialize)]
pub struct PostgreSQLScore {
    config: PostgreSQLConfig,
}

#[derive(Debug, Clone)]
pub struct PostgreSQLInterpret {
    config: PostgreSQLConfig,
    version: Version,
    status: InterpretStatus,
}

impl PostgreSQLInterpret {
    pub fn new(config: PostgreSQLConfig) -> Self {
        let version = Version::from("1.0.0").expect("Version should be valid");
        Self {
            config,
            version,
            status: InterpretStatus::QUEUED,
        }
    }
}

impl<T: Topology + PostgreSQL> Score<T> for PostgreSQLScore {
    fn name(&self) -> String {
        "PostgreSQLScore".to_string()
    }

    fn create_interpret(&self) -> Box<dyn Interpret<T>> {
        Box::new(PostgreSQLInterpret::new(self.config.clone()))
    }
}

#[async_trait]
impl<T: Topology + PostgreSQL> Interpret<T> for PostgreSQLInterpret {
    fn get_name(&self) -> InterpretName {
        InterpretName::Custom("PostgreSQLInterpret")
    }

    fn get_version(&self) -> crate::domain::data::Version {
        self.version.clone()
    }

    fn get_status(&self) -> InterpretStatus {
        self.status.clone()
    }

    fn get_children(&self) -> Vec<Id> {
        todo!()
    }

    async fn execute(
        &self,
        _inventory: &Inventory,
        topology: &T,
    ) -> Result<Outcome, InterpretError> {
        info!(
            "Executing PostgreSQLInterpret with config {:?}",
            self.config
        );

        let cluster_name = topology
            .deploy(&self.config)
            .await
            .map_err(|e| InterpretError::from(e))?;

        Ok(Outcome::success(format!(
            "Deployed PostgreSQL cluster `{cluster_name}`"
        )))
    }
}
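
> Editor's note: the bound `T: Topology + PostgreSQL` means this one score runs unchanged against a single cluster or a `FailoverTopology`, since both satisfy the capability. A hedged sketch (illustration only; `PostgreSQLScore` has a private `config` field, so construction like this is assumed to happen inside the crate):

```rust
// Reuses the interpret() pattern shown in the tests earlier in this diff.
let score = PostgreSQLScore { config }; // crate-internal construction assumed
let outcome = score.interpret(&Inventory::empty(), &failover_topology).await?;
```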

11 harmony_inventory_agent/build_docker.sh Executable file
@@ -0,0 +1,11 @@
cargo build -p harmony_inventory_agent --release --target x86_64-unknown-linux-musl

SCRIPT_DIR="$(dirname ${0})"

cd "${SCRIPT_DIR}/docker/"

cp ../../target/x86_64-unknown-linux-musl/release/harmony_inventory_agent .

docker build . -t hub.nationtech.io/harmony/harmony_inventory_agent

docker push hub.nationtech.io/harmony/harmony_inventory_agent

1 harmony_inventory_agent/docker/.gitignore vendored Normal file
@@ -0,0 +1 @@
harmony_inventory_agent

17 harmony_inventory_agent/docker/Dockerfile Normal file
@@ -0,0 +1,17 @@
FROM debian:12-slim

# install packages required to make these commands available : lspci, lsmod, dmidecode, smartctl, ip
RUN apt-get update && \
    apt-get install -y --no-install-recommends pciutils kmod dmidecode smartmontools iproute2 && \
    rm -rf /var/lib/apt/lists/*


RUN mkdir /app
WORKDIR /app/

COPY harmony_inventory_agent /app/

ENV RUST_LOG=info

CMD [ "/app/harmony_inventory_agent" ]

117 harmony_inventory_agent/harmony-inventory-agent-daemonset.yaml Normal file
@@ -0,0 +1,117 @@
apiVersion: v1
kind: Namespace
metadata:
  name: harmony-inventory-agent
  labels:
    pod-security.kubernetes.io/enforce: privileged
    pod-security.kubernetes.io/audit: privileged
    pod-security.kubernetes.io/warn: privileged
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: harmony-inventory-agent
  namespace: harmony-inventory-agent
---
# Grant the built-in "privileged" SCC to the SA
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: use-privileged-scc
  namespace: harmony-inventory-agent
rules:
  - apiGroups: ["security.openshift.io"]
    resources: ["securitycontextconstraints"]
    resourceNames: ["privileged"]
    verbs: ["use"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: use-privileged-scc
  namespace: harmony-inventory-agent
subjects:
  - kind: ServiceAccount
    name: harmony-inventory-agent
    namespace: harmony-inventory-agent
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: use-privileged-scc
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: harmony-inventory-agent
  namespace: harmony-inventory-agent
spec:
  selector:
    matchLabels:
      app: harmony-inventory-agent
  template:
    metadata:
      labels:
        app: harmony-inventory-agent
    spec:
      serviceAccountName: harmony-inventory-agent
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      tolerations:
        - key: "node-role.kubernetes.io/master"
          operator: "Exists"
          effect: "NoSchedule"
      containers:
        - name: inventory-agent
          image: hub.nationtech.io/harmony/harmony_inventory_agent
          imagePullPolicy: Always
          env:
            - name: RUST_LOG
              value: "harmony_inventory_agent=trace,info"
          resources:
            limits:
              cpu: 200m
              memory: 256Mi
            requests:
              cpu: 100m
              memory: 128Mi
          securityContext:
            privileged: true
          # optional: leave the rest unset since privileged SCC allows it
#
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: harmony-inventory-builder
  namespace: harmony-inventory-agent
spec:
  replicas: 1
  strategy: {}
  selector:
    matchLabels:
      app: harmony-inventory-builder
  template:
    metadata:
      labels:
        app: harmony-inventory-builder
    spec:
      serviceAccountName: harmony-inventory-agent
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      containers:
        - name: inventory-agent
          image: hub.nationtech.io/harmony/harmony_inventory_builder
          imagePullPolicy: Always
          env:
            - name: RUST_LOG
              value: "harmony_inventory_builder=trace,info"
          resources:
            limits:
              cpu: 200m
              memory: 256Mi
            requests:
              cpu: 100m
              memory: 128Mi
          securityContext:
            privileged: true
          # optional: leave the rest unset since privileged SCC allows it

@@ -1,5 +1,5 @@
use harmony_types::net::MacAddress;
use log::{debug, warn};
use log::{debug, trace, warn};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::fs;
@@ -121,20 +121,48 @@ pub struct ManagementInterface {

impl PhysicalHost {
    pub fn gather() -> Result<Self, String> {
        trace!("Start gathering physical host information");
        let mut sys = System::new_all();
        trace!("System new_all called");
        sys.refresh_all();
        trace!("System refresh_all called");

        Self::all_tools_available()?;

        trace!("All tools_available success");

        let storage_drives = Self::gather_storage_drives()?;
        trace!("got storage drives");

        let storage_controller = Self::gather_storage_controller()?;
        trace!("got storage controller");

        let memory_modules = Self::gather_memory_modules()?;
        trace!("got memory_modules");

        let cpus = Self::gather_cpus(&sys)?;
        trace!("got cpus");

        let chipset = Self::gather_chipset()?;
        trace!("got chipsets");

        let network_interfaces = Self::gather_network_interfaces()?;
        trace!("got network_interfaces");

        let management_interface = Self::gather_management_interface()?;
        trace!("got management_interface");

        let host_uuid = Self::get_host_uuid()?;

        Ok(Self {
            storage_drives: Self::gather_storage_drives()?,
            storage_controller: Self::gather_storage_controller()?,
            memory_modules: Self::gather_memory_modules()?,
            cpus: Self::gather_cpus(&sys)?,
            chipset: Self::gather_chipset()?,
            network_interfaces: Self::gather_network_interfaces()?,
            management_interface: Self::gather_management_interface()?,
            host_uuid: Self::get_host_uuid()?,
            storage_drives,
            storage_controller,
            memory_modules,
            cpus,
            chipset,
            network_interfaces,
            management_interface,
            host_uuid,
        })
    }

@@ -208,6 +236,8 @@ impl PhysicalHost {
            ));
        }

        debug!("All tools found!");

        Ok(())
    }

@@ -231,7 +261,10 @@ impl PhysicalHost {
    fn gather_storage_drives() -> Result<Vec<StorageDrive>, String> {
        let mut drives = Vec::new();

        trace!("Starting storage drive discovery using lsblk");

        // Use lsblk with JSON output for robust parsing
        trace!("Executing 'lsblk -d -o NAME,MODEL,SERIAL,SIZE,ROTA,WWN -n -e 7 --json'");
        let output = Command::new("lsblk")
            .args([
                "-d",
@@ -245,13 +278,18 @@ impl PhysicalHost {
            .output()
            .map_err(|e| format!("Failed to execute lsblk: {}", e))?;

        trace!(
            "lsblk command executed successfully (status: {:?})",
            output.status
        );

        if !output.status.success() {
            return Err(format!(
                "lsblk command failed: {}",
                String::from_utf8_lossy(&output.stderr)
            ));
            let stderr_str = String::from_utf8_lossy(&output.stderr);
            debug!("lsblk command failed: {stderr_str}");
            return Err(format!("lsblk command failed: {stderr_str}"));
        }

        trace!("Parsing lsblk JSON output");
        let json: Value = serde_json::from_slice(&output.stdout)
            .map_err(|e| format!("Failed to parse lsblk JSON output: {}", e))?;

@@ -260,6 +298,8 @@ impl PhysicalHost {
            .and_then(|v| v.as_array())
            .ok_or("Invalid lsblk JSON: missing 'blockdevices' array")?;

        trace!("Found {} blockdevices in lsblk output", blockdevices.len());

        for device in blockdevices {
            let name = device
                .get("name")
@@ -268,52 +308,72 @@ impl PhysicalHost {
                .to_string();

            if name.is_empty() {
                trace!("Skipping unnamed device entry: {:?}", device);
                continue;
            }

            trace!("Inspecting block device: {name}");

            // Extract metadata fields
            let model = device
                .get("model")
                .and_then(|v| v.as_str())
                .map(|s| s.trim().to_string())
                .unwrap_or_default();
            trace!("Model for {name}: '{}'", model);

            let serial = device
                .get("serial")
                .and_then(|v| v.as_str())
                .map(|s| s.trim().to_string())
                .unwrap_or_default();
            trace!("Serial for {name}: '{}'", serial);

            let size_str = device
                .get("size")
                .and_then(|v| v.as_str())
                .ok_or("Missing 'size' in lsblk device")?;
            trace!("Reported size for {name}: {}", size_str);
            let size_bytes = Self::parse_size(size_str)?;
            trace!("Parsed size for {name}: {} bytes", size_bytes);

            let rotational = device
                .get("rota")
                .and_then(|v| v.as_bool())
                .ok_or("Missing 'rota' in lsblk device")?;
            trace!("Rotational flag for {name}: {}", rotational);

            let wwn = device
                .get("wwn")
                .and_then(|v| v.as_str())
                .map(|s| s.trim().to_string())
                .filter(|s| !s.is_empty() && s != "null");
            trace!("WWN for {name}: {:?}", wwn);

            let device_path = Path::new("/sys/block").join(&name);
            trace!("Sysfs path for {name}: {:?}", device_path);

            trace!("Reading logical block size for {name}");
            let logical_block_size = Self::read_sysfs_u32(
                &device_path.join("queue/logical_block_size"),
            )
            .map_err(|e| format!("Failed to read logical block size for {}: {}", name, e))?;
            trace!("Logical block size for {name}: {}", logical_block_size);

            trace!("Reading physical block size for {name}");
            let physical_block_size = Self::read_sysfs_u32(
                &device_path.join("queue/physical_block_size"),
            )
            .map_err(|e| format!("Failed to read physical block size for {}: {}", name, e))?;
            trace!("Physical block size for {name}: {}", physical_block_size);

            trace!("Determining interface type for {name}");
            let interface_type = Self::get_interface_type(&name, &device_path)?;
            trace!("Interface type for {name}: {}", interface_type);

            trace!("Getting SMART status for {name}");
            let smart_status = Self::get_smart_status(&name)?;
            trace!("SMART status for {name}: {:?}", smart_status);

            let mut drive = StorageDrive {
                name: name.clone(),
@@ -330,19 +390,31 @@ impl PhysicalHost {

            // Enhance with additional sysfs info if available
            if device_path.exists() {
                trace!("Enhancing drive {name} with extra sysfs metadata");
                if drive.model.is_empty() {
                    trace!("Reading model from sysfs for {name}");
                    drive.model = Self::read_sysfs_string(&device_path.join("device/model"))
                        .unwrap_or(format!("Failed to read model for {}", name));
                        .unwrap_or_else(|_| format!("Failed to read model for {}", name));
                }
                if drive.serial.is_empty() {
                    trace!("Reading serial from sysfs for {name}");
                    drive.serial = Self::read_sysfs_string(&device_path.join("device/serial"))
                        .unwrap_or(format!("Failed to read serial for {}", name));
                        .unwrap_or_else(|_| format!("Failed to read serial for {}", name));
                }
            } else {
                trace!(
                    "Sysfs path {:?} not found for drive {name}, skipping extra metadata",
                    device_path
                );
            }

            debug!("Discovered storage drive: {drive:?}");
            drives.push(drive);
        }

        debug!("Discovered total {} storage drives", drives.len());
        trace!("All discovered drives: {drives:?}");

        Ok(drives)
    }

@@ -418,6 +490,8 @@ impl PhysicalHost {
            }
        }

        debug!("Found storage controller {controller:?}");

        Ok(controller)
    }

@@ -486,6 +560,7 @@ impl PhysicalHost {
            }
        }

        debug!("Found memory modules {modules:?}");
        Ok(modules)
    }

@@ -501,22 +576,30 @@ impl PhysicalHost {
            frequency_mhz: global_cpu.frequency(),
        });

        debug!("Found cpus {cpus:?}");

        Ok(cpus)
    }

    fn gather_chipset() -> Result<Chipset, String> {
        Ok(Chipset {
        let chipset = Chipset {
            name: Self::read_dmi("baseboard-product-name")?,
            vendor: Self::read_dmi("baseboard-manufacturer")?,
        })
        };

        debug!("Found chipset {chipset:?}");

        Ok(chipset)
    }

    fn gather_network_interfaces() -> Result<Vec<NetworkInterface>, String> {
        let mut interfaces = Vec::new();
        let sys_net_path = Path::new("/sys/class/net");
        trace!("Reading /sys/class/net");

        let entries = fs::read_dir(sys_net_path)
            .map_err(|e| format!("Failed to read /sys/class/net: {}", e))?;
        trace!("Got entries {entries:?}");

        for entry in entries {
            let entry = entry.map_err(|e| format!("Failed to read directory entry: {}", e))?;
@@ -525,6 +608,7 @@ impl PhysicalHost {
                .into_string()
                .map_err(|_| "Invalid UTF-8 in interface name")?;
            let iface_path = entry.path();
            trace!("Inspecting interface {iface_name} path {iface_path:?}");

            // Skip virtual interfaces
            if iface_name.starts_with("lo")
@@ -535,70 +619,101 @@ impl PhysicalHost {
                || iface_name.starts_with("tun")
                || iface_name.starts_with("wg")
            {
                trace!(
                    "Skipping interface {iface_name} because it appears to be virtual/unsupported"
                );
                continue;
            }

            // Check if it's a physical interface by looking for device directory
            if !iface_path.join("device").exists() {
                trace!(
                    "Skipping interface {iface_name} since {iface_path:?}/device does not exist"
                );
                continue;
            }

            trace!("Reading MAC address for {iface_name}");
            let mac_address = Self::read_sysfs_string(&iface_path.join("address"))
                .map_err(|e| format!("Failed to read MAC address for {}: {}", iface_name, e))?;
            let mac_address = MacAddress::try_from(mac_address).map_err(|e| e.to_string())?;
            trace!("MAC address for {iface_name}: {mac_address}");

            let speed_mbps = if iface_path.join("speed").exists() {
                match Self::read_sysfs_u32(&iface_path.join("speed")) {
                    Ok(speed) => Some(speed),
            let speed_path = iface_path.join("speed");
            let speed_mbps = if speed_path.exists() {
                trace!("Reading speed for {iface_name} from {:?}", speed_path);
                match Self::read_sysfs_u32(&speed_path) {
                    Ok(speed) => {
                        trace!("Speed for {iface_name}: {speed} Mbps");
                        Some(speed)
                    }
                    Err(e) => {
                        debug!(
                            "Failed to read speed for {}: {} . This is expected to fail on wifi interfaces.",
                            "Failed to read speed for {}: {} (this may be expected on Wi‑Fi interfaces)",
                            iface_name, e
                        );
                        None
                    }
                }
            } else {
                trace!("Speed file not found for {iface_name}, skipping");
                None
            };

            trace!("Reading operstate for {iface_name}");
            let operstate = Self::read_sysfs_string(&iface_path.join("operstate"))
                .map_err(|e| format!("Failed to read operstate for {}: {}", iface_name, e))?;
            trace!("Operstate for {iface_name}: {operstate}");

            trace!("Reading MTU for {iface_name}");
            let mtu = Self::read_sysfs_u32(&iface_path.join("mtu"))
                .map_err(|e| format!("Failed to read MTU for {}: {}", iface_name, e))?;
            trace!("MTU for {iface_name}: {mtu}");

            trace!("Reading driver for {iface_name}");
            let driver =
                Self::read_sysfs_symlink_basename(&iface_path.join("device/driver/module"))
                    .map_err(|e| format!("Failed to read driver for {}: {}", iface_name, e))?;
            trace!("Driver for {iface_name}: {driver}");

            trace!("Reading firmware version for {iface_name}");
            let firmware_version = Self::read_sysfs_opt_string(
                &iface_path.join("device/firmware_version"),
            )
            .map_err(|e| format!("Failed to read firmware version for {}: {}", iface_name, e))?;
            trace!("Firmware version for {iface_name}: {firmware_version:?}");

            // Get IP addresses using ip command with JSON output
            trace!("Fetching IP addresses for {iface_name}");
            let (ipv4_addresses, ipv6_addresses) = Self::get_interface_ips_json(&iface_name)
                .map_err(|e| format!("Failed to get IP addresses for {}: {}", iface_name, e))?;
            trace!("Interface {iface_name} has IPv4: {ipv4_addresses:?}, IPv6: {ipv6_addresses:?}");

            interfaces.push(NetworkInterface {
                name: iface_name,
            let is_up = operstate == "up";
            trace!("Constructing NetworkInterface for {iface_name} (is_up={is_up})");

            let iface = NetworkInterface {
                name: iface_name.clone(),
                mac_address,
                speed_mbps,
                is_up: operstate == "up",
                is_up,
                mtu,
                ipv4_addresses,
                ipv6_addresses,
                driver,
                firmware_version,
            });
            };

            debug!("Discovered interface: {iface:?}");
            interfaces.push(iface);
        }

        debug!("Discovered total {} network interfaces", interfaces.len());
        trace!("Interfaces collected: {interfaces:?}");
        Ok(interfaces)
    }

    fn gather_management_interface() -> Result<Option<ManagementInterface>, String> {
        if Path::new("/dev/ipmi0").exists() {
        let mgmt = if Path::new("/dev/ipmi0").exists() {
            Ok(Some(ManagementInterface {
                kind: "IPMI".to_string(),
                address: None,
@@ -612,11 +727,16 @@ impl PhysicalHost {
            }))
        } else {
            Ok(None)
        }
        };

        debug!("Found management interface {mgmt:?}");
        mgmt
    }

    fn get_host_uuid() -> Result<String, String> {
        Self::read_dmi("system-uuid")
        let uuid = Self::read_dmi("system-uuid");
        debug!("Found uuid {uuid:?}");
        uuid
    }

    // Helper methods
@@ -709,7 +829,8 @@ impl PhysicalHost {
            Ok("Ramdisk".to_string())
        } else {
            // Try to determine from device path
            let subsystem = Self::read_sysfs_string(&device_path.join("device/subsystem"))?;
            let subsystem = Self::read_sysfs_string(&device_path.join("device/subsystem"))
                .unwrap_or(String::new());
            Ok(subsystem
                .split('/')
                .next_back()
@@ -779,6 +900,8 @@ impl PhysicalHost {
        size.map(|s| s as u64)
    }

    // FIXME when scanning an interface that is part of a bond/bridge we won't get an address on the
    // interface, we should be looking at the bond/bridge device. For example, br-ex on k8s nodes.
    fn get_interface_ips_json(iface_name: &str) -> Result<(Vec<String>, Vec<String>), String> {
        let mut ipv4 = Vec::new();
        let mut ipv6 = Vec::new();

@@ -1,4 +1,4 @@
use log::{debug, error, info, warn};
use log::{debug, error, info, trace, warn};
use mdns_sd::{ServiceDaemon, ServiceInfo};
use std::collections::HashMap;

@@ -12,6 +12,7 @@ use crate::{
/// This function is synchronous and non-blocking. It spawns a background Tokio task
/// to handle the mDNS advertisement for the lifetime of the application.
pub fn advertise(service_port: u16) -> Result<(), PresenceError> {
    trace!("starting advertisement process for port {service_port}");
    let host_id = match PhysicalHost::gather() {
        Ok(host) => Some(host.host_uuid),
        Err(e) => {
@@ -20,11 +21,15 @@ pub fn advertise(service_port: u16) -> Result<(), PresenceError> {
        }
    };

    trace!("Found host id {host_id:?}");

    let instance_name = format!(
        "inventory-agent-{}",
        host_id.clone().unwrap_or("unknown".to_string())
    );

    trace!("Found host id {host_id:?}, name : {instance_name}");

    let spawned_msg = format!("Spawned local presence advertisement task for '{instance_name}'.");

    tokio::spawn(async move {

@@ -28,7 +28,7 @@ async fn inventory() -> impl Responder {
async fn main() -> std::io::Result<()> {
    env_logger::init();

    let port = env::var("HARMONY_INVENTORY_AGENT_PORT").unwrap_or_else(|_| "8080".to_string());
    let port = env::var("HARMONY_INVENTORY_AGENT_PORT").unwrap_or_else(|_| "25000".to_string());
    let port = port
        .parse::<u16>()
        .expect(&format!("Invalid port number, cannot parse to u16 {port}"));

@@ -135,15 +135,17 @@ pub fn ingress_path(input: TokenStream) -> TokenStream {

#[proc_macro]
pub fn cidrv4(input: TokenStream) -> TokenStream {
    let input = parse_macro_input!(input as LitStr);
    let cidr_str = input.value();
    let lit = parse_macro_input!(input as LitStr);

    if cidr_str.parse::<cidr::Ipv4Cidr>().is_ok() {
        let expanded = quote! { #cidr_str.parse::<cidr::Ipv4Cidr>().unwrap() };
        return TokenStream::from(expanded);
    }
    // This is the IMPORTANT part:
    // we re-emit the *string literal itself*
    let expanded = quote! {
        #lit
            .parse::<cidr::Ipv4Cidr>()
            .expect("Invalid IPv4 CIDR literal")
    };

    panic!("Invalid IPv4 CIDR : {}", cidr_str);
    TokenStream::from(expanded)
}

/// Creates a `harmony_types::net::Url::Url` from a string literal.

@@ -9,3 +9,4 @@ license.workspace = true
serde.workspace = true
url.workspace = true
rand.workspace = true
log.workspace = true

@@ -1,3 +1,4 @@
pub mod id;
pub mod net;
pub mod storage;
pub mod switch;

@@ -1,6 +1,6 @@
use serde::{Deserialize, Serialize};

#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize, PartialOrd, Ord)]
#[derive(Copy, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, PartialOrd, Ord)]
pub struct MacAddress(pub [u8; 6]);

impl MacAddress {
@@ -19,6 +19,14 @@ impl From<&MacAddress> for String {
    }
}

impl std::fmt::Debug for MacAddress {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_tuple("MacAddress")
            .field(&String::from(self))
            .finish()
    }
}
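
> Editor's note: the custom `Debug` prints the human-readable form instead of the raw byte array. A small sketch (illustration only; the exact string comes from the existing `String::from(&MacAddress)` impl):

```rust
let mac = MacAddress([0xaa, 0xbb, 0xcc, 0x00, 0x01, 0x02]);
// Before this change: MacAddress([170, 187, 204, 0, 1, 2])
// After: MacAddress("<whatever String::from(&mac) renders>")
println!("{mac:?}");
```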

impl std::fmt::Display for MacAddress {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(&String::from(self))

6 harmony_types/src/storage.rs Normal file
@@ -0,0 +1,6 @@
use serde::{Deserialize, Serialize};

#[derive(Copy, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, PartialOrd, Ord, Debug)]
pub struct StorageSize {
    size_bytes: u64,
}

@@ -1,3 +1,5 @@
use log::trace;
use serde::Serialize;
use std::{fmt, str::FromStr};

/// Simple error type for port parsing failures.
@@ -21,7 +23,7 @@ impl fmt::Display for PortParseError {
/// Represents the atomic, physical location of a switch port: `<Stack>/<Module>/<Port>`.
///
/// Example: `1/1/1`
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Serialize)]
pub struct PortLocation(pub u8, pub u8, pub u8);

impl fmt::Display for PortLocation {
@@ -70,6 +72,12 @@ impl FromStr for PortLocation {
pub enum PortDeclaration {
    /// A single switch port defined by its location. Example: `PortDeclaration::Single(1/1/1)`
    Single(PortLocation),
    /// A Named port, often used for virtual ports such as PortChannels. Example
    /// ```rust
    /// # use harmony_types::switch::PortDeclaration;
    /// PortDeclaration::Named("1".to_string());
    /// ```
    Named(String),
    /// A strictly sequential range defined by two endpoints using the hyphen separator (`-`).
    /// All ports between the endpoints (inclusive) are implicitly included.
    /// Example: `PortDeclaration::Range(1/1/1, 1/1/4)`
@@ -130,8 +138,25 @@ impl PortDeclaration {
            return Ok(PortDeclaration::Set(start_port, end_port));
        }

        let location = PortLocation::from_str(port_str)?;
        Ok(PortDeclaration::Single(location))
        match PortLocation::from_str(port_str) {
            Ok(loc) => Ok(PortDeclaration::Single(loc)),
            Err(e) => {
                let segments: Vec<&str> = port_str.split('/').collect();
                let segment_count = segments.len();

                // Logic:
                // If it has 3 segments but failed (e.g., "1/A/1"), it's an InvalidSegment.
                // If it has MORE than 3 segments (e.g., "1/1/1/1" or "1/1/1/"), it's an InvalidFormat.
                if segment_count >= 3 {
                    return Err(e);
                }

                // Otherwise, it's something else entirely (e.g., "eth0", "vlan10"),
                // so we treat it as a Named port.
                trace!("Falling back on named port for: {port_str}");
                Ok(PortDeclaration::Named(port_str.to_string()))
            }
        }
    }
}
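
> Editor's note: the fallback above yields three distinct outcomes, mirrored here (illustration only; `parse` is the assumed name of the entry point shown in this hunk):

```rust
// Three well-formed segments -> a Single port location.
assert!(matches!(
    PortDeclaration::parse("1/1/1"),
    Ok(PortDeclaration::Single(_))
));
// Fewer than three segments -> treated as a Named (virtual) port.
assert!(matches!(
    PortDeclaration::parse("eth0"),
    Ok(PortDeclaration::Named(_))
));
// Three segments with a bad one -> the original parse error is surfaced.
assert!(PortDeclaration::parse("1/A/1").is_err());
```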
|
||||
|
||||
@@ -141,6 +166,7 @@ impl fmt::Display for PortDeclaration {
|
||||
PortDeclaration::Single(port) => write!(f, "{port}"),
|
||||
PortDeclaration::Range(start, end) => write!(f, "{start}-{end}"),
|
||||
PortDeclaration::Set(start, end) => write!(f, "{start}*{end}"),
|
||||
PortDeclaration::Named(name) => write!(f, "{name}"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -106,11 +106,37 @@ pub struct HAProxy {
|
||||
pub groups: MaybeString,
|
||||
pub users: MaybeString,
|
||||
pub cpus: MaybeString,
|
||||
pub resolvers: MaybeString,
|
||||
pub resolvers: HAProxyResolvers,
|
||||
pub mailers: MaybeString,
|
||||
pub maintenance: Maintenance,
|
||||
}
|
||||
|
||||
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
||||
pub struct HAProxyResolvers {
|
||||
#[yaserde(rename = "resolver")]
|
||||
pub resolver: Option<Resolver>,
|
||||
}
|
||||
|
||||
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
||||
pub struct Resolver {
|
||||
pub id: String,
|
||||
pub enabled: i32,
|
||||
pub name: String,
|
||||
pub description: MaybeString,
|
||||
pub nameservers: String,
|
||||
pub parse_resolv_conf: String,
|
||||
pub resolve_retries: i32,
|
||||
pub timeout_resolve: String,
|
||||
pub timeout_retry: String,
|
||||
pub accepted_payload_size: MaybeString,
|
||||
pub hold_valid: MaybeString,
|
||||
pub hold_obsolete: MaybeString,
|
||||
pub hold_refused: MaybeString,
|
||||
pub hold_nx: MaybeString,
|
||||
pub hold_timeout: MaybeString,
|
||||
pub hold_other: MaybeString,
|
||||
}
|
||||
|
||||
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
|
||||
pub struct Maintenance {
|
||||
#[yaserde(rename = "cronjobs")]
|
||||
|
||||
@@ -9,7 +9,7 @@ pub struct Interface {
|
||||
pub physical_interface_name: String,
|
||||
pub descr: Option<MaybeString>,
|
||||
pub mtu: Option<MaybeString>,
|
||||
pub enable: MaybeString,
|
||||
pub enable: Option<MaybeString>,
|
||||
pub lock: Option<MaybeString>,
|
||||
#[yaserde(rename = "spoofmac")]
|
||||
pub spoof_mac: Option<MaybeString>,
|
||||
@@ -134,19 +134,15 @@ mod test {
|
||||
<interfaces>
|
||||
<paul>
|
||||
<if></if>
|
||||
<enable/>
|
||||
</paul>
|
||||
<anotherpaul>
|
||||
<if></if>
|
||||
<enable/>
|
||||
</anotherpaul>
|
||||
<thirdone>
|
||||
<if></if>
|
||||
<enable/>
|
||||
</thirdone>
|
||||
<andgofor4>
|
||||
<if></if>
|
||||
<enable/>
|
||||
</andgofor4>
|
||||
</interfaces>
|
||||
<bar>foo</bar>
|
||||
|
||||
@@ -136,6 +136,7 @@ pub struct Rule {
    pub updated: Option<Updated>,
    pub created: Option<Created>,
    pub disabled: Option<MaybeString>,
    pub log: Option<u32>,
}

#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]

@@ -195,7 +196,7 @@ pub struct System {
    pub disablechecksumoffloading: u8,
    pub disablesegmentationoffloading: u8,
    pub disablelargereceiveoffloading: u8,
    pub ipv6allow: u8,
    pub ipv6allow: Option<u8>,
    pub powerd_ac_mode: String,
    pub powerd_battery_mode: String,
    pub powerd_normal_mode: String,

@@ -226,6 +227,7 @@ pub struct System {
    pub dns6gw: Option<String>,
    pub dns7gw: Option<String>,
    pub dns8gw: Option<String>,
    pub prefer_ipv4: Option<String>,
    pub dnsallowoverride: u8,
    pub dnsallowoverride_exclude: Option<MaybeString>,
}

@@ -329,6 +331,7 @@ pub struct Range {
pub struct StaticMap {
    pub mac: String,
    pub ipaddr: String,
    pub cid: Option<MaybeString>,
    pub hostname: String,
    pub descr: Option<MaybeString>,
    pub winsserver: MaybeString,

@@ -764,9 +767,19 @@ pub struct Jobs {
pub struct Job {
    #[yaserde(attribute = true)]
    pub uuid: MaybeString,
    #[yaserde(rename = "name")]
    pub name: MaybeString,
    pub name: Option<MaybeString>,
    // Add other fields as needed
    pub origin: Option<MaybeString>,
    pub enabled: Option<MaybeString>,
    pub minutes: Option<MaybeString>,
    pub hours: Option<MaybeString>,
    pub days: Option<MaybeString>,
    pub months: Option<MaybeString>,
    pub weekdays: Option<MaybeString>,
    pub who: Option<MaybeString>,
    pub command: Option<MaybeString>,
    pub parameters: Option<MaybeString>,
    pub description: Option<MaybeString>,
}

#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]

@@ -895,28 +908,28 @@ pub struct Proxy {
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
pub struct ProxyGeneral {
    pub enabled: i8,
    pub error_pages: String,
    pub error_pages: Option<MaybeString>,
    #[yaserde(rename = "icpPort")]
    pub icp_port: MaybeString,
    pub logging: Logging,
    #[yaserde(rename = "alternateDNSservers")]
    pub alternate_dns_servers: MaybeString,
    #[yaserde(rename = "dnsV4First")]
    pub dns_v4_first: i8,
    pub dns_v4_first: Option<MaybeString>,
    #[yaserde(rename = "forwardedForHandling")]
    pub forwarded_for_handling: String,
    pub forwarded_for_handling: Option<MaybeString>,
    #[yaserde(rename = "uriWhitespaceHandling")]
    pub uri_whitespace_handling: String,
    pub uri_whitespace_handling: Option<MaybeString>,
    #[yaserde(rename = "enablePinger")]
    pub enable_pinger: i8,
    #[yaserde(rename = "useViaHeader")]
    pub use_via_header: i8,
    pub use_via_header: Option<MaybeString>,
    #[yaserde(rename = "suppressVersion")]
    pub suppress_version: i32,
    pub suppress_version: Option<MaybeString>,
    #[yaserde(rename = "connecttimeout")]
    pub connect_timeout: MaybeString,
    pub connect_timeout: Option<MaybeString>,
    #[yaserde(rename = "VisibleEmail")]
    pub visible_email: String,
    pub visible_email: Option<MaybeString>,
    #[yaserde(rename = "VisibleHostname")]
    pub visible_hostname: MaybeString,
    pub cache: Cache,

@@ -953,7 +966,7 @@ pub struct LocalCache {
    pub cache_mem: i32,
    pub maximum_object_size: MaybeString,
    pub maximum_object_size_in_memory: MaybeString,
    pub memory_cache_mode: String,
    pub memory_cache_mode: MaybeString,
    pub size: i32,
    pub l1: i32,
    pub l2: i32,

@@ -965,13 +978,13 @@ pub struct LocalCache {
pub struct Traffic {
    pub enabled: i32,
    #[yaserde(rename = "maxDownloadSize")]
    pub max_download_size: i32,
    pub max_download_size: MaybeString,
    #[yaserde(rename = "maxUploadSize")]
    pub max_upload_size: i32,
    pub max_upload_size: MaybeString,
    #[yaserde(rename = "OverallBandwidthTrotteling")]
    pub overall_bandwidth_trotteling: i32,
    pub overall_bandwidth_trotteling: MaybeString,
    #[yaserde(rename = "perHostTrotteling")]
    pub per_host_trotteling: i32,
    pub per_host_trotteling: MaybeString,
}

#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]

@@ -988,7 +1001,7 @@ pub struct ParentProxy {

#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
pub struct Forward {
    pub interfaces: String,
    pub interfaces: MaybeString,
    pub port: i32,
    pub sslbumpport: i32,
    pub sslbump: i32,

@@ -1033,9 +1046,9 @@ pub struct Acl {
    pub google_apps: MaybeString,
    pub youtube: MaybeString,
    #[yaserde(rename = "safePorts")]
    pub safe_ports: String,
    pub safe_ports: MaybeString,
    #[yaserde(rename = "sslPorts")]
    pub ssl_ports: String,
    pub ssl_ports: MaybeString,
    #[yaserde(rename = "remoteACLs")]
    pub remote_acls: RemoteAcls,
}

@@ -1051,9 +1064,9 @@ pub struct RemoteAcls {
pub struct Icap {
    pub enable: i32,
    #[yaserde(rename = "RequestURL")]
    pub request_url: String,
    pub request_url: MaybeString,
    #[yaserde(rename = "ResponseURL")]
    pub response_url: String,
    pub response_url: MaybeString,
    #[yaserde(rename = "SendClientIP")]
    pub send_client_ip: i32,
    #[yaserde(rename = "SendUsername")]

@@ -1061,7 +1074,7 @@ pub struct Icap {
    #[yaserde(rename = "EncodeUsername")]
    pub encode_username: i32,
    #[yaserde(rename = "UsernameHeader")]
    pub username_header: String,
    pub username_header: MaybeString,
    #[yaserde(rename = "EnablePreview")]
    pub enable_preview: i32,
    #[yaserde(rename = "PreviewSize")]

@@ -1076,9 +1089,9 @@ pub struct Authentication {
    pub method: MaybeString,
    #[yaserde(rename = "authEnforceGroup")]
    pub auth_enforce_group: MaybeString,
    pub realm: String,
    pub credentialsttl: i32, // This field is already in snake_case
    pub children: i32,
    pub realm: MaybeString,
    pub credentialsttl: MaybeString, // This field is already in snake_case
    pub children: MaybeString,
}

#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]

@@ -1140,6 +1153,7 @@ pub struct UnboundGeneral {
    pub local_zone_type: String,
    pub outgoing_interface: MaybeString,
    pub enable_wpad: MaybeString,
    pub safesearch: MaybeString,
}

#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]

@@ -1193,15 +1207,15 @@ pub struct Acls {

#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
pub struct Dnsbl {
    pub enabled: i32,
    pub safesearch: MaybeString,
    pub enabled: Option<i32>,
    pub safesearch: Option<MaybeString>,
    #[yaserde(rename = "type")]
    pub r#type: MaybeString,
    pub lists: MaybeString,
    pub whitelists: MaybeString,
    pub blocklists: MaybeString,
    pub wildcards: MaybeString,
    pub address: MaybeString,
    pub r#type: Option<MaybeString>,
    pub lists: Option<MaybeString>,
    pub whitelists: Option<MaybeString>,
    pub blocklists: Option<MaybeString>,
    pub wildcards: Option<MaybeString>,
    pub address: Option<MaybeString>,
    pub nxdomain: Option<i32>,
}

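With every Dnsbl field optional, a config section that predates elements such as `<nxdomain>` should still deserialize. A hedged sketch, assuming yaserde's `de::from_str` entry point and that `Option` fields default to `None` when their element is absent (the XML below is an invented minimal section):

// Hypothetical pre-upgrade config fragment; relies on the derives on Dnsbl above.
let xml = "<dnsbl><enabled>0</enabled></dnsbl>";
let dnsbl: Dnsbl = yaserde::de::from_str(xml).expect("older configs still parse");
assert_eq!(dnsbl.enabled, Some(0));
assert_eq!(dnsbl.nxdomain, None); // element absent in old configs
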
@@ -1229,6 +1243,7 @@ pub struct Host {
    pub ttl: Option<MaybeString>,
    pub server: String,
    pub description: Option<String>,
    pub txtdata: MaybeString,
}

impl Host {

@@ -1244,6 +1259,7 @@ impl Host {
            ttl: Some(MaybeString::default()),
            mx: MaybeString::default(),
            description: None,
            txtdata: MaybeString::default(),
        }
    }
}

@@ -1293,6 +1309,7 @@ pub struct WireguardServerItem {
    pub peers: String,
    pub endpoint: MaybeString,
    pub peer_dns: MaybeString,
    pub debug: Option<MaybeString>,
}

#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]

@@ -1477,6 +1494,7 @@ pub struct Ppp {
    pub ports: Option<MaybeString>,
    pub username: Option<MaybeString>,
    pub password: Option<MaybeString>,
    pub provider: Option<MaybeString>,
}

#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]

@@ -86,10 +86,7 @@ impl<'a> DhcpConfigLegacyISC<'a> {
            mac,
            ipaddr: ipaddr.to_string(),
            hostname,
            descr: Default::default(),
            winsserver: Default::default(),
            dnsserver: Default::default(),
            ntpserver: Default::default(),
            ..Default::default()
        };

        existing_mappings.push(static_map);

@@ -126,9 +123,7 @@ impl<'a> DhcpConfigLegacyISC<'a> {
                ipaddr: entry["ipaddr"].as_str().unwrap_or_default().to_string(),
                hostname: entry["hostname"].as_str().unwrap_or_default().to_string(),
                descr: entry["descr"].as_str().map(MaybeString::from),
                winsserver: MaybeString::default(),
                dnsserver: MaybeString::default(),
                ntpserver: MaybeString::default(),
                ..Default::default()
            })
            .collect();

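Both hunks replace a run of per-field defaults with Rust's struct-update syntax: spell out only the fields that matter and let `..Default::default()` fill the rest, which works because StaticMap implements Default (the diff's own use of the idiom requires it). A sketch with invented values:

// Hypothetical host entry; only mac/ipaddr/hostname are spelled out, and
// every other StaticMap field (cid, descr, winsserver, dnsserver, ...)
// falls back to its type's default.
let static_map = StaticMap {
    mac: "aa:bb:cc:dd:ee:ff".to_string(),
    ipaddr: "192.168.1.50".to_string(),
    hostname: "printer".to_string(),
    ..Default::default()
};
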
@@ -612,6 +612,7 @@
<local_zone_type>transparent</local_zone_type>
<outgoing_interface/>
<enable_wpad>0</enable_wpad>
<safesearch/>
</general>
<advanced>
<hideidentity>0</hideidentity>

@@ -2003,6 +2003,7 @@
<cacheflush/>
<local_zone_type>transparent</local_zone_type>
<outgoing_interface/>
<safesearch/>
<enable_wpad/>
</general>
<advanced>

@@ -2071,6 +2072,7 @@
<mx/>
<server>192.168.20.161</server>
<description>Some app local</description>
<txtdata/>
</host>
<host uuid="dd593e95-02bc-476f-8610-fa1ee454e950">
<enabled>1</enabled>

@@ -2081,6 +2083,7 @@
<mx/>
<server>192.168.20.161</server>
<description>Some app local</description>
<txtdata/>
</host>
<host uuid="e1606f96-dd38-471f-a3d7-ad25e41e810d">
<enabled>1</enabled>

@@ -2091,6 +2094,7 @@
<mx/>
<server>192.168.20.161</server>
<description>Some app local</description>
<txtdata/>
</host>
</hosts>
<aliases/>

@@ -2117,6 +2121,7 @@
<endpoint/>
<peer_dns/>
<carp_depend_on/>
<debug/>
<peers>03031aec-2e84-462e-9eab-57762dde667a,98e6ca3d-1de9-449b-be80-77022221b509,67c0ace5-e802-4d2b-a536-f8b7a2db6f99,74b60fff-7844-4097-9966-f1c2b1ad29ff,3de82ad5-bc1b-4b91-9598-f906e58ac937,a95e6b5e-24a4-40b5-bb41-b79e784f6f1c,6c9a12c6-c1ca-4c14-866b-975406a30590,c33b308b-7125-4688-9561-989ace8787b5,e43f004a-23bf-4027-8fb0-953fbb40479f</peers>
</server>
</servers>

@@ -614,6 +614,7 @@
<local_zone_type>transparent</local_zone_type>
<outgoing_interface/>
<enable_wpad>0</enable_wpad>
<safesearch/>
</general>
<advanced>
<hideidentity>0</hideidentity>

@@ -750,6 +750,7 @@
<local_zone_type>transparent</local_zone_type>
<outgoing_interface/>
<enable_wpad>0</enable_wpad>
<safesearch/>
</general>
<advanced>
<hideidentity>0</hideidentity>

@@ -709,6 +709,7 @@
<local_zone_type>transparent</local_zone_type>
<outgoing_interface/>
<enable_wpad>0</enable_wpad>
<safesearch/>
</general>
<advanced>
<hideidentity>0</hideidentity>

@@ -951,6 +951,7 @@
<local_zone_type>transparent</local_zone_type>
<outgoing_interface/>
<enable_wpad/>
<safesearch/>
</general>
<advanced>
<hideidentity>0</hideidentity>

@@ -808,6 +808,7 @@
<local_zone_type>transparent</local_zone_type>
<outgoing_interface/>
<enable_wpad/>
<safesearch/>
</general>
<advanced>
<hideidentity/>

@@ -726,6 +726,7 @@
<local_zone_type>transparent</local_zone_type>
<outgoing_interface/>
<enable_wpad/>
<safesearch/>
</general>
<advanced>
<hideidentity>0</hideidentity>

@@ -793,6 +794,7 @@
<mx/>
<server>192.168.20.161</server>
<description>Some app local</description>
<txtdata/>
</host>
<host uuid="dd593e95-02bc-476f-8610-fa1ee454e950">
<enabled>1</enabled>

@@ -803,6 +805,7 @@
<mx/>
<server>192.168.20.161</server>
<description>Some app local</description>
<txtdata/>
</host>
<host uuid="e1606f96-dd38-471f-a3d7-ad25e41e810d">
<enabled>1</enabled>

@@ -813,6 +816,7 @@
<mx/>
<server>192.168.20.161</server>
<description>Some app local</description>
<txtdata/>
</host>
</hosts>
<aliases/>

@@ -840,6 +844,7 @@
<peers>03031aec-2e84-462e-9eab-57762dde667a,98e6ca3d-1de9-449b-be80-77022221b509,67c0ace5-e802-4d2b-a536-f8b7a2db6f99,74b60fff-7844-4097-9966-f1c2b1ad29ff,3de82ad5-bc1b-4b91-9598-f906e58ac937,a95e6b5e-24a4-40b5-bb41-b79e784f6f1c,6c9a12c6-c1ca-4c14-866b-975406a30590,c33b308b-7125-4688-9561-989ace8787b5,e43f004a-23bf-4027-8fb0-953fbb40479f</peers>
<endpoint/>
<peer_dns/>
<debug/>
</server>
</servers>
</server>

@@ -718,6 +718,7 @@
<local_zone_type>transparent</local_zone_type>
<outgoing_interface/>
<enable_wpad/>
<safesearch/>
</general>
<advanced>
<hideidentity>0</hideidentity>

@@ -785,6 +786,7 @@
<mx/>
<server>192.168.20.161</server>
<description>Some app local</description>
<txtdata/>
</host>
<host uuid="dd593e95-02bc-476f-8610-fa1ee454e950">
<enabled>1</enabled>

@@ -795,6 +797,7 @@
<mx/>
<server>192.168.20.161</server>
<description>Some app local</description>
<txtdata/>
</host>
<host uuid="e1606f96-dd38-471f-a3d7-ad25e41e810d">
<enabled>1</enabled>

@@ -805,6 +808,7 @@
<mx/>
<server>192.168.20.161</server>
<description>Some app local</description>
<txtdata/>
</host>
</hosts>
<aliases/>

@@ -832,6 +836,7 @@
<gateway/>
<carp_depend_on/>
<peers>03031aec-2e84-462e-9eab-57762dde667a,98e6ca3d-1de9-449b-be80-77022221b509,67c0ace5-e802-4d2b-a536-f8b7a2db6f99,74b60fff-7844-4097-9966-f1c2b1ad29ff,3de82ad5-bc1b-4b91-9598-f906e58ac937,a95e6b5e-24a4-40b5-bb41-b79e784f6f1c,6c9a12c6-c1ca-4c14-866b-975406a30590,c33b308b-7125-4688-9561-989ace8787b5,e43f004a-23bf-4027-8fb0-953fbb40479f</peers>
<debug/>
</server>
</servers>
</server>

@@ -869,6 +869,7 @@
<local_zone_type>transparent</local_zone_type>
<outgoing_interface/>
<enable_wpad/>
<safesearch/>
</general>
<advanced>
<hideidentity/>

@@ -862,6 +862,7 @@
<local_zone_type>transparent</local_zone_type>
<outgoing_interface/>
<enable_wpad/>
<safesearch/>
</general>
<advanced>
<hideidentity/>

@@ -869,6 +869,7 @@
<local_zone_type>transparent</local_zone_type>
<outgoing_interface/>
<enable_wpad/>
<safesearch/>
</general>
<advanced>
<hideidentity/>